Columns: repo_name (string, 6-130 characters), hexsha (list), file_path (list), code (list), apis (list)
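Each record below pairs a repository with per-file commit hashes, paths, source code, and the fully-qualified APIs used in that code. As a minimal sketch of how such records could be iterated (assuming they are stored as JSON lines with exactly these column names; the file name "github_code_apis.jsonl" is a hypothetical placeholder, not from the source):

    import json

    # Minimal sketch: walk records that follow the schema above.
    # "github_code_apis.jsonl" is a placeholder file name.
    with open("github_code_apis.jsonl") as f:
        for line in f:
            row = json.loads(line)
            repo = row["repo_name"]  # e.g. "Randl/MNASNet-pytorch"
            # hexsha, file_path, code and apis are parallel lists, one entry per file.
            for sha, path, code, apis in zip(row["hexsha"], row["file_path"], row["code"], row["apis"]):
                print(repo, sha[:8], path, len(code), "chars,", len(apis), "APIs")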
Randl/MNASNet-pytorch
[ "b949c36753b66979158c44ed0dc1368794b5c63d" ]
[ "MnasNet.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n# from https://github.com/billhhh/MnasNet-pytorch-pretrained\nfrom dropblock import DropBlockScheduled, DropBlock2D\n\n\ndef Conv_3x3(inp, oup, stride, activation=nn.ReLU6, act_params={\"inplace\": True}):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n nn.BatchNorm2d(oup),\n activation(**act_params)\n )\n\n\ndef Conv_1x1(inp, oup, activation=nn.ReLU6, act_params={\"inplace\": True}):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n activation(**act_params)\n )\n\n\ndef SepConv_3x3(inp, oup, activation=nn.ReLU6, act_params={\"inplace\": True}): # input=32, output=16\n return nn.Sequential(\n # dw\n nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),\n nn.BatchNorm2d(inp),\n activation(**act_params),\n # pw-linear\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio, kernel, drop_prob=0.0, num_steps=3e5, activation=nn.ReLU6,\n act_params={\"inplace\": True}):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n self.use_res_connect = self.stride == 1 and inp == oup\n\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),\n nn.BatchNorm2d(inp * expand_ratio),\n DropBlockScheduled(\n DropBlock2D(drop_prob=drop_prob, block_size=7),\n start_value=0.,\n stop_value=drop_prob,\n nr_steps=num_steps),\n activation(**act_params),\n # dw\n nn.Conv2d(inp * expand_ratio, inp * expand_ratio, kernel, stride, kernel // 2, groups=inp * expand_ratio,\n bias=False),\n nn.BatchNorm2d(inp * expand_ratio),\n DropBlockScheduled(\n DropBlock2D(drop_prob=drop_prob, block_size=7),\n start_value=0.,\n stop_value=drop_prob,\n nr_steps=num_steps),\n activation(**act_params),\n # pw-linear\n nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n DropBlockScheduled(\n DropBlock2D(drop_prob=drop_prob, block_size=7),\n start_value=0.,\n stop_value=drop_prob,\n nr_steps=num_steps),\n )\n if self.use_res_connect:\n self.skip_drop = DropBlockScheduled(\n DropBlock2D(drop_prob=drop_prob, block_size=7),\n start_value=0.,\n stop_value=drop_prob,\n nr_steps=num_steps)\n\n def forward(self, x):\n if self.use_res_connect:\n return self.skip_drop(x + self.conv(x))\n else:\n return self.conv(x)\n\n\nclass MnasNet(nn.Module):\n def __init__(self, n_class=1000, input_size=224, width_mult=1., drop_prob=0.0, num_steps=3e5, activation=nn.ReLU6,\n act_params={\"inplace\": True}):\n super(MnasNet, self).__init__()\n\n self.activation = activation\n self.act_params = act_params\n\n # setting of inverted residual blocks\n self.interverted_residual_setting = [\n # t, c, n, s, k, dp\n [3, 24, 3, 2, 3, 0], # -> 56x56\n [3, 40, 3, 2, 5, 0], # -> 28x28\n [6, 80, 3, 2, 5, 0], # -> 14x14\n [6, 96, 2, 1, 3, drop_prob], # -> 14x14\n [6, 192, 4, 2, 5, drop_prob], # -> 7x7\n [6, 320, 1, 1, 3, drop_prob], # -> 7x7\n ]\n self.num_steps = num_steps\n\n assert input_size % 32 == 0\n input_channel = int(32 * width_mult)\n self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280\n\n # building first two layer\n self.features = [Conv_3x3(3, input_channel, 2, self.activation, self.act_params),\n SepConv_3x3(input_channel, 16, self.activation, self.act_params)]\n input_channel = 16\n\n # building inverted residual blocks (MBConv)\n for t, c, n, s, k, dp in 
self.interverted_residual_setting:\n output_channel = int(c * width_mult)\n for i in range(n):\n if i == 0:\n self.features.append(InvertedResidual(input_channel, output_channel, s, t, k, dp, self.num_steps,\n self.activation, self.act_params))\n else:\n self.features.append(InvertedResidual(input_channel, output_channel, 1, t, k, dp, self.num_steps,\n self.activation, self.act_params))\n input_channel = output_channel\n\n # building last several layers\n self.features.append(Conv_1x1(input_channel, self.last_channel, self.activation, self.act_params))\n self.features.append(nn.AdaptiveAvgPool2d(1))\n\n # make it nn.Sequential\n self.features = nn.Sequential(*self.features)\n\n # building classifier\n self.classifier = nn.Sequential(\n nn.Dropout(0.0), # TODO\n nn.Linear(self.last_channel, n_class),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(-1, self.last_channel)\n x = self.classifier(x)\n return x\n\n\nif __name__ == '__main__':\n net = MnasNet()\n print(net)\n x_image = Variable(torch.randn(1, 3, 224, 224))\n y = net(x_image)\n # print(y)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.randn", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d" ] ]
chelseajohn/isle
[ "f610b55a1e8b6d2584896eb649092b0524cc1f8c" ]
[ "src/isle/checks.py" ]
[ "\"\"\"!\nVarious basic checks.\n\"\"\"\n\n## \\defgroup check Consistency checks\n# Perform consistency checks on HMC trajectories.\n#\n# All callables in this module satisfy the requirements of hmc.hmc, i.e.\n# they have arguments\n# - `startPhi`/`endPhi`: Configuration before and after the evolver.\n# - `startPi`/`endPi`: Momentum before and after the evolver.\n# - `startEnergy`/`endEnergy`: Energy before and after the evolver.\n#\n# and raise a `ConsistencyCheckFailure` in case of failure.\n#\n\nimport numpy as np\n\nclass ConsistencyCheckFailure(Exception):\n r\"\"\"!\n \\ingroup check\n Indicate failure of a consistency check during HMC.\n \"\"\"\n\ndef realityCheck(startPhi, startPi, startEnergy, endPhi, endPi, endEnergy):\n r\"\"\"!\n \\ingroup check\n Check whether endPhi and endPi are real.\n \"\"\"\n if np.max(np.imag(endPhi)/np.real(endPhi)) > 1e-15:\n raise ConsistencyCheckFailure(\"phi is not real\")\n if np.max(np.imag(endPi)/np.real(endPi)) > 1e-15:\n raise ConsistencyCheckFailure(\"pi is not real\")\n" ]
[ [ "numpy.real", "numpy.imag" ] ]
jpool-nv/apex
[ "d36397d2b8ce5c8854997e4ec2828e056e8fda89" ]
[ "apex/contrib/test/layer_norm/test_fast_layer_norm.py" ]
[ "import unittest\nimport sys\nimport os\n\nimport numpy as np\nimport torch\n\nimport fast_layer_norm as fln\nfrom apex.contrib.layer_norm.layer_norm import FastLayerNorm\n\n\nclass GPUTimer:\n def __init__(self, stream):\n self.start_ = torch.cuda.Event(enable_timing=True)\n self.stop_ = torch.cuda.Event(enable_timing=True)\n self.stream_ = stream\n\n def start(self):\n self.stream_.record_event(self.start_)\n\n def stop(self):\n self.stream_.record_event(self.stop_)\n\n def sync(self):\n self.stream_.synchronize()\n\n def millis(self):\n return self.start_.elapsed_time(self.stop_)\n\n\ndef size_in_bytes(t):\n return torch.numel(t) * t.element_size()\n\n\ndef metrics(y_ref, y, epsilon=1e-6):\n y_ref = y_ref.float()\n y = y.float()\n relerr, mse = (\n (y_ref - y).abs().sum() / (y_ref.abs().sum() + epsilon),\n (y_ref - y).square().mean(),\n )\n return relerr.item(), mse.item()\n\n\ndevice = torch.device(\"cuda\")\nfp32 = torch.float32\nfp16 = torch.float16\nbf16 = torch.bfloat16\n\n\ndef backward_(dz, x, mu, rs, gamma):\n\n wtype = gamma.dtype\n itype = x.dtype\n otype = dz.dtype\n ctype = mu.dtype\n mu = mu.unsqueeze(1)\n rs = rs.unsqueeze(1)\n\n hidden_size = gamma.numel()\n y = rs * (x.to(ctype) - mu)\n dbeta = dz.view(-1, hidden_size).sum(0, dtype=ctype)\n dgamma = (dz * y).view(-1, hidden_size).sum(0, dtype=ctype)\n dy = dz.view(-1, hidden_size).to(ctype) * gamma.unsqueeze(0).to(ctype)\n mdy = dy.mean(1, keepdim=True, dtype=ctype)\n\n mdyy = (dy * y).mean(1, keepdim=True, dtype=ctype)\n dx = rs * (dy - mdyy * y - mdy)\n\n return dx.to(itype), dgamma.to(wtype), dbeta.to(wtype)\n\n\ndef benchmark_(S, B, hidden_size, itype, wtype, runs=100):\n epsilon = 1e-5\n\n x = torch.randn((S * B, hidden_size), dtype=itype, device=device)\n beta = torch.randn(hidden_size, dtype=wtype, device=device)\n gamma = torch.randn(hidden_size, dtype=wtype, device=device)\n dz = torch.randn(x.shape, dtype=wtype, device=device)\n\n stream = torch.cuda.Stream()\n with torch.cuda.stream(stream):\n\n timer = GPUTimer(stream)\n\n # warmup\n for r in range(runs):\n z, mu, rsigma = fln.ln_fwd(x, gamma, beta, epsilon)\n\n timer.start()\n for r in range(runs):\n z, mu, rsigma = fln.ln_fwd(x, gamma, beta, epsilon)\n timer.stop()\n timer.sync()\n\n total_bytes_fwd = sum([size_in_bytes(t) for t in [x, z, gamma, beta, mu, rsigma]])\n\n ms_fwd = timer.millis() / runs\n\n print(\n \"[FWD] Time: {:.4f}ms Throughput: {:.4f} GB/sec\".format(\n ms_fwd, total_bytes_fwd * 1e-6 / ms_fwd\n )\n )\n\n timer.start()\n for r in range(runs):\n dx, dgamma, dbeta, dbp, dgp = fln.ln_bwd(dz, x, mu, rsigma, gamma)\n timer.stop()\n timer.sync()\n\n total_bytes_bwd = sum(\n [\n size_in_bytes(t)\n for t in [dz, x, mu, rsigma, gamma, dx, dgamma, dbeta, dbp, dbp, dgp, dgp]\n ]\n )\n\n ms_bwd = timer.millis() / runs\n\n print(\n \"[BWD] Time: {:.4f}ms Throughput: {:.4f} GB/sec\".format(\n ms_bwd, total_bytes_bwd * 1e-6 / ms_bwd\n )\n )\n\n\ndef test_(S, B, hidden_size, itype, wtype, ctype=fp32):\n\n seed = 1243\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n otype = wtype\n print(\"========================================================\")\n print(f\"S={S} B={B} Hidden={hidden_size} {itype} {wtype}\")\n print(\"--------------------------------------------------------\")\n\n x = torch.randn(S * B, hidden_size, dtype=itype, device=device)\n gamma = torch.randn(hidden_size, dtype=wtype, device=device) * 0.2\n beta = torch.randn(hidden_size, dtype=wtype, device=device) * 0.2\n epsilon = 1e-5\n\n x.requires_grad = True\n 
gamma.requires_grad = True\n beta.requires_grad = True\n\n mu_ref = x.mean(1, dtype=ctype, keepdim=True)\n v = torch.square(x - mu_ref).mean(1, dtype=ctype, keepdim=True)\n rs_ref = torch.rsqrt(v + epsilon)\n y_ref = rs_ref * (x.to(ctype) - mu_ref)\n z_ref = (gamma.unsqueeze(0) * (y_ref).to(otype) + beta.unsqueeze(0)).to(otype)\n\n mu_ref = mu_ref.flatten()\n rs_ref = rs_ref.flatten()\n\n dz = torch.randn_like(z_ref)\n\n # z_ref.backward(dz)\n # dx_ref = x.grad\n # dgamma_ref = gamma.grad\n # dbeta_ref = beta.grad\n\n dx_ref, dg_ref, db_ref = backward_(dz, x, mu_ref, rs_ref, gamma)\n\n z, mu, rs = fln.ln_fwd(x, gamma, beta, epsilon)\n dx, dg, db, dg_part, db_part = fln.ln_bwd(dz, x, mu, rs, gamma)\n\n re_z, mse_z = metrics(z_ref, z)\n re_mu, mse_mu = metrics(mu_ref, mu)\n re_rs, mse_rs = metrics(rs_ref, rs)\n\n re_dx, mse_dx = metrics(dx_ref, dx)\n re_dg, mse_dg = metrics(dg_ref, dg)\n re_db, mse_db = metrics(db_ref, db)\n\n print(f\" z: relerr={re_z :.4e} mse={mse_z :.4e}\")\n print(f\"mu: relerr={re_mu:.4e} mse={mse_mu:.4e}\")\n print(f\"rs: relerr={re_mu:.4e} mse={mse_mu:.4e}\")\n\n print(f\"dx: relerr={re_dx:.4e} mse={mse_dx:.4e}\")\n print(f\"dg: relerr={re_dg:.4e} mse={mse_dg:.4e}\")\n print(f\"db: relerr={re_db:.4e} mse={mse_db:.4e}\")\n\n def check_err(x, relerr):\n tol = 1e-3 if x.dtype == torch.float16 else 5e-6\n return relerr < tol\n\n return [\n check_err(x, re)\n for x, re in zip([z, mu, rs, dx, dg, db], [re_z, re_mu, re_rs, re_dx, re_dg, re_db])\n ]\n\n\nclass TestFastLayerNorm(unittest.TestCase):\n def assertAll(self, l):\n if not all(l):\n print(l)\n for x in l:\n self.assertTrue(x)\n\n def test_all_configs(self):\n\n hidden_sizes = [\n 768,\n 1024,\n 1536,\n 2048,\n 2304,\n 3072,\n 3840,\n 4096,\n 5120,\n 6144,\n 8192,\n 10240,\n 12288,\n 12800,\n 14336,\n 15360,\n 16384,\n 18432,\n 20480,\n 24576,\n 25600,\n 30720,\n 32768,\n 40960,\n 49152,\n 65536,\n ]\n\n for h in hidden_sizes:\n with self.subTest(f\"hidden_size={h}\"):\n self.assertAll(test_(256, 2, h, fp32, fp32))\n self.assertAll(test_(256, 2, h, fp16, fp16))\n self.assertAll(test_(256, 2, h, fp32, fp16))\n self.assertAll(test_(256, 2, h, bf16, bf16))\n self.assertAll(test_(256, 2, h, fp32, bf16))\n\n def test_run_benchmark(self):\n for (S, B, hidden_size, runs) in (\n (512, 32, 768, 1000),\n (512, 32, 1024, 1000),\n (512, 8, 4096, 1000),\n (512, 8, 5120, 1000),\n (512, 8, 6144, 1000),\n (256, 2, 20480, 500),\n (256, 2, 25600, 500),\n (256, 2, 40960, 250),\n (256, 2, 65536, 250),\n ):\n with self.subTest(f\"(S, B, hidden_size)=({S}, {B}, {hidden_size})\"):\n benchmark_(S, B, hidden_size, fp16, fp16, runs)\n\n def test_compat_with_autocast(self):\n autocast_dtypes = (\n (torch.half, torch.bfloat16) if torch.cuda.is_bf16_supported() else (torch.half,)\n )\n input_shape = (512, 32, 768)\n layer_norm = FastLayerNorm(input_shape[-1]).cuda()\n input = torch.randn(input_shape).cuda()\n\n for dtype in autocast_dtypes:\n layer_norm.zero_grad(set_to_none=True)\n with self.subTest(f\"autocast_dtype={dtype}\"):\n with torch.cuda.amp.autocast(enabled=True, dtype=dtype):\n out = layer_norm(input)\n self.assertEqual(dtype, out.dtype)\n grad = torch.randn_like(out)\n out.backward(grad)\n self.assertEqual(torch.float32, layer_norm.weight.grad.dtype)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.randn_like", "torch.cuda.manual_seed", "torch.cuda.is_bf16_supported", "torch.randn", "torch.manual_seed", "torch.cuda.Event", "torch.cuda.amp.autocast", "torch.numel", "torch.rsqrt", "torch.square", "torch.cuda.stream", "torch.device", "torch.cuda.Stream" ] ]
uzair789/ssd.pytorch
[ "e57779bff3b740e9d8a5fca1f4751169b54900a6" ]
[ "data/coco.py" ]
[ "from .config import HOME\nimport os\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport cv2\nimport numpy as np\n\n#COCO_ROOT = osp.join(HOME, 'data/coco/')\nCOCO_ROOT = osp.join('/media/apple/ssd.pytorch', 'data/')\nIMAGES = 'images'\nANNOTATIONS = 'annotations'\nCOCO_API = 'PythonAPI'\nINSTANCES_SET = 'instances_{}.json'\nCOCO_CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n 'train', 'truck', 'boat', 'traffic light', 'fire', 'hydrant',\n 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',\n 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',\n 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',\n 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',\n 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',\n 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',\n 'keyboard', 'cell phone', 'microwave oven', 'toaster', 'sink',\n 'refrigerator', 'book', 'clock', 'vase', 'scissors',\n 'teddy bear', 'hair drier', 'toothbrush')\n\n\ndef get_label_map(label_file):\n label_map = {}\n labels = open(label_file, 'r')\n for line in labels:\n ids = line.split(',')\n label_map[int(ids[0])] = int(ids[1])\n return label_map\n\n\nclass COCOAnnotationTransform(object):\n \"\"\"Transforms a COCO annotation into a Tensor of bbox coords and label index\n Initilized with a dictionary lookup of classnames to indexes\n \"\"\"\n def __init__(self):\n self.label_map = get_label_map(osp.join(COCO_ROOT, 'coco_labels.txt'))\n\n def __call__(self, target, width, height):\n \"\"\"\n Args:\n target (dict): COCO target json annotation as a python dict\n height (int): height\n width (int): width\n Returns:\n a list containing lists of bounding boxes [bbox coords, class idx]\n \"\"\"\n scale = np.array([width, height, width, height])\n res = []\n for obj in target:\n if 'bbox' in obj:\n bbox = obj['bbox']\n bbox[2] += bbox[0]\n bbox[3] += bbox[1]\n label_idx = self.label_map[obj['category_id']] - 1\n final_box = list(np.array(bbox)/scale)\n final_box.append(label_idx)\n res += [final_box] # [xmin, ymin, xmax, ymax, label_idx]\n else:\n print(\"no bbox problem!\")\n\n return res # [[xmin, ymin, xmax, ymax, label_idx], ... 
]\n\n\nclass COCODetection(data.Dataset):\n \"\"\"`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.\n Args:\n root (string): Root directory where images are downloaded to.\n set_name (string): Name of the specific set of COCO images.\n transform (callable, optional): A function/transform that augments the\n raw images`\n target_transform (callable, optional): A function/transform that takes\n in the target (bbox) and transforms it.\n \"\"\"\n\n def __init__(self, root, image_set='trainval35k', transform=None,\n target_transform=COCOAnnotationTransform(), dataset_name='MS COCO'):\n sys.path.append(osp.join(root, COCO_API))\n from pycocotools.coco import COCO\n self.root = osp.join(root, IMAGES, image_set)\n self.coco = COCO(osp.join(root, ANNOTATIONS,\n INSTANCES_SET.format(image_set)))\n self.ids = list(self.coco.imgToAnns.keys())\n self.transform = transform\n self.target_transform = target_transform\n self.name = dataset_name\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n im, gt, h, w = self.pull_item(index)\n return im, gt\n\n def __len__(self):\n return len(self.ids)\n\n def pull_item(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: Tuple (image, target, height, width).\n target is the object returned by ``coco.loadAnns``.\n \"\"\"\n img_id = self.ids[index]\n target = self.coco.imgToAnns[img_id]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n\n target = self.coco.loadAnns(ann_ids)\n path = osp.join(self.root, self.coco.loadImgs(img_id)[0]['file_name'])\n assert osp.exists(path), 'Image path does not exist: {}'.format(path)\n img = cv2.imread(osp.join(self.root, path))\n height, width, _ = img.shape\n if self.target_transform is not None:\n target = self.target_transform(target, width, height)\n if self.transform is not None:\n target = np.array(target)\n img, boxes, labels = self.transform(img, target[:, :4],\n target[:, 4])\n # to rgb\n img = img[:, :, (2, 1, 0)]\n\n target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n return torch.from_numpy(img).permute(2, 0, 1), target, height, width\n\n def pull_image(self, index):\n '''Returns the original image object at index in PIL form\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to show\n Return:\n cv2 img\n '''\n img_id = self.ids[index]\n path = self.coco.loadImgs(img_id)[0]['file_name']\n return cv2.imread(osp.join(self.root, path), cv2.IMREAD_COLOR)\n\n def pull_anno(self, index):\n '''Returns the original annotation of image at index\n\n Note: not using self.__getitem__(), as any transformations passed in\n could mess up this functionality.\n\n Argument:\n index (int): index of img to get annotation of\n Return:\n list: [img_id, [(label, bbox coords),...]]\n eg: ('001718', [('dog', (96, 13, 438, 332))])\n '''\n img_id = self.ids[index]\n ann_ids = self.coco.getAnnIds(imgIds=img_id)\n return self.coco.loadAnns(ann_ids)\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, 
self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n" ]
[ [ "numpy.array", "numpy.expand_dims", "torch.from_numpy" ] ]
rmathsphys/python-mathsphys
[ "ce0b3968e6e68dbeb76b1096b7536ec16fa5aef4" ]
[ "Code/trochoidal-waves.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as anim\n\nxmax = 18 # maximum x-axis value to display (xmin = 0)\nymax = 2.5 # maximum x-axis value to display (ymin = -ymax)\ncolour = '#f0f8ff'\n\nr = 1 # radius of the generating circle\nk = 60 # resolution of the surface\nturbulence = 0.90 # how calm/rough the waves are (0: no waves)\nframes = 220 # this controls the speed of the waves\n\ntstep = (2*np.pi)/frames # Automatic Enables looping, In radians\nphi = np.linspace(0, 2*np.pi, 100)\nx0 = np.linspace(0-2*r, xmax+2*r, k)\ncircles = np.arange(0, xmax+(2*r), 2*r)\n\nt = lambda x, n: (n*tstep + (0.2*np.pi*x*turbulence % (2*np.pi))) % (2*np.pi) # period: x=10\ncircle_x = lambda a, b: a + r*np.cos(b)\ncircle_y = lambda a, b: a + r*np.sin(b)\n\nfig, ax = plt.subplots(figsize=(7.2, 4.06), facecolor=colour)\nfig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)\nax.set(aspect=True, xlim=(0,xmax), ylim=(-ymax,ymax))\nax.axis(False)\n\nax.text(x=0.97, y=-0.4, transform=ax.transAxes, s='@RMathsPhys',\n ha='right', va='top', fontsize=7, color='black', fontfamily='Serif')\n\nsurf_x = [circle_x(i, t(i, 0)) for i in x0]\nsurf_y = [circle_y(0, t(i, 0)) for i in x0]\ndots_x = [circle_x(i, t(i, 0)) for i in circles]\ndots_y = [circle_y(0, t(i, 0)) for i in circles]\n\nfor i in circles:\n ax.plot(circle_x(i, phi), circle_y(0, phi), c='#008b8b', alpha=0.2)\n \ndots = ax.scatter(dots_x, dots_y, c='#ff8c00', zorder=9)\nsurface, = ax.plot(surf_x, surf_y, c='#0047ab', lw=4)\ndef evolver(n): \n surf_x = [circle_x(i, t(i, n)) for i in x0]\n surf_y = [circle_y(0, t(i, n)) for i in x0]\n dots_x = [circle_x(i, t(i, n)) for i in circles]\n dots_y = [circle_y(0, t(i, n)) for i in circles]\n \n dots.set_offsets(np.column_stack([dots_x, dots_y]))\n surface.set_data(np.array([surf_x, surf_y])) \n return dots, surface\n\nani = anim.FuncAnimation(fig, evolver, frames=frames, interval=10, repeat=True)\nmetadata = {'title':'Trochoidal Waves', 'author':'RMathsPhys'}\nwriter = anim.FFMpegWriter(metadata=metadata, fps=100)\n# ani.save('trochoidalwaves.mp4', dpi=320, writer=writer, savefig_kwargs={'facecolor':colour})\n\nplt.show()\n" ]
[ [ "numpy.linspace", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.cos", "numpy.sin", "matplotlib.animation.FuncAnimation", "numpy.column_stack", "numpy.array", "matplotlib.pyplot.show", "matplotlib.animation.FFMpegWriter" ] ]
804173948/nslt
[ "0faae60e19b7df425fb0635e8705e3a791fb2fd3" ]
[ "nslt/utils/iterator_utils_old.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"For loading data into NMT models.\"\"\"\nfrom __future__ import print_function\n\nimport collections\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nimport cv2\n\nimport tensorflow as tf\n\n__all__ = [\"BatchedInput\", \"get_iterator\", \"get_infer_iterator\"]\n\n\n# NOTE(ebrevdo): When we subclass this, instances' __dict__ becomes empty.\nclass BatchedInput(collections.namedtuple(\"BatchedInput\",\n (\"initializer\",\n \"source\",\n \"target_input\",\n \"target_output\",\n \"source_sequence_length\",\n \"target_sequence_length\"))):\n pass\n\n\ndef get_infer_iterator(src_dataset,\n source_reverse,\n src_max_len=None):\n\n # Get number of Frames\n src_dataset = src_dataset.map(lambda src: (src, tf.py_func(get_number_of_frames, [src], tf.int32)))\n\n # Filter Out Samples\n src_dataset = src_dataset.filter(lambda src, src_len: tf.logical_and(src_len > 0, src_len < src_max_len))\n\n src_dataset = src_dataset.map(lambda src, src_len:\n (tf.reshape(tf.pad(tf.py_func(read_video, [src, source_reverse], tf.float32), [[0, src_max_len - src_len], [0, 0], [0, 0], [0, 0]], \"CONSTANT\"), [300, 227, 227, 3]),\n tf.reshape(src_len, [1])))\n\n\n batched_iter = src_dataset.make_initializable_iterator()\n\n (src_video, src_seq_len) = batched_iter.get_next()\n\n return BatchedInput(initializer=batched_iter.initializer,\n source=src_video,\n target_input=None,\n target_output=None,\n source_sequence_length=src_seq_len,\n target_sequence_length=None)\n\n\ndef get_number_of_frames(src):\n # listdir(x) : 返回 x 路径下的文件列表\n # 返回 src 下文件数\n print('get_number_of_frames:'+str(src))\n return np.int32(len([f for f in listdir(src) if isfile(join(src, f))]))\n\n# 读视频\ndef read_video(src, source_reverse):\n images = sorted([f for f in listdir(src) if isfile(join(src, f))])\n video = np.zeros((len(images), 227, 227, 3)).astype(np.float32)\n\n # Cihan_CR: Harcoded Path, Need to Change This\n mean_image = np.load('../Mean/FulFrame_Mean_Image_227x227.npy').astype(np.float32)[..., ::-1]\n\n # for each image\n for i in range(0, len(images)):\n video[i, :, :, :] = cv2.imread(src + images[i]).astype(np.float32) - mean_image\n\n if source_reverse:\n video = np.flip(video, axis=0)\n\n return video\n\ndef get_iterator(src_dataset,\n tgt_dataset,\n tgt_vocab_table,\n sos, # start of sentence\n eos, # end of sentence\n source_reverse,\n random_seed,\n src_max_len=None,\n tgt_max_len=None,\n num_threads=4,\n output_buffer_size=None,\n skip_count=None):\n\n # Cihan_CR: Hard Codded - Need to Change this\n # if not output_buffer_size:\n # output_buffer_size = 10 # batch_size * 1000\n\n output_buffer_size = 10\n\n tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)\n tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)\n\n # Concat Datasets\n # 合并数据集\n src_tgt_dataset = 
tf.data.Dataset.zip((src_dataset, tgt_dataset))\n\n # Skip Data\n if skip_count is not None:\n src_tgt_dataset = src_tgt_dataset.skip(skip_count)\n\n # Shuffle Samples: You must do it as early as possible\n src_tgt_dataset = src_tgt_dataset.shuffle(output_buffer_size * 1000, random_seed)\n\n # Get number of frames from videos\n # 获取帧数\n src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt:\n (src, tgt, tf.py_func(get_number_of_frames, [src], tf.int32)))\n\n # Split Translation into Tokens\n # 分词\n src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt, src_len:\n (src, tf.string_split([tgt]).values, src_len))\n\n # Sequence Length Checks\n # 过滤非法数据\n src_tgt_dataset = src_tgt_dataset.filter(lambda src, tgt, src_len:\n tf.logical_and(src_len > 0, tf.size(tgt) > 0))\n src_tgt_dataset = src_tgt_dataset.filter(lambda src, tgt, src_len:\n tf.logical_and(src_len < src_max_len, tf.size(tgt) < tgt_max_len))\n\n # Convert Tokens to IDs\n src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt, src_len:\n (src, tf.cast(tgt_vocab_table.lookup(tgt), tf.int32), src_len))\n\n # Create Input and Output for Target\n src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt, src_len:\n (src,\n tf.concat(([tgt_sos_id], tgt), 0),\n tf.concat((tgt, [tgt_eos_id]), 0),\n src_len))\n\n # Get Target Sequence Length\n src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt_in, tgt_out, src_len:\n (src, tgt_in, tgt_out, src_len, tf.size(tgt_in)))\n\n # Pad Target Sequence With 0s\n # src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt_in, tgt_out, src_len, tgt_len:\n # (src,\n # tf.pad(tgt_in, [[0, tgt_max_len - tgt_len]], \"CONSTANT\"),\n # tf.pad(tgt_out, [[0, tgt_max_len - tgt_len]], \"CONSTANT\"),\n # src_len,\n # tgt_len),\n # num_threads=num_threads, output_buffer_size=output_buffer_size)\n\n # Read and Pad Source Video from source path\n src_tgt_dataset = src_tgt_dataset.map(lambda src, tgt_in, tgt_out, src_len, tgt_len:\n (tf.reshape( # 改变形状\n tf.pad( # 填充\n tf.py_func( # 读视频\n read_video, [src, source_reverse], tf.float32\n ),\n [[0, src_max_len - src_len], [0, 0], [0, 0], [0, 0]],\n \"CONSTANT\"\n ),\n [300,227,227,3]\n ),\n tf.expand_dims(tgt_in, 0),\n tf.expand_dims(tgt_out, 0),\n tf.reshape(src_len, [1]),\n tf.reshape(tgt_len, [1])))\n\n # Create Initializer\n batched_iter = src_tgt_dataset.make_initializable_iterator()\n\n # Get Next Function\n src_video, tgt_input_ids, tgt_output_ids, src_seq_len, tgt_seq_len = batched_iter.get_next()\n\n # Return Input\n return BatchedInput(initializer=batched_iter.initializer, source=src_video, target_input=tgt_input_ids,\n target_output=tgt_output_ids,\n source_sequence_length=src_seq_len, target_sequence_length=tgt_seq_len)\n" ]
[ [ "tensorflow.constant", "tensorflow.concat", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.data.Dataset.zip", "tensorflow.string_split", "numpy.load", "numpy.flip", "tensorflow.size", "tensorflow.py_func", "tensorflow.logical_and" ] ]
sihaanssr/semVI
[ "3d7a773eb533e2f05249839d3730c8b4cbefca15", "3d7a773eb533e2f05249839d3730c8b4cbefca15" ]
[ "ML/Simple-Naive-Bayes-Weather-Prediction-master/bayes.py", "DWH/Expt10/info_gain_decision_tree.py" ]
[ "from functools import reduce\n\nimport pandas as pd\nimport pprint\n\nclass Classifier():\n data = None\n class_attr = None\n priori = {}\n cp = {}\n hypothesis = None\n\n\n def __init__(self,filename=None, class_attr=None ):\n self.data = pd.read_csv(filename, sep=',', header =(0))\n self.class_attr = class_attr\n\n '''\n probability(class) = How many times it appears in cloumn\n __________________________________________\n count of all class attribute\n '''\n def calculate_priori(self):\n class_values = list(set(self.data[self.class_attr]))\n class_data = list(self.data[self.class_attr])\n for i in class_values:\n self.priori[i] = class_data.count(i)/float(len(class_data))\n print (\"Priori Values: \", self.priori)\n\n '''\n Here we calculate the individual probabilites \n P(outcome|evidence) = P(Likelihood of Evidence) x Prior prob of outcome\n ____________________________________________________\n P(Evidence)\n '''\n def get_cp(self, attr, attr_type, class_value):\n data_attr = list(self.data[attr])\n print(data_attr)\n class_data = list(self.data[self.class_attr])\n print(class_data)\n total =1\n for i in range(0, len(data_attr)):\n if class_data[i] == class_value and data_attr[i] == attr_type:\n total+=1\n return total/float(class_data.count(class_value))\n\n '''\n Here we calculate Likelihood of Evidence and multiple all individual probabilities with priori\n (Outcome|Multiple Evidence) = P(Evidence1|Outcome) x P(Evidence2|outcome) x ... x P(EvidenceN|outcome) x P(Outcome)\n scaled by P(Multiple Evidence)\n '''\n def calculate_conditional_probabilities(self, hypothesis):\n for i in self.priori:\n self.cp[i] = {}\n for j in hypothesis:\n self.cp[i].update({ hypothesis[j]: self.get_cp(j, hypothesis[j], i)})\n print (\"\\n Calculated Conditional Probabilities: \\n \")\n pprint.pprint(self.cp)\n\n def classify(self):\n print (\"Result: \")\n for i in self.cp:\n print (i, \" ==> \", reduce(lambda x, y: x*y, self.cp[i].values())*self.priori[i])\n\nif __name__ == \"__main__\":\n c = Classifier(filename=\"new_dataset.csv\", class_attr=\"Play\" )\n c.calculate_priori()\n c.hypothesis = {\"Outlook\":'Rainy', \"Temp\":\"Mild\", \"Humidity\":'Normal' , \"Windy\":'t'}\n\n c.calculate_conditional_probabilities(c.hypothesis)\n c.classify()\n\n'''\nOUTPUT\n\nPriori Values: {'yes': 0.6428571428571429, 'no': 0.35714285714285715}\n\nCalculated Conditional Probabilities:\n\n{'no': {'Mild': 0.6, 'Normal': 0.4, 'Rainy': 0.8, 't': 0.8},\n 'yes': {'Mild': 0.5555555555555556,\n 'Normal': 0.7777777777777778,\n 'Rainy': 0.3333333333333333,\n 't': 0.4444444444444444}}\nResult:\nyes ==> 0.04115226337448559\nno ==> 0.05485714285714286\n\n'''\n", "# -*- coding: utf-8 -*-\n\"\"\"info-gain-decision-tree.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/1yxruxa0XPFuBsndylsw17NGcivBEtM4k\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\neps = np.finfo(float).eps\nfrom numpy import log2 as log\nimport pprint\n\noutlook = 'overcast,overcast,overcast,overcast,rainy,rainy,rainy,rainy,rainy,sunny,sunny,sunny,sunny,sunny'.split(',')\ntemp = 'hot,cool,mild,hot,mild,cool,cool,mild,mild,hot,hot,mild,cool,mild'.split(',')\nhumidity = 'high,normal,high,normal,high,normal,normal,normal,high,high,high,high,normal,normal'.split(',')\nwindy = 'FALSE,TRUE,TRUE,FALSE,FALSE,FALSE,TRUE,FALSE,TRUE,FALSE,TRUE,FALSE,FALSE,TRUE'.split(',')\nplay = 'yes,yes,yes,yes,yes,yes,no,yes,no,no,no,no,yes,yes'.split(',')\n\ndataset 
={'outlook':outlook,'temp':temp,'humidity':humidity,'windy':windy,'play':play}\ndf = pd.DataFrame(dataset,columns=['outlook','temp','humidity','windy','play'])\nprint(df.head())\n\ndef find_entropy(df):\n Class = df.keys()[-1] #To make the code generic, changing target variable class name\n entropy = 0\n values = df[Class].unique()\n for value in values:\n fraction = df[Class].value_counts()[value]/len(df[Class])\n entropy += -fraction*np.log2(fraction)\n return entropy\n \n \ndef find_entropy_attribute(df,attribute):\n Class = df.keys()[-1] #To make the code generic, changing target variable class name\n target_variables = df[Class].unique() #This gives all 'Yes' and 'No'\n variables = df[attribute].unique() #This gives different features in that attribute (like 'Hot','Cold' in Temperature)\n entropy2 = 0\n for variable in variables:\n entropy = 0\n for target_variable in target_variables:\n num = len(df[attribute][df[attribute]==variable][df[Class] ==target_variable])\n den = len(df[attribute][df[attribute]==variable])\n fraction = num/(den+eps)\n entropy += -fraction*log(fraction+eps)\n fraction2 = den/len(df)\n entropy2 += -fraction2*entropy\n return abs(entropy2)\n\n\ndef find_winner(df):\n Entropy_att = []\n IG = []\n for key in df.keys()[:-1]:\n# Entropy_att.append(find_entropy_attribute(df,key))\n IG.append(find_entropy(df)-find_entropy_attribute(df,key))\n return df.keys()[:-1][np.argmax(IG)]\n \n \ndef get_subtable(df, node,value):\n return df[df[node] == value].reset_index(drop=True)\n\n\ndef buildTree(df,tree=None): \n Class = df.keys()[-1] #To make the code generic, changing target variable class name\n \n #Here we build our decision tree\n\n #Get attribute with maximum information gain\n node = find_winner(df)\n \n #Get distinct value of that attribute e.g Salary is node and Low,Med and High are values\n attValue = np.unique(df[node])\n \n #Create an empty dictionary to create tree \n if tree is None: \n tree={}\n tree[node] = {}\n \n #We make loop to construct a tree by calling this function recursively. \n #In this we check if the subset is pure and stops if it is pure. \n\n for value in attValue:\n \n subtable = get_subtable(df,node,value)\n clValue,counts = np.unique(subtable['play'],return_counts=True) \n \n if len(counts)==1:#Checking purity of subset\n tree[node][value] = clValue[0] \n else: \n tree[node][value] = buildTree(subtable) #Calling the function recursively \n \n return tree\n\ntree = buildTree(df)\n\npprint.pprint(tree)" ]
[ [ "pandas.read_csv" ], [ "numpy.log2", "numpy.unique", "pandas.DataFrame", "numpy.finfo", "numpy.argmax" ] ]
UU-tracktech/fast-reid
[ "8e367315fc3b95d326fc37a9bde7b83f90eaf825" ]
[ "fastreid/layers/splat.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: xingyu liao\n@contact: sherlockliao01@gmail.com\n\"\"\"\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import Conv2d, ReLU\nfrom torch.nn.modules.utils import _pair\ntry:\n from processor.pipeline.reidentification.fastreid.fastreid.layers import get_norm\nexcept ImportError:\n from fastreid.layers import get_norm\n\n\nclass SplAtConv2d(nn.Module):\n \"\"\"Split-Attention Conv2d\n \"\"\"\n\n def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),\n dilation=(1, 1), groups=1, bias=True,\n radix=2, reduction_factor=4,\n rectify=False, rectify_avg=False, norm_layer=None,\n dropblock_prob=0.0, **kwargs):\n super(SplAtConv2d, self).__init__()\n padding = _pair(padding)\n self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)\n self.rectify_avg = rectify_avg\n inter_channels = max(in_channels * radix // reduction_factor, 32)\n self.radix = radix\n self.cardinality = groups\n self.channels = channels\n self.dropblock_prob = dropblock_prob\n if self.rectify:\n from rfconv import RFConv2d\n self.conv = RFConv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,\n groups=groups * radix, bias=bias, average_mode=rectify_avg, **kwargs)\n else:\n self.conv = Conv2d(in_channels, channels * radix, kernel_size, stride, padding, dilation,\n groups=groups * radix, bias=bias, **kwargs)\n self.use_bn = norm_layer is not None\n if self.use_bn:\n self.bn0 = get_norm(norm_layer, channels * radix)\n self.relu = ReLU(inplace=True)\n self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)\n if self.use_bn:\n self.bn1 = get_norm(norm_layer, inter_channels)\n self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self.cardinality)\n if dropblock_prob > 0.0:\n self.dropblock = DropBlock2D(dropblock_prob, 3)\n self.rsoftmax = rSoftMax(radix, groups)\n\n def forward(self, x):\n x = self.conv(x)\n if self.use_bn:\n x = self.bn0(x)\n if self.dropblock_prob > 0.0:\n x = self.dropblock(x)\n x = self.relu(x)\n\n batch, rchannel = x.shape[:2]\n if self.radix > 1:\n if torch.__version__ < '1.5':\n splited = torch.split(x, int(rchannel // self.radix), dim=1)\n else:\n splited = torch.split(x, rchannel // self.radix, dim=1)\n gap = sum(splited)\n else:\n gap = x\n gap = F.adaptive_avg_pool2d(gap, 1)\n gap = self.fc1(gap)\n\n if self.use_bn:\n gap = self.bn1(gap)\n gap = self.relu(gap)\n\n atten = self.fc2(gap)\n atten = self.rsoftmax(atten).view(batch, -1, 1, 1)\n\n if self.radix > 1:\n if torch.__version__ < '1.5':\n attens = torch.split(atten, int(rchannel // self.radix), dim=1)\n else:\n attens = torch.split(atten, rchannel // self.radix, dim=1)\n out = sum([att * split for (att, split) in zip(attens, splited)])\n else:\n out = atten * x\n return out.contiguous()\n\n\nclass rSoftMax(nn.Module):\n def __init__(self, radix, cardinality):\n super().__init__()\n self.radix = radix\n self.cardinality = cardinality\n\n def forward(self, x):\n batch = x.size(0)\n if self.radix > 1:\n x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)\n x = F.softmax(x, dim=1)\n x = x.reshape(batch, -1)\n else:\n x = torch.sigmoid(x)\n return x\n\n\nclass DropBlock2D(object):\n def __init__(self, *args, **kwargs):\n raise NotImplementedError\n" ]
[ [ "torch.nn.functional.softmax", "torch.sigmoid", "torch.split", "torch.nn.Conv2d", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.modules.utils._pair", "torch.nn.ReLU" ] ]
LinWeizheDragon/AutoFidgetDetection
[ "8a0d0fcc8938c2c9e97655e999e226c61f414cfe" ]
[ "src/utility/interpolation.py" ]
[ "import numpy as np\nfrom scipy.interpolate import interp1d\n\ndef cubic_interpolate(collection, confidence):\n '''\n Cubic Interpolation\n :param collection: np.array shape(n_frames,)\n :param confidence: np.array shpae(n_frames,)\n :return: interpolated_collection: np.array shape(n_frames,)\n '''\n t_max = collection.shape[0]\n\n processed_collection = []\n time_axis = []\n for x in range(t_max):\n if confidence[x] != 0:\n processed_collection.append(collection[x])\n time_axis.append(x)\n #else:\n # print('missing data point!')\n\n f = interp1d(np.array(time_axis), np.array(processed_collection), kind='cubic', fill_value='extrapolate')\n #plt.plot([t for t in range(t_max)], pose_x_collection[index], 'o', dense_time_axis, f(dense_time_axis), '--')\n #plt.plot(dense_time_axis, f(dense_time_axis))\n #$plt.show()\n #print(f(np.array([t for t in range(t_max)])))\n return f(np.array([t for t in range(t_max)]))\n" ]
[ [ "numpy.array" ] ]
njimenezd/Pandora
[ "9e3c2054415301edac6da7510056af0136790277" ]
[ "pandora/matching_cost/zncc.py" ]
[ "#!/usr/bin/env python\n# coding: utf8\n#\n# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).\n#\n# This file is part of PANDORA\n#\n# https://github.com/CNES/Pandora\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nThis module contains functions associated to ZNCC method used in the cost volume measure step.\n\"\"\"\n\nfrom typing import Dict, Union\n\nimport numpy as np\nimport xarray as xr\nfrom json_checker import Checker, And\n\nfrom pandora.img_tools import shift_right_img, compute_mean_raster, compute_std_raster\nfrom pandora.matching_cost import matching_cost\n\n\n@matching_cost.AbstractMatchingCost.register_subclass(\"zncc\")\nclass Zncc(matching_cost.AbstractMatchingCost):\n \"\"\"\n Zero mean normalized cross correlation\n Zncc class allows to compute the cost volume\n \"\"\"\n\n # Default configuration, do not change these values\n _WINDOW_SIZE = 5\n _SUBPIX = 1\n\n def __init__(self, **cfg: Union[str, int]) -> None:\n \"\"\"\n :param cfg: optional configuration, {'window_size': value, 'subpix': value}\n :type cfg: dictionary\n :return: None\n \"\"\"\n self.cfg = self.check_conf(**cfg)\n self._window_size = self.cfg[\"window_size\"]\n self._subpix = self.cfg[\"subpix\"]\n\n def check_conf(self, **cfg: Union[str, int]) -> Dict[str, Union[str, int]]:\n \"\"\"\n Add default values to the dictionary if there are missing elements and check if the dictionary is correct\n\n :param cfg: matching cost configuration\n :type cfg: dict\n :return cfg: matching cost configuration updated\n :rtype: dict\n \"\"\"\n # Give the default value if the required element is not in the configuration\n if \"window_size\" not in cfg:\n cfg[\"window_size\"] = self._WINDOW_SIZE\n if \"subpix\" not in cfg:\n cfg[\"subpix\"] = self._SUBPIX\n\n schema = {\n \"matching_cost_method\": And(str, lambda input: \"zncc\"),\n \"window_size\": And(int, lambda input: input > 0 and (input % 2) != 0),\n \"subpix\": And(int, lambda input: input > 0 and ((input % 2) == 0) or input == 1),\n }\n\n checker = Checker(schema)\n checker.validate(cfg)\n return cfg\n\n def desc(self) -> None:\n \"\"\"\n Describes the matching cost method\n :return: None\n \"\"\"\n print(\"zncc similarity measure\")\n\n def compute_cost_volume(\n self, img_left: xr.Dataset, img_right: xr.Dataset, disp_min: int, disp_max: int\n ) -> xr.Dataset:\n \"\"\"\n Computes the cost volume for a pair of images\n\n :param img_left: left Dataset image\n :type img_left:\n xarray.Dataset containing :\n - im : 2D (row, col) xarray.DataArray\n - msk : 2D (row, col) xarray.DataArray\n :param img_right: right Dataset image\n :type img_right:\n xarray.Dataset containing :\n - im : 2D (row, col) xarray.DataArray\n - msk : 2D (row, col) xarray.DataArray\n :param disp_min: minimum disparity\n :type disp_min: int\n :param disp_max: maximum disparity\n :type disp_max: int\n :return: the cost volume dataset\n :rtype:\n xarray.Dataset, with the data variables:\n - cost_volume 3D xarray.DataArray (row, col, disp)\n \"\"\"\n # Contains the shifted 
right images\n img_right_shift = shift_right_img(img_right, self._subpix)\n\n # Computes the standard deviation raster for the whole images\n # The standard deviation raster is truncated for points that are not calculable\n img_left_std = compute_std_raster(img_left, self._window_size)\n img_right_std = []\n for i, img in enumerate(img_right_shift): # pylint: disable=unused-variable\n img_right_std.append(compute_std_raster(img, self._window_size))\n\n # Computes the mean raster for the whole images\n # The standard mean raster is truncated for points that are not calculable\n img_left_mean = compute_mean_raster(img_left, self._window_size)\n img_right_mean = []\n for i, img in enumerate(img_right_shift):\n img_right_mean.append(compute_mean_raster(img, self._window_size))\n\n # Maximal cost of the cost volume with zncc measure\n cmax = 1\n\n # Cost volume metadata\n offset_row_col = int((self._window_size - 1) / 2)\n metadata = {\n \"measure\": \"zncc\",\n \"subpixel\": self._subpix,\n \"offset_row_col\": offset_row_col,\n \"window_size\": self._window_size,\n \"type_measure\": \"max\",\n \"cmax\": cmax,\n }\n\n # Disparity range\n if self._subpix == 1:\n disparity_range = np.arange(disp_min, disp_max + 1)\n else:\n disparity_range = np.arange(disp_min, disp_max, step=1 / float(self._subpix))\n disparity_range = np.append(disparity_range, [disp_max])\n\n # Allocate the numpy cost volume cv = (disp, col, row), for efficient memory management\n cv = np.zeros(\n (len(disparity_range), img_left[\"im\"].shape[1], img_right[\"im\"].shape[0]),\n dtype=np.float32,\n )\n cv += np.nan\n\n # If offset, do not consider border position for cost computation\n if offset_row_col != 0:\n cv_crop = cv[:, offset_row_col:-offset_row_col, offset_row_col:-offset_row_col]\n else:\n cv_crop = cv\n\n # Computes the matching cost\n for disp in disparity_range:\n i_right = int((disp % 1) * self._subpix)\n point_p, point_q = self.point_interval(img_left, img_right_shift[i_right], disp)\n dsp = int((disp - disp_min) * self._subpix)\n\n # Point interval in the left standard deviation image\n # - (win_radius * 2) because img_std is truncated for points that are not calculable\n p_std = (point_p[0], point_p[1] - (int(self._window_size / 2) * 2))\n # Point interval in the right standard deviation image\n q_std = (point_q[0], point_q[1] - (int(self._window_size / 2) * 2))\n\n # Compute the normalized summation of the product of intensities\n zncc_ = (\n img_left[\"im\"].data[:, point_p[0] : point_p[1]]\n * img_right_shift[i_right][\"im\"].data[:, point_q[0] : point_q[1]]\n )\n zncc_ = xr.Dataset(\n {\"im\": ([\"row\", \"col\"], zncc_)},\n coords={\n \"row\": np.arange(zncc_.shape[0]),\n \"col\": np.arange(zncc_.shape[1]),\n },\n )\n zncc_ = compute_mean_raster(zncc_, self._window_size)\n # Subtracting the local mean value of intensities\n zncc_ -= img_left_mean[:, p_std[0] : p_std[1]] * img_right_mean[i_right][:, q_std[0] : q_std[1]]\n\n # Divide by the standard deviation of the intensities of the images :\n # If the standard deviation of the intensities of the images is greater than 0\n divide_standard = np.multiply(\n img_left_std[:, p_std[0] : p_std[1]],\n img_right_std[i_right][:, q_std[0] : q_std[1]],\n )\n valid = np.where(divide_standard > 0)\n zncc_[valid] /= divide_standard[valid]\n\n # Otherwise zncc is equal to 0\n zncc_[np.where(divide_standard <= 0)] = 0\n\n # Places the result in the cost_volume\n cv_crop[dsp, point_p[0] : p_std[1], :] = np.swapaxes(zncc_, 0, 1)\n\n # Create the xarray.DataSet that will 
contain the cost_volume of dimensions (row, col, disp)\n cv = self.allocate_costvolume(\n img_left,\n self._subpix,\n disp_min,\n disp_max,\n self._window_size,\n metadata,\n np.swapaxes(cv, 0, 2),\n )\n\n return cv\n" ]
[ [ "numpy.swapaxes", "numpy.multiply", "numpy.arange", "numpy.append", "numpy.where" ] ]
etri/dtsim
[ "927c8e05c08c74ed376ec233ff677cd35b29e6f0" ]
[ "util/Visualizer/animateSingle.py" ]
[ "#!/usr/bin/python3.6\n\nimport shapefile\nimport matplotlib.pyplot as plt\nimport matplotlib.animation\nimport numpy as np\nimport configparser\nimport random\nimport time \n\n######################################\n# STEP1 : load config parameter\n######################################\nconfig = configparser.ConfigParser()\nconfig.read('animate.conf')\n\nopt_xinch = float(config.get('PARAMETER', 'screen.xwidth.inch'))\nopt_shape_path = config.get('PARAMETER', 'shape.file.path')\nopt_road_enable = int(config.get('PARAMETER', 'shape.road.enable'))\nopt_area_enable = int(config.get('PARAMETER', 'shape.area.enable'))\nopt_building_enable = int(config.get('PARAMETER', 'shape.building.enable'))\nopt_human_path = config.get('PARAMETER', 'file.path.human')\nopt_human_size = int(config.get('PARAMETER', 'human.icon.size'))\nopt_human_color = config.get('PARAMETER', 'human.icon.color')\nopt_human_rate = float(config.get('PARAMETER', 'human.sampling.rate'))\nopt_bus_path = config.get('PARAMETER', 'file.path.bus')\nopt_bus_size = int(config.get('PARAMETER', 'bus.icon.size'))\nopt_bus_color = config.get('PARAMETER', 'bus.icon.color')\n\n######################################\n# STEP2 : set global variables\n######################################\n# declare geometry for Sejong city\nxmin = 127.127729912708\nxmax = 127.410803835874\nymin = 36.3972620634463\nymax = 36.7338218767278\n\n# list for containing all agent's location within a step\nSTEPS = \"\"\nX_HUMAN = []\nY_HUMAN = []\n\nEND = False\n\n# canvas size in proportion to the real geometry\nx_inch = opt_xinch\ny_inch = x_inch * ((ymax-ymin) / (xmax-xmin))\n\n######################################\n# STEP3 : create matplot object \n######################################\nfig = plt.figure()\nfig.set_size_inches(x_inch, y_inch)\n\nax = fig.add_subplot(111)\n\nplt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)\nplt.xlim([xmin, xmax])\nplt.ylim([ymin, ymax])\n\nannotation = plt.annotate('', xy=(0, 0), xytext=(0, 0))\n\n######################################\n# STEP4 : read shape object \n######################################\nif opt_road_enable == 1:\n sf = shapefile.Reader(opt_shape_path + \"/ROAD\")\n for shape in sf.shapes():\n ap = plt.Polygon(shape.points, closed=False, fill=False, alpha=0.3, linewidth=0.5, edgecolor=\"k\")\n ax.add_patch(ap)\n\nif opt_area_enable == 1 :\n sf_area = shapefile.Reader(opt_shape_path + \"/areas\")\n for shape in sf_area.shapes():\n ap = plt.Polygon(shape.points, fill=False, alpha=0.5, linewidth=0.5, edgecolor=\"k\")\n ax.add_patch(ap)\n\nif opt_building_enable == 1 :\n sf_building = shapefile.Reader(opt_shape_path + \"/buildings\")\n for shape in sf_building.shapes():\n ap = plt.Polygon(shape.points, fill=True, alpha=0.5, linewidth=0.7, edgecolor=\"k\")\n ax.add_patch(ap)\n\n######################################\n# STEP5 : load routing information \n######################################\nfile_human = open(opt_human_path, 'rt')\n\ndef loadHumanMovement():\n global STEPS\n global END\n\n while True:\n line = file_human.readline()\n if line == \"\":\n END = True;\n break\n\n if line.startswith('STEP'):\n STEPS = line\n break\n\n # filtering agent by probability\n if opt_human_rate != 1.0 :\n if random.random() > opt_human_rate :\n continue;\n\n ar = line.strip('\\n').split(',')\n xval = float(ar[0])\n yval = float(ar[1])\n X_HUMAN.append(xval)\n Y_HUMAN.append(yval)\n\n######################################\n# STEP6 : animate agent movement \n######################################\ndef 
animate(i):\n global annotation \n\n # clear list \n X_HUMAN.clear()\n Y_HUMAN.clear()\n\n # load agents location in a step\n loadHumanMovement()\n\n if END == True:\n time.sleep(1)\n return\n\n # clear canvas\n ax = plt.gca()\n del ax.collections[:]\n annotation.remove()\n\n # print agents location \n plt.scatter(X_HUMAN, Y_HUMAN, s=opt_human_size, c=opt_human_color)\n\n annotation = plt.annotate(STEPS, xy=(127.35, 36.70), xytext=(127.35, 36.70))\n\n############### START ###############\n\n# set animation loop\nani = matplotlib.animation.FuncAnimation(fig, animate, frames=1440, interval=100, repeat=True)\n\n# draw pyplot\nplt.show()\n\n# close data file\nfile_human.close();\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylim", "matplotlib.pyplot.annotate", "matplotlib.pyplot.Polygon", "matplotlib.pyplot.xlim", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
mbaak/Eskapade
[ "00c8f6ca52eb5b738b4268257e277dab71b804cb" ]
[ "python/eskapade/analysis/links/df_concatenator.py" ]
[ "\"\"\"Project: Eskapade - A python-based package for data analysis.\n\nClass: DataFrameColumnRenamer\n\nCreated: 2016/11/08\n\nDescription:\n Algorithm to concatenate multiple pandas datadrames\n\nAuthors:\n KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted according to the terms listed in the file\nLICENSE.\n\"\"\"\n\nimport copy\n\nimport pandas as pd\n\nfrom eskapade import DataStore\nfrom eskapade import Link\nfrom eskapade import StatusCode\nfrom eskapade import process_manager\n\n\nclass DfConcatenator(Link):\n \"\"\"Concatenates multiple pandas datadrames.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize link instance.\n\n :param str name: name of link\n :param str store_key: key of data to store in data store\n :param list read_keys: keys of pandas dataframes in the data store\n :param bool ignore_missing_input: Skip missing input datasets.\n If all missing, store empty dataset. Default is false.\n :param kwargs: all other key word arguments are passed on to pandas concat function.\n \"\"\"\n Link.__init__(self, kwargs.pop('name', 'DfConcatenator'))\n\n # process and register all relevant kwargs. kwargs are added as attributes of the link.\n # second arg is default value for an attribute. key is popped from kwargs.\n self._process_kwargs(kwargs, read_keys=[])\n self._process_kwargs(kwargs, store_key=None)\n self._process_kwargs(kwargs, ignore_missing_input=False)\n\n # pass on remaining kwargs to pandas reader\n self.kwargs = copy.deepcopy(kwargs)\n\n def initialize(self):\n \"\"\"Initialize the link.\"\"\"\n assert self.read_keys, 'read_keys have not been set.'\n assert isinstance(self.store_key, str) and self.store_key, 'storage key not set.'\n\n self.logger.info('kwargs passed on to pandas concat function are: {kwargs}', kwargs=self.kwargs)\n\n return StatusCode.Success\n\n def execute(self):\n \"\"\"Execute the link.\n\n Perform concatenation of multiple pandas datadrames.\n \"\"\"\n ds = process_manager.service(DataStore)\n\n # check if all input dataframes exist. if so configured, skip missing inputs, else raise e.\n data = []\n for c in self.read_keys:\n if c not in ds:\n if self.ignore_missing_input:\n self.logger.warning(\"<{key}> is not a key in the datastore. Configured to skip it.\", key=c)\n continue\n raise Exception(\"<{}> is not a key in the datastore.\".format(c))\n data.append(ds[c])\n\n # concatenate the dataframes\n if len(data):\n df = pd.concat(data, **self.kwargs).reset_index(drop=True)\n elif self.ignore_missing_input:\n self.logger.warning(\"Nothing to concatenate. Configured to return empty dataframe.\")\n df = pd.DataFrame()\n else:\n raise Exception(\"Nothing to concatenate.\")\n\n # store the result\n ds[self.store_key] = df\n ds['n_' + self.store_key] = len(df.index)\n\n return StatusCode.Success\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
CIFASIS/splitting_gan
[ "292fac0ea587e67c095a5ffa8f6ca7575afb1ce1" ]
[ "tflib/save_images.py" ]
[ "\"\"\"\nImage grid saver, based on color_grid_vis from github.com/Newmu\n\"\"\"\n\nimport numpy as np\nimport scipy.misc\nfrom scipy.misc import imsave\n\ndef save_images(X, save_path, mod=None):\n # [0, 1] -> [0,255]\n if isinstance(X.flatten()[0], np.floating):\n X = (255.99*X).astype('uint8')\n\n n_samples = X.shape[0]\n rows = int(np.sqrt(n_samples))\n if mod is not None:\n cols = n_samples/rows\n if cols % mod != 0:\n diff = mod - cols % mod\n newcols = cols + diff\n rows = n_samples/newcols\n\n while n_samples % rows != 0:\n rows -= 1\n\n nh, nw = rows, n_samples/rows\n\n if X.ndim == 2:\n X = np.reshape(X, (X.shape[0], int(np.sqrt(X.shape[1])), int(np.sqrt(X.shape[1]))))\n\n if X.ndim == 4:\n # BCHW -> BHWC\n X = X.transpose(0,2,3,1)\n h, w = X[0].shape[:2]\n img = np.zeros((h*nh, w*nw, 3))\n elif X.ndim == 3:\n h, w = X[0].shape[:2]\n img = np.zeros((h*nh, w*nw))\n\n for n, x in enumerate(X):\n j = n/nw\n i = n%nw\n img[j*h:j*h+h, i*w:i*w+w] = x\n\n imsave(save_path, img)" ]
[ [ "scipy.misc.imsave", "numpy.zeros", "numpy.sqrt" ] ]
TimHuynh0905/final_project
[ "50cde6b425b54b5a2a9cd63123af220c2240bb8d" ]
[ "exp/knn_dist_diff.py" ]
[ "\"\"\"\nExperiment summary\n------------------\nTreat each province/state in a country cases over time\nas a vector, do a simple K-Nearest Neighbor between \ncountries. Take the difference between cases. Get\nthe distribution of this data (to make it time-invariant).\nUse the distribution as the feature vector.\n\"\"\"\n\nimport sys, os\nsys.path.insert(0, os.getcwd())\n\nfrom utils import data\nimport sklearn\nimport numpy as np\nfrom sklearn.neighbors import (\n KNeighborsClassifier,\n DistanceMetric\n)\nimport json\n\n# ------------ HYPERPARAMETERS -------------\nBASE_PATH = f\"{os.getcwd()}/COVID-19/csse_covid_19_data/\"\nN_NEIGHBORS = 5\nMIN_CASES = 1000\nN_BINS = 20\nNORMALIZE = True\n# ------------------------------------------\n\nconfirmed = os.path.join(\n BASE_PATH, \n 'csse_covid_19_time_series',\n 'time_series_covid19_confirmed_global.csv')\nconfirmed = data.load_csv_data(confirmed)\nfeatures = []\ntargets = []\n\nfor val in np.unique(confirmed[\"Country/Region\"]):\n df = data.filter_by_attribute(\n confirmed, \"Country/Region\", val)\n cases, labels = data.get_cases_chronologically(df)\n features.append(cases)\n targets.append(labels)\n\nfeatures = np.concatenate(features, axis=0)\ntargets = np.concatenate(targets, axis=0)\npredictions = {}\n\nfor _dist in ['minkowski', 'manhattan']:\n for val in np.unique(confirmed[\"Country/Region\"]):\n # test data\n df = data.filter_by_attribute(\n confirmed, \"Country/Region\", val)\n cases, labels = data.get_cases_chronologically(df)\n\n # filter the rest of the data to get rid of the country we are\n # trying to predict\n mask = targets[:, 1] != val\n tr_features = features[mask]\n tr_targets = targets[mask][:, 1]\n\n above_min_cases = tr_features.sum(axis=-1) > MIN_CASES\n tr_features = np.diff(tr_features[above_min_cases], axis=-1)\n \n if NORMALIZE:\n tr_features = tr_features / tr_features.sum(axis=-1, keepdims=True)\n tr_features = np.apply_along_axis(\n lambda a: np.histogram(a, bins=N_BINS)[0], -1, tr_features)\n \n tr_targets = tr_targets[above_min_cases]\n\n # train knn\n knn = KNeighborsClassifier(n_neighbors=N_NEIGHBORS, metric=_dist)\n knn.fit(tr_features, tr_targets)\n\n # predict\n cases = cases.sum(axis=0, keepdims=True)\n cases = np.apply_along_axis(\n lambda a: np.histogram(a, bins=N_BINS)[0], -1, cases)\n # nearest country to this one based on trajectory\n label = knn.predict(cases)\n \n if val not in predictions:\n predictions[val] = {}\n predictions[val][_dist] = label.tolist()\n\nwith open('exp/results/knn_dist_diff.json', 'w') as f:\n json.dump(predictions, f, indent=4)\n" ]
[ [ "numpy.unique", "sklearn.neighbors.KNeighborsClassifier", "numpy.concatenate", "numpy.diff", "numpy.histogram" ] ]
nntrongnghia/thanos_project
[ "6db56bc232541d2c857ace0cd0d4681d53290422" ]
[ "thanos/dataset/target_transform.py" ]
[ "import torch\nimport torch.nn.functional as F\n\ndef binary_label_transform(target_dict, **kwargs):\n label = target_dict[\"label\"]\n if label != 0:\n label = 1\n return label\n\ndef read_label_from_target_dict(target_dict, **kwargs):\n return target_dict[\"label\"]\n\ndef one_hot_label_transform(target_dict, num_classes):\n label = target_dict[\"label\"]\n return F.one_hot(torch.tensor([label]), num_classes=num_classes).reshape(-1,)" ]
[ [ "torch.tensor" ] ]
drjod/tespy
[ "b90508178ff3527665781b99d20d3dddd8cb23a2", "b90508178ff3527665781b99d20d3dddd8cb23a2" ]
[ "src/tespy/components/heat_exchangers/condenser.py", "src/tespy/components/basics/cycle_closer.py" ]
[ "# -*- coding: utf-8\n\n\"\"\"Module of class Condenser.\n\n\nThis file is part of project TESPy (github.com/oemof/tespy). It's copyrighted\nby the contributors recorded in the version control history of the file,\navailable from its original location\ntespy/components/heat_exchangers/condenser.py\n\nSPDX-License-Identifier: MIT\n\"\"\"\n\nimport numpy as np\n\nfrom tespy.components.component import Component\nfrom tespy.components.heat_exchangers.heat_exchanger import HeatExchanger\nfrom tespy.tools.data_containers import ComponentCharacteristics as dc_cc\nfrom tespy.tools.data_containers import ComponentProperties as dc_cp\nfrom tespy.tools.data_containers import DataContainerSimple as dc_simple\nfrom tespy.tools.data_containers import GroupedComponentCharacteristics as dc_gcc\nfrom tespy.tools.document_models import generate_latex_eq\nfrom tespy.tools.fluid_properties import T_bp_p\nfrom tespy.tools.fluid_properties import T_mix_ph\nfrom tespy.tools.fluid_properties import dh_mix_dpQ\nfrom tespy.tools.fluid_properties import h_mix_pQ\n\n\nclass Condenser(HeatExchanger):\n r\"\"\"\n A Condenser cools a fluid until it is in liquid state.\n\n The condensing fluid is cooled by the cold side fluid. The fluid on the hot\n side of the condenser must be pure. Subcooling is available.\n\n **Mandatory Equations**\n\n - :py:meth:`tespy.components.component.Component.fluid_func`\n - :py:meth:`tespy.components.component.Component.mass_flow_func`\n - :py:meth:`tespy.components.heat_exchangers.heat_exchanger.HeatExchanger.energy_balance_func`\n - condensate outlet state, function can be disabled by specifying\n :code:`set_attr(subcooling=True)`\n :py:meth:`tespy.components.heat_exchangers.condenser.Condenser.subcooling_func`\n\n **Optional Equations**\n\n - :py:meth:`tespy.components.heat_exchangers.heat_exchanger.HeatExchanger.energy_balance_hot_func`\n - :py:meth:`tespy.components.heat_exchangers.condenser.Condenser.kA_func`\n - :py:meth:`tespy.components.heat_exchangers.condenser.Condenser.kA_char_func`\n - :py:meth:`tespy.components.heat_exchangers.condenser.Condenser.ttd_u_func`\n - :py:meth:`tespy.components.heat_exchangers.heat_exchanger.HeatExchanger.ttd_l_func`\n - hot side :py:meth:`tespy.components.component.Component.pr_func`\n - cold side :py:meth:`tespy.components.component.Component.pr_func`\n - hot side :py:meth:`tespy.components.component.Component.zeta_func`\n - cold side :py:meth:`tespy.components.component.Component.zeta_func`\n\n Inlets/Outlets\n\n - in1, in2 (index 1: hot side, index 2: cold side)\n - out1, out2 (index 1: hot side, index 2: cold side)\n\n Image\n\n .. 
image:: _images/Condenser.svg\n :alt: alternative text\n :align: center\n\n Parameters\n ----------\n label : str\n The label of the component.\n\n design : list\n List containing design parameters (stated as String).\n\n offdesign : list\n List containing offdesign parameters (stated as String).\n\n design_path : str\n Path to the components design case.\n\n local_offdesign : boolean\n Treat this component in offdesign mode in a design calculation.\n\n local_design : boolean\n Treat this component in design mode in an offdesign calculation.\n\n char_warnings : boolean\n Ignore warnings on default characteristics usage for this component.\n\n printout : boolean\n Include this component in the network's results printout.\n\n Q : float, dict\n Heat transfer, :math:`Q/\\text{W}`.\n\n pr1 : float, dict, :code:`\"var\"`\n Outlet to inlet pressure ratio at hot side, :math:`pr/1`.\n\n pr2 : float, dict, :code:`\"var\"`\n Outlet to inlet pressure ratio at cold side, :math:`pr/1`.\n\n zeta1 : float, dict, :code:`\"var\"`\n Geometry independent friction coefficient at hot side,\n :math:`\\frac{\\zeta}{D^4}/\\frac{1}{\\text{m}^4}`.\n\n zeta2 : float, dict, :code:`\"var\"`\n Geometry independent friction coefficient at cold side,\n :math:`\\frac{\\zeta}{D^4}/\\frac{1}{\\text{m}^4}`.\n\n ttd_l : float, dict\n Lower terminal temperature difference :math:`ttd_\\mathrm{l}/\\text{K}`.\n\n ttd_u : float, dict\n Upper terminal temperature difference (referring to saturation\n temprature of condensing fluid) :math:`ttd_\\mathrm{u}/\\text{K}`.\n\n kA : float, dict\n Area independent heat transfer coefficient,\n :math:`kA/\\frac{\\text{W}}{\\text{K}}`.\n\n kA_char : tespy.tools.data_containers.DataContainerSimple\n Area independent heat transfer coefficient characteristic.\n\n kA_char1 : tespy.tools.characteristics.CharLine, dict\n Characteristic line for hot side heat transfer coefficient.\n\n kA_char2 : tespy.tools.characteristics.CharLine, dict\n Characteristic line for cold side heat transfer coefficient.\n\n subcooling : boolean\n Enable/disable subcooling, default value: disabled.\n\n Note\n ----\n The condenser has an additional equation for enthalpy at hot side outlet:\n The fluid leaves the component in saturated liquid state. If subcooling\n is activated, it possible to specify the enthalpy at the outgoing\n connection manually.\n\n It has different calculation method for given heat transfer coefficient and\n upper terminal temperature dierence: These parameters refer to the\n **condensing** temperature, even if the fluid on the hot side enters the\n component in superheated state.\n\n Example\n -------\n Air is used to condensate water in a condenser. 1 kg/s waste steam is\n chilled with a terminal temperature difference of 15 K.\n\n >>> from tespy.components import Sink, Source, Condenser\n >>> from tespy.connections import Connection\n >>> from tespy.networks import Network\n >>> from tespy.tools.fluid_properties import T_bp_p\n >>> import shutil\n >>> nw = Network(fluids=['water', 'air'], T_unit='C', p_unit='bar',\n ... 
h_unit='kJ / kg', m_range=[0.01, 1000], iterinfo=False)\n >>> amb_in = Sink('ambient air inlet')\n >>> amb_out = Source('air outlet')\n >>> waste_steam = Source('waste steam')\n >>> c = Sink('condensate sink')\n >>> cond = Condenser('condenser')\n >>> cond.component()\n 'condenser'\n >>> amb_he = Connection(amb_out, 'out1', cond, 'in2')\n >>> he_amb = Connection(cond, 'out2', amb_in, 'in1')\n >>> ws_he = Connection(waste_steam, 'out1', cond, 'in1')\n >>> he_c = Connection(cond, 'out1', c, 'in1')\n >>> nw.add_conns(amb_he, he_amb, ws_he, he_c)\n\n The air flow can not be controlled, thus is constant in offdesign\n operation. If the waste steam mass flow or the ambient air temperature\n change, the outlet temperature of the air will change, too.\n\n >>> cond.set_attr(pr1=0.98, pr2=0.999, ttd_u=15, design=['pr2', 'ttd_u'],\n ... offdesign=['zeta2', 'kA_char'])\n >>> ws_he.set_attr(fluid={'water': 1, 'air': 0}, h=2700, m=1)\n >>> amb_he.set_attr(fluid={'water': 0, 'air': 1}, T=20, offdesign=['v'])\n >>> he_amb.set_attr(p=1, T=40, design=['T'])\n >>> nw.solve('design')\n >>> nw.save('tmp')\n >>> round(amb_he.v.val, 2)\n 103.17\n >>> round(ws_he.T.val - he_amb.T.val, 1)\n 66.9\n >>> round(T_bp_p(ws_he.get_flow()) - 273.15 - he_amb.T.val, 1)\n 15.0\n >>> ws_he.set_attr(m=0.7)\n >>> amb_he.set_attr(T=30)\n >>> nw.solve('offdesign', design_path='tmp')\n >>> round(ws_he.T.val - he_amb.T.val, 1)\n 62.5\n >>> round(T_bp_p(ws_he.get_flow()) - 273.15 - he_amb.T.val, 1)\n 11.3\n\n It is possible to activate subcooling. The difference to boiling point\n temperature is specified to 5 K.\n\n >>> cond.set_attr(subcooling=True)\n >>> he_c.set_attr(Td_bp=-5)\n >>> nw.solve('offdesign', design_path='tmp')\n >>> round(ws_he.T.val - he_amb.T.val, 1)\n 62.5\n >>> round(T_bp_p(ws_he.get_flow()) - 273.15 - he_amb.T.val, 1)\n 13.4\n >>> shutil.rmtree('./tmp', ignore_errors=True)\n \"\"\"\n\n @staticmethod\n def component():\n return 'condenser'\n\n def get_variables(self):\n return {\n 'Q': dc_cp(\n max_val=0, func=self.energy_balance_hot_func, num_eq=1,\n deriv=self.energy_balance_hot_deriv,\n latex=self.energy_balance_hot_func_doc),\n 'kA': dc_cp(\n min_val=0, num_eq=1, func=self.kA_func, latex=self.kA_func_doc,\n deriv=self.kA_deriv),\n 'td_log': dc_cp(min_val=0, func='dummy'),\n 'ttd_u': dc_cp(\n min_val=0, num_eq=1, func=self.ttd_u_func,\n deriv=self.ttd_u_deriv, latex=self.ttd_u_func_doc),\n 'ttd_l': dc_cp(\n min_val=0, num_eq=1, func=self.ttd_l_func,\n deriv=self.ttd_l_deriv, latex=self.ttd_l_func_doc),\n 'pr1': dc_cp(\n min_val=1e-4, max_val=1, num_eq=1, deriv=self.pr_deriv,\n latex=self.pr_func_doc,\n func=self.pr_func, func_params={'pr': 'pr1'}),\n 'pr2': dc_cp(\n min_val=1e-4, max_val=1, num_eq=1, latex=self.pr_func_doc,\n deriv=self.pr_deriv, func=self.pr_func,\n func_params={'pr': 'pr2', 'inconn': 1, 'outconn': 1}),\n 'zeta1': dc_cp(\n min_val=0, max_val=1e15, num_eq=1, latex=self.zeta_func_doc,\n deriv=self.zeta_deriv, func=self.zeta_func,\n func_params={'zeta': 'zeta1'}),\n 'zeta2': dc_cp(\n min_val=0, max_val=1e15, num_eq=1, latex=self.zeta_func_doc,\n deriv=self.zeta_deriv, func=self.zeta_func,\n func_params={'zeta': 'zeta2', 'inconn': 1, 'outconn': 1}),\n 'kA_char': dc_gcc(\n elements=['kA_char1', 'kA_char2'],\n num_eq=1, latex=self.kA_char_func_doc, func=self.kA_char_func,\n deriv=self.kA_char_deriv),\n 'kA_char1': dc_cc(param='m'),\n 'kA_char2': dc_cc(\n param='m', char_params={\n 'type': 'rel', 'inconn': 1, 'outconn': 1}),\n 'subcooling': dc_simple(\n val=False, num_eq=1, 
latex=self.subcooling_func_doc,\n deriv=self.subcooling_deriv, func=self.subcooling_func)\n }\n\n def comp_init(self, nw):\n\n # if subcooling is True, outlet state method must not be calculated\n self.subcooling.is_set = not self.subcooling.val\n Component.comp_init(self, nw)\n\n def subcooling_func(self):\n r\"\"\"\n Equation for hot side outlet state.\n\n Returns\n -------\n residual : float\n Residual value of equation.\n\n .. math::\n\n 0=h_{out,1} -h\\left(p_{out,1}, x=0 \\right)\n\n Note\n ----\n This equation is applied in case subcooling is False!\n \"\"\"\n return self.outl[0].h.val_SI - h_mix_pQ(self.outl[0].get_flow(), 0)\n\n def subcooling_func_doc(self, label):\n r\"\"\"\n Equation for hot side outlet state.\n\n Parameters\n ----------\n label : str\n Label for equation.\n\n Returns\n -------\n latex : str\n LaTeX code of equations applied.\n \"\"\"\n latex = r'0=h_\\mathrm{out,1} -h\\left(p_\\mathrm{out,1}, x=0 \\right)'\n return generate_latex_eq(self, latex, label)\n\n def subcooling_deriv(self, increment_filter, k):\n \"\"\"\n Calculate partial derivates of subcooling function.\n\n Parameters\n ----------\n increment_filter : ndarray\n Matrix for filtering non-changing variables.\n\n k : int\n Position of derivatives in Jacobian matrix (k-th equation).\n \"\"\"\n self.jacobian[k, 2, 1] = -dh_mix_dpQ(self.outl[0].get_flow(), 0)\n self.jacobian[k, 2, 2] = 1\n\n def kA_func(self):\n r\"\"\"\n Calculate heat transfer from heat transfer coefficient.\n\n Returns\n -------\n residual : float\n Residual value of equation.\n\n .. math::\n\n 0 = \\dot{m}_{in,1} \\cdot \\left( h_{out,1} - h_{in,1}\\right) +\n kA \\cdot \\frac{T_{out,1} -\n T_{in,2} - T_{sat} \\left(p_{in,1}\\right) + T_{out,2}}\n {\\ln{\\frac{T_{out,1} - T_{in,2}}\n {T_{sat} \\left(p_{in,1}\\right) - T_{out,2}}}}\n \"\"\"\n i1 = self.inl[0]\n i2 = self.inl[1]\n o1 = self.outl[0]\n o2 = self.outl[1]\n\n T_i1 = T_bp_p(i1.get_flow())\n T_i2 = T_mix_ph(i2.get_flow(), T0=i2.T.val_SI)\n T_o1 = T_mix_ph(o1.get_flow(), T0=o1.T.val_SI)\n T_o2 = T_mix_ph(o2.get_flow(), T0=o2.T.val_SI)\n\n if T_i1 <= T_o2 and not i1.T.val_set:\n T_i1 = T_o2 + 0.5\n if T_i1 <= T_o2 and not o2.T.val_set:\n T_o2 = T_i1 - 0.5\n if T_o1 <= T_i2 and not o1.T.val_set:\n T_o1 = T_i2 + 1\n if T_o1 <= T_i2 and not i2.T.val_set:\n T_i2 = T_o1 - 1\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n return i1.m.val_SI * (o1.h.val_SI - i1.h.val_SI) + self.kA.val * td_log\n\n def kA_func_doc(self, label):\n r\"\"\"\n Calculate heat transfer from heat transfer coefficient.\n\n Parameters\n ----------\n label : str\n Label for equation.\n\n Returns\n -------\n latex : str\n LaTeX code of equations applied.\n \"\"\"\n latex = (\n r'0 = \\dot{m}_\\mathrm{in,1} \\cdot \\left( h_\\mathrm{out,1} - '\n r'h_\\mathrm{in,1}\\right)+ kA \\cdot \\frac{T_\\mathrm{out,1} - '\n r'T_\\mathrm{in,2} -T_\\mathrm{sat}\\left( p_\\mathrm{in,1}\\right)'\n r'+ T_\\mathrm{out,2}}'\n r'{\\ln{\\frac{T_\\mathrm{out,1} - T_\\mathrm{in,2}}'\n r'{T_\\mathrm{sat}\\left( p_\\mathrm{in,1}\\right) -'\n r'T_\\mathrm{out,2}}}}'\n )\n return generate_latex_eq(self, latex, label)\n\n def kA_char_func(self):\n r\"\"\"\n Calculate heat transfer from heat transfer coefficient characteristic.\n\n Returns\n -------\n residual : float\n Residual value of equation.\n\n .. 
math::\n\n 0 = \\dot{m}_{in,1} \\cdot \\left( h_{out,1} - h_{in,1}\\right) +\n kA_{design} \\cdot f_{kA} \\cdot \\frac{T_{out,1} -\n T_{in,2} - T_{sat} \\left(p_{in,1}\\right) + T_{out,2}}\n {\\ln{\\frac{T_{out,1} - T_{in,2}}\n {T_{sat} \\left(p_{in,1}\\right) - T_{out,2}}}}\n\n f_{kA} = \\frac{2}{\\frac{1}{f_1 \\left( expr_1\\right)} +\n \\frac{1}{f_2 \\left( expr_2\\right)}}\n\n Note\n ----\n For standard functions f\\ :subscript:`1` \\ and f\\ :subscript:`2` \\ see\n module :py:mod:`tespy.data`.\n \"\"\"\n p1 = self.kA_char1.param\n p2 = self.kA_char2.param\n f1 = self.get_char_expr(p1, **self.kA_char1.char_params)\n f2 = self.get_char_expr(p2, **self.kA_char2.char_params)\n\n i1 = self.inl[0]\n i2 = self.inl[1]\n o1 = self.outl[0]\n o2 = self.outl[1]\n\n # temperature value manipulation for convergence stability\n T_i1 = T_bp_p(i1.get_flow())\n T_i2 = T_mix_ph(i2.get_flow(), T0=i2.T.val_SI)\n T_o1 = T_mix_ph(o1.get_flow(), T0=o1.T.val_SI)\n T_o2 = T_mix_ph(o2.get_flow(), T0=o2.T.val_SI)\n\n if T_i1 <= T_o2 and not i1.T.val_set:\n T_i1 = T_o2 + 0.5\n if T_i1 <= T_o2 and not o2.T.val_set:\n T_o2 = T_i1 - 0.5\n if T_o1 <= T_i2 and not o1.T.val_set:\n T_o1 = T_i2 + 1\n if T_o1 <= T_i2 and not i2.T.val_set:\n T_i2 = T_o1 - 1\n\n td_log = ((T_o1 - T_i2 - T_i1 + T_o2) /\n np.log((T_o1 - T_i2) / (T_i1 - T_o2)))\n\n fkA1 = self.kA_char1.char_func.evaluate(f1)\n fkA2 = self.kA_char2.char_func.evaluate(f2)\n fkA = 2 / (1 / fkA1 + 1 / fkA2)\n\n return (\n i1.m.val_SI * (o1.h.val_SI - i1.h.val_SI) +\n self.kA.design * fkA * td_log)\n\n def kA_char_func_doc(self, label):\n r\"\"\"\n Calculate heat transfer from heat transfer coefficient characteristic.\n\n Parameters\n ----------\n label : str\n Label for equation.\n\n Returns\n -------\n latex : str\n LaTeX code of equations applied.\n \"\"\"\n latex = (\n r'\\begin{split}' + '\\n'\n r'0 = & \\dot{m}_\\mathrm{in,1} \\cdot \\left( h_\\mathrm{out,1} - '\n r'h_\\mathrm{in,1}\\right)\\\\' + '\\n'\n r'&+kA_\\mathrm{design} \\cdot '\n r'f_\\mathrm{kA} \\cdot \\frac{T_\\mathrm{out,1} - T_\\mathrm{in,2}'\n r' - T_\\mathrm{sat}\\left( p_\\mathrm{in,1}\\right) +'\n r'T_\\mathrm{out,2}}{\\ln{\\frac{T_\\mathrm{out,1}-'\n r'T_\\mathrm{in,2}}{T_\\mathrm{sat}\\left( p_\\mathrm{in,1}\\right)'\n r'- T_\\mathrm{out,2}}}}\\\\' + '\\n'\n r'f_\\mathrm{kA}=&\\frac{2}{\\frac{1}{f\\left(X_1\\right)}+'\n r'\\frac{1}{f\\left(X_2\\right)}}\\\\' + '\\n'\n r'\\end{split}'\n )\n return generate_latex_eq(self, latex, label)\n\n def ttd_u_func(self):\n r\"\"\"\n Equation for upper terminal temperature difference.\n\n Returns\n -------\n residual : float\n Residual value of equation.\n\n .. 
math::\n\n 0 = ttd_{u} - T_{sat} \\left(p_{in,1}\\right) + T_{out,2}\n\n Note\n ----\n The upper terminal temperature difference ttd_u refers to boiling\n temperature at hot side inlet.\n \"\"\"\n T_i1 = T_bp_p(self.inl[0].get_flow())\n T_o2 = T_mix_ph(self.outl[1].get_flow(), T0=self.outl[1].T.val_SI)\n return self.ttd_u.val - T_i1 + T_o2\n\n def ttd_u_func_doc(self, label):\n r\"\"\"\n Equation for upper terminal temperature difference.\n\n Parameters\n ----------\n label : str\n Label for equation.\n\n Returns\n -------\n latex : str\n LaTeX code of equations applied.\n \"\"\"\n latex = (\n r'0=ttd_\\mathrm{u}-T_\\mathrm{sat}\\left(p_\\mathrm{in,1}\\right)'\n r' + T_\\mathrm{out,2}')\n return generate_latex_eq(self, latex, label)\n\n def calc_parameters(self):\n r\"\"\"Postprocessing parameter calculation.\"\"\"\n # component parameters\n self.Q.val = self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI)\n self.ttd_u.val = T_bp_p(self.inl[0].get_flow()) - self.outl[1].T.val_SI\n self.ttd_l.val = self.outl[0].T.val_SI - self.inl[1].T.val_SI\n\n # pr and zeta\n for i in range(2):\n self.get_attr('pr' + str(i + 1)).val = (\n self.outl[i].p.val_SI / self.inl[i].p.val_SI)\n self.get_attr('zeta' + str(i + 1)).val = (\n (self.inl[i].p.val_SI - self.outl[i].p.val_SI) * np.pi ** 2 / (\n 4 * self.inl[i].m.val_SI ** 2 *\n (self.inl[i].vol.val_SI + self.outl[i].vol.val_SI)\n ))\n\n # kA and logarithmic temperature difference\n if self.ttd_u.val < 0 or self.ttd_l.val < 0:\n self.td_log.val = np.nan\n self.kA.val = np.nan\n else:\n self.td_log.val = ((self.ttd_l.val - self.ttd_u.val) /\n np.log(self.ttd_l.val / self.ttd_u.val))\n self.kA.val = -self.Q.val / self.td_log.val\n", "# -*- coding: utf-8\n\n\"\"\"Module for class CycleCloser\n\n\nThis file is part of project TESPy (github.com/oemof/tespy). It's copyrighted\nby the contributors recorded in the version control history of the file,\navailable from its original location tespy/components/basics/cycle_closer.py\n\nSPDX-License-Identifier: MIT\n\"\"\"\n\nimport numpy as np\n\nfrom tespy.components.component import Component\nfrom tespy.tools.data_containers import ComponentProperties as dc_cp\n\n# %%\n\n\nclass CycleCloser(Component):\n r\"\"\"\n Component for closing cycles.\n\n **Mandatory Equations**\n\n - :py:meth:`tespy.components.basics.cycle_closer.CycleCloser.pressure_equality_func`\n - :py:meth:`tespy.components.basics.cycle_closer.CycleCloser.enthalpy_equality_func`\n\n Image not available\n\n Parameters\n ----------\n label : str\n The label of the component.\n\n design : list\n List containing design parameters (stated as String).\n\n offdesign : list\n List containing offdesign parameters (stated as String).\n\n design_path : str\n Path to the components design case.\n\n local_offdesign : boolean\n Treat this component in offdesign mode in a design calculation.\n\n local_design : boolean\n Treat this component in design mode in an offdesign calculation.\n\n char_warnings : boolean\n Ignore warnings on default characteristics usage for this component.\n\n printout : boolean\n Include this component in the network's results printout.\n\n Note\n ----\n This component can be used to close a cycle process. The system of\n equations describing your plant will overdetermined, if you close a cycle\n without this component or a cut the cycle with a sink and a source at\n some point of the cycle. This component can be used instead of cutting\n the cycle.\n\n Example\n -------\n Create a cycle containing a pump and a pipe. 
The pump increases pressure\n the pipe cools the liquid and destroys the pressure rise. The heat\n extracted at the pipe must be the same value of the power input at the\n pump (but negative), as there is no other in- or outputs of energy in the\n system.\n\n >>> from tespy.components import CycleCloser, Pipe, Pump\n >>> from tespy.connections import Connection\n >>> from tespy.networks import Network\n >>> nw = Network(['water'], p_unit='bar', T_unit='C', iterinfo=False)\n >>> pi = Pipe('pipe')\n >>> pu = Pump('pump')\n >>> cc = CycleCloser('cycle closing component')\n >>> cc.component()\n 'cycle closer'\n >>> pu_pi = Connection(pu, 'out1', pi, 'in1')\n >>> pi_cc = Connection(pi, 'out1', cc, 'in1')\n >>> cc_pu = Connection(cc, 'out1', pu, 'in1')\n >>> nw.add_conns(pu_pi, pi_cc, cc_pu)\n >>> pi_cc.set_attr(p=1, T=20, fluid={'water': 1})\n >>> pu_pi.set_attr(p=10)\n >>> pu.set_attr(eta_s=0.8, P=1000)\n >>> nw.solve('design')\n >>> round(pi.Q.val, 1) == -round(pu.P.val, 1)\n True\n \"\"\"\n\n @staticmethod\n def component():\n return 'cycle closer'\n\n @staticmethod\n def get_variables():\n return {\n 'mass_deviation': dc_cp(val=0, max_val=1e-3),\n 'fluid_deviation': dc_cp(val=0, max_val=1e-5)\n }\n\n def get_mandatory_constraints(self):\n return {\n 'pressure_equality_constraints': {\n 'func': self.pressure_equality_func,\n 'deriv': self.pressure_equality_deriv,\n 'constant_deriv': True,\n 'latex': self.pressure_equality_func_doc,\n 'num_eq': 1},\n 'enthalpy_equality_constraints': {\n 'func': self.enthalpy_equality_func,\n 'deriv': self.enthalpy_equality_deriv,\n 'constant_deriv': True,\n 'latex': self.enthalpy_equality_func_doc,\n 'num_eq': 1}\n }\n\n @staticmethod\n def inlets():\n return ['in1']\n\n @staticmethod\n def outlets():\n return ['out1']\n\n def propagate_fluid_to_target(self, inconn, start):\n r\"\"\"\n Fluid propagation to target stops here.\n\n Parameters\n ----------\n inconn : tespy.connections.connection.Connection\n Connection to initialise.\n\n start : tespy.components.component.Component\n This component is the fluid propagation starting point.\n The starting component is saved to prevent infinite looping.\n \"\"\"\n return\n\n def propagate_fluid_to_source(self, outconn, start):\n r\"\"\"\n Fluid propagation to source stops here.\n\n Parameters\n ----------\n outconn : tespy.connections.connection.Connection\n Connection to initialise.\n\n start : tespy.components.component.Component\n This component is the fluid propagation starting point.\n The starting component is saved to prevent infinite looping.\n \"\"\"\n return\n\n def calc_parameters(self):\n r\"\"\"Postprocessing parameter calculation.\"\"\"\n # calculate deviation in mass flow\n self.mass_deviation.val = np.abs(\n self.inl[0].m.val_SI - self.outl[0].m.val_SI)\n\n # calculate deviation in fluid composition\n d1 = self.inl[0].fluid.val\n d2 = self.outl[0].fluid.val\n diff = [d1[key] - d2[key] for key in d1.keys()]\n self.fluid_deviation.val = np.linalg.norm(diff)\n" ]
[ [ "numpy.log" ], [ "numpy.linalg.norm", "numpy.abs" ] ]
bgorr/instrupy
[ "e3dca871ce2dcd2ef279898fcc36bf9d18f0c243" ]
[ "tests/test_util.py" ]
[ "\"\"\"Unit tests for instrupy.util module.\n\"\"\"\n\nimport json\nimport unittest\n\nimport numpy as np\nfrom instrupy.util import *\n\nclass TestEntity(unittest.TestCase):\n \n class ChildClass(Entity): \n def __init__(self):\n self._id = 123\n self.name = \"Ramesh\"\n self.record = {\"workexp\": 5, \"area\": \"electrical\"}\n super(TestEntity.ChildClass,self).__init__(self._id, \"TestClass\")\n \n def test_eq(self):\n self.assertNotEqual(Entity(), Entity())\n self.assertNotEqual(Entity(), Entity(_id=\"test\"))\n self.assertNotEqual(Entity(_id=\"test\"), Entity())\n self.assertNotEqual(Entity(_id=\"foo\"), Entity(_id=\"bar\"))\n self.assertEqual(Entity(_id=\"test\"), Entity(_id=\"test\"))\n\n def test_hash(self):\n self.assertNotEqual(hash(Entity(_id=\"foo\")), hash(Entity(_id=\"bar\")))\n self.assertEqual(hash(Entity(_id=\"test\")), hash(Entity(_id=\"test\")))\n\n def test_to_dict(self):\n d = Entity().to_dict()\n self.assertEqual(d.get(\"@type\"), \"Entity\")\n self.assertIsNone(d.get(\"@id\"))\n\n def test_to_dict_id(self):\n d = Entity(_id=\"test\").to_dict()\n self.assertEqual(d.get(\"@id\"), \"test\")\n\n d = Entity(_id=123).to_dict()\n self.assertEqual(d.get(\"@id\"),123)\n\n d = Entity(_id=\"123\").to_dict()\n self.assertEqual(d.get(\"@id\"),\"123\")\n \n def test_to_dict_childclass(self): \n x = TestEntity.ChildClass()\n d = x.to_dict()\n self.assertEqual(d.get(\"@id\"), 123)\n self.assertEqual(d.get(\"@type\"), \"TestClass\")\n self.assertEqual(d.get(\"name\"), \"Ramesh\")\n self.assertEqual(d.get(\"record\").get(\"workexp\"), 5)\n self.assertEqual(d.get(\"record\").get(\"area\"), \"electrical\")\n\n def test_from_dict(self):\n o = Entity.from_dict({})\n self.assertEqual(o._type, \"Entity\")\n self.assertIsNone(o._id)\n\n def test_from_dict_id(self):\n o = Entity.from_dict(dict({\"@id\": \"test\"}))\n self.assertEqual(o._id, \"test\")\n\n o = Entity.from_dict(dict({\"@id\": 123}))\n self.assertEqual(o._id, 123)\n\n o = Entity.from_dict(dict({\"@id\": \"123\"}))\n self.assertEqual(o._id, \"123\") \n \n def test_to_json(self):\n d = json.loads(Entity().to_json())\n self.assertEqual(d.get(\"@type\"), \"Entity\")\n self.assertIsNone(d.get(\"@id\")) \n\n def test_to_json_id(self):\n d = json.loads(Entity(_id=\"test\").to_json())\n self.assertEqual(d.get(\"@id\"), \"test\")\n\n d = json.loads(Entity(_id=123).to_json())\n self.assertEqual(d.get(\"@id\"),123)\n\n d = json.loads(Entity(_id=\"123\").to_json())\n self.assertEqual(d.get(\"@id\"),\"123\")\n \n def test_to_json_childclass(self): \n x = TestEntity.ChildClass()\n d = json.loads(x.to_json())\n self.assertEqual(d.get(\"@id\"), 123)\n self.assertEqual(d.get(\"@type\"), \"TestClass\")\n self.assertEqual(d.get(\"name\"), \"Ramesh\")\n self.assertEqual(d.get(\"record\").get(\"workexp\"), 5)\n self.assertEqual(d.get(\"record\").get(\"area\"), \"electrical\")\n\n def test_from_json(self):\n o = Entity.from_json('{}')\n self.assertEqual(o._type, \"Entity\")\n self.assertIsNone(o._id)\n\n def test_from_json_id(self):\n o = Entity.from_json('{\"@id\": \"test\"}')\n self.assertEqual(o._id, \"test\")\n\n o = Entity.from_json('{\"@id\": 123}')\n self.assertEqual(o._id, 123)\n\n o = Entity.from_json('{\"@id\": \"123\"}')\n self.assertEqual(o._id, \"123\")\n\nclass TestOrientation(unittest.TestCase):\n\n def test_from_json_basic(self): \n # Test valid nominal case\n o = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"SIDE_LOOK\",\"sideLookAngle\":10, \"@id\": 123}')\n self.assertEqual(o._type, 
\"Orientation\")\n self.assertEqual(o._id, 123)\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"SC_BODY_FIXED\"))\n self.assertEqual(o.euler_angle2, 10)\n # No reference frame specification\n o = Orientation.from_json(\n '{\"convention\": \"SIDE_LOOK\",\"sideLookAngle\":10, \"@id\": 123}')\n self.assertEqual(o._type, \"Orientation\")\n self.assertEqual(o._id, 123)\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"NADIR_POINTING\"))\n self.assertEqual(o.euler_angle2, 10) \n # Test for wrong convention specification\n with self.assertRaises(Exception):\n Orientation.from_json(\n '{\"referenceFrame\": \"NADIR_POINTING\", \"convention\": \"SIDE_LOOK1\",\"sideLookAngle\":10}')\n with self.assertRaises(Exception):\n Orientation.from_json(\n '{\"referenceFrame\": \"NADIR_POINTING\", \"convention\": \"XXZ\",\"xRotation\":10,\"yRotation\":-10.4,\"zRotation\":20.78}')\n # Test for case-insensitivity\n o = Orientation.from_json(\n '{\"referenceFrame\": \"NADIR_POINTING\", \"convention\": \"SIDE_look\",\"sideLookAngle\":10}')\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n self.assertEqual(o.euler_angle2, 10)\n # Test wraping of angle values to [0, 360]deg range.\n o = Orientation.from_json(\n '{\"referenceFrame\": \"NADIR_POINTING\", \"convention\": \"XYz\",\"xRotation\":10,\"yRotation\":-10.4,\"zRotation\":20.78}')\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n self.assertEqual(o.euler_angle2, 360-10.4)\n\n def test_from_json_SIDELOOKANGLE_convention(self):\n # Test for positive angle less than 360 deg\n o = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"SIDE_LOOK\",\"sideLookAngle\":10}')\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"SC_BODY_FIXED\"))\n self.assertEqual(o.euler_angle1, 0)\n self.assertEqual(o.euler_angle2, 10)\n self.assertEqual(o.euler_angle3, 0)\n self.assertEqual(o.euler_seq1, 1)\n self.assertEqual(o.euler_seq2, 2)\n self.assertEqual(o.euler_seq3, 3)\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n # Test for negative angle\n o = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"SIDE_LOOK\",\"sideLookAngle\":-10}')\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"SC_BODY_FIXED\"))\n self.assertEqual(o.euler_angle1, 0)\n self.assertEqual(o.euler_angle2, 350)\n self.assertEqual(o.euler_angle3, 0)\n self.assertEqual(o.euler_seq1, 1)\n self.assertEqual(o.euler_seq2, 2)\n self.assertEqual(o.euler_seq3, 3)\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n # Test for positive angle greater than 360 deg\n o = Orientation.from_json(\n '{\"referenceFrame\": \"NADIR_POINTING\", \"convention\": \"SIDE_LOOK\",\"sideLookAngle\":380}')\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"NADIR_POINTING\"))\n self.assertEqual(o.euler_angle1, 0)\n self.assertEqual(o.euler_angle2, 20)\n self.assertEqual(o.euler_angle3, 0)\n self.assertEqual(o.euler_seq1, 1)\n self.assertEqual(o.euler_seq2, 2)\n self.assertEqual(o.euler_seq3, 3)\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n # Test for negative angle less than -360 deg\n o = Orientation.from_json(\n '{\"convention\": \"SIDE_LOOK\",\"sideLookAngle\":-380}')\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"NADIR_POINTING\"))\n self.assertEqual(o.euler_angle1, 0)\n self.assertEqual(o.euler_angle2, 340)\n self.assertEqual(o.euler_angle3, 0)\n self.assertEqual(o.euler_seq1, 1)\n self.assertEqual(o.euler_seq2, 2)\n 
self.assertEqual(o.euler_seq3, 3)\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n # Test for no convention specification\n with self.assertRaises(Exception):\n Orientation.from_json('{\"sideLookAngle\":-30}')\n\n def test_from_json_XYZ_convention(self):\n # Test for positive, negative angles\n o = Orientation.from_json(\n '{\"referenceFrame\": \"EARTH_CENTERED_INERTIAL\", \"convention\": \"XYZ\",\"xRotation\":10,\"yRotation\":-10.4,\"zRotation\":20.78}')\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"EARTH_CENTERED_INERTIAL\"))\n self.assertEqual(o.euler_angle1, 10)\n self.assertEqual(o.euler_angle2, 349.6)\n self.assertEqual(o.euler_angle3, 20.78)\n self.assertEqual(o.euler_seq1, 1)\n self.assertEqual(o.euler_seq2, 2)\n self.assertEqual(o.euler_seq3, 3)\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n # Test for positive angles greater than 360 deg, negative angles lesser than -360 deg\n o = Orientation.from_json(\n '{\"referenceFrame\": \"EARTH_CENTERED_INERTIAL\", \"convention\": \"XYZ\",\"xRotation\":410,\"yRotation\":1045.8,\"zRotation\":-458}')\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"EARTH_CENTERED_INERTIAL\"))\n self.assertEqual(o.euler_angle1, 50)\n self.assertAlmostEqual(o.euler_angle2, 325.8)\n self.assertEqual(o.euler_angle3, 262)\n self.assertEqual(o.euler_seq1, 1)\n self.assertEqual(o.euler_seq2, 2)\n self.assertEqual(o.euler_seq3, 3)\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n \n def test_from_json_REF_FRAME_ALIGNED_convention(self):\n o = Orientation.from_json(\n '{\"referenceFrame\": \"NADIR_POINTING\", \"convention\": \"REF_FRAME_ALIGNED\"}')\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"NADIR_POINTING\"))\n self.assertEqual(o.euler_angle1, 0)\n self.assertEqual(o.euler_angle2, 0)\n self.assertEqual(o.euler_angle3, 0)\n self.assertEqual(o.euler_seq1, 1)\n self.assertEqual(o.euler_seq2, 2)\n self.assertEqual(o.euler_seq3, 3)\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n\n o = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"REF_FRAME_ALIGNED\"}')\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"SC_BODY_FIXED\"))\n self.assertEqual(o.euler_angle1, 0)\n self.assertEqual(o.euler_angle2, 0)\n self.assertEqual(o.euler_angle3, 0)\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n\n def test_from_json_EULER_convention(self):\n o = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"EULER\",\"eulerAngle1\":400,\"eulerAngle2\":1345.8, \\\n \"eulerAngle3\":-458,\"eulerSeq1\":3, \"eulerSeq2\":1, \"eulerSeq3\":3}')\n self.assertEqual(o.ref_frame, ReferenceFrame.get(\"SC_BODY_FIXED\"))\n self.assertEqual(o.euler_angle1, 40)\n self.assertAlmostEqual(o.euler_angle2, 265.8)\n self.assertEqual(o.euler_angle3, 262)\n self.assertEqual(o.euler_seq1, 3)\n self.assertEqual(o.euler_seq2, 1)\n self.assertEqual(o.euler_seq3, 3)\n self.assertEqual(o._type, \"Orientation\")\n self.assertIsNone(o._id)\n\n def test_to_dict(self):\n # SIDE_LOOK convention\n o = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"SIDE_LOOK\",\"sideLookAngle\":10}')\n d = o.to_dict()\n self.assertEqual(d[\"referenceFrame\"], \"SC_BODY_FIXED\")\n self.assertEqual(d[\"convention\"], \"EULER\")\n self.assertEqual(d[\"eulerAngle1\"], 0)\n self.assertAlmostEqual(d[\"eulerAngle2\"], 10)\n self.assertEqual(d[\"eulerAngle3\"], 0)\n self.assertEqual(d[\"eulerSeq1\"], 1)\n 
self.assertEqual(d[\"eulerSeq2\"], 2)\n self.assertEqual(d[\"eulerSeq3\"], 3)\n self.assertIsNone(d[\"@id\"])\n # XYZ convention\n o = Orientation.from_json(\n '{\"referenceFrame\": \"EARTH_CENTERED_INERTIAL\", \"convention\": \"XYZ\",\"xRotation\":-10,\"yRotation\":-78.2,\"zRotation\":-20.5}')\n d = o.to_dict()\n self.assertEqual(d[\"referenceFrame\"], \"EARTH_CENTERED_INERTIAL\")\n self.assertEqual(d[\"convention\"], \"EULER\")\n self.assertEqual(d[\"eulerAngle1\"], 350)\n self.assertAlmostEqual(d[\"eulerAngle2\"], 281.8)\n self.assertEqual(d[\"eulerAngle3\"], 339.5)\n self.assertEqual(d[\"eulerSeq1\"], 1)\n self.assertEqual(d[\"eulerSeq2\"], 2)\n self.assertEqual(d[\"eulerSeq3\"], 3)\n self.assertIsNone(d[\"@id\"])\n # REF_FRAME_ALIGNED convention\n o = Orientation.from_json(\n '{\"referenceFrame\": \"SENSOR_BODY_FIXED\", \"convention\": \"REF_FRAME_ALIGNED\", \"@id\":\"123\"}')\n d = o.to_dict()\n self.assertEqual(d[\"referenceFrame\"], \"SENSOR_BODY_FIXED\")\n self.assertEqual(d[\"convention\"], \"EULER\")\n self.assertEqual(d[\"eulerAngle1\"], 0)\n self.assertAlmostEqual(d[\"eulerAngle2\"], 0)\n self.assertEqual(d[\"eulerAngle3\"], 0)\n self.assertEqual(d[\"eulerSeq1\"], 1)\n self.assertEqual(d[\"eulerSeq2\"], 2)\n self.assertEqual(d[\"eulerSeq3\"], 3)\n self.assertEqual(d[\"@id\"], \"123\")\n # EULER convention\n o = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"EULER\",\"eulerAngle1\":400,\"eulerAngle2\":1345.8, \\\n \"eulerAngle3\":-458,\"eulerSeq1\":3, \"eulerSeq2\":1, \"eulerSeq3\":3, \"@id\":123}')\n d = o.to_dict()\n self.assertEqual(d[\"referenceFrame\"], \"SC_BODY_FIXED\")\n self.assertEqual(d[\"convention\"], \"EULER\")\n self.assertEqual(d[\"eulerAngle1\"], 40)\n self.assertAlmostEqual(d[\"eulerAngle2\"], 265.8)\n self.assertEqual(d[\"eulerAngle3\"], 262)\n self.assertEqual(d[\"eulerSeq1\"], 3)\n self.assertEqual(d[\"eulerSeq2\"], 1)\n self.assertEqual(d[\"eulerSeq3\"], 3)\n self.assertEqual(d[\"@id\"], 123)\n \n def test___eq__(self):\n # SIDE_LOOK convention\n o1 = Orientation.from_json('{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"SIDE_LOOK\",\"sideLookAngle\":10}')\n o2 = Orientation.from_json('{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"SIDE_LOOK\",\"sideLookAngle\":10}')\n self.assertTrue(o1==o2)\n o1 = Orientation.from_json('{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"SIDE_LOOK\",\"sideLookAngle\":10}')\n o2 = Orientation.from_json('{\"referenceFrame\": \"NADIR_POINTING\", \"convention\": \"SIDE_LOOK\",\"sideLookAngle\":10}')\n self.assertFalse(o1==o2)\n\n # XYZ convention\n o1 = Orientation.from_json(\n '{\"referenceFrame\": \"EARTH_CENTERED_INERTIAL\", \"convention\": \"XYZ\",\"xRotation\":-10,\"yRotation\":-78.2,\"zRotation\":-20.5, \"@id\":123}')\n o2 = Orientation.from_json(\n '{\"referenceFrame\": \"EARTH_CENTERED_INERTIAL\", \"convention\": \"XYZ\",\"xRotation\":-10,\"yRotation\":-78.2,\"zRotation\":-20.5, \"@id\":\"xyz\"}')\n self.assertTrue(o1==o2) # @id parameter could be different, but the equality holds\n o1 = Orientation.from_json(\n '{\"referenceFrame\": \"EARTH_CENTERED_INERTIAL\", \"convention\": \"XYZ\",\"xRotation\":-10,\"yRotation\":-78.2,\"zRotation\":-20.5}')\n o2 = Orientation.from_json(\n '{\"referenceFrame\": \"EARTH_CENTERED_INERTIAL\", \"convention\": \"XYZ\",\"xRotation\": 10,\"yRotation\":-78.2,\"zRotation\":-20.5}')\n self.assertFalse(o1==o2)\n\n # REF_FRAME_ALIGNED convention\n o1 = Orientation.from_json('{\"referenceFrame\": \"SENSOR_BODY_FIXED\", 
\"convention\": \"REF_FRAME_ALIGNED\", \"@id\":\"123\"}')\n o2 = Orientation.from_json('{\"referenceFrame\": \"SENSOR_BODY_FIXED\", \"convention\": \"REF_FRAME_ALIGNED\", \"@id\":\"123\"}')\n self.assertTrue(o1==o2)\n o1 = Orientation.from_json('{\"referenceFrame\": \"SENSOR_BODY_FIXED\", \"convention\": \"REF_FRAME_ALIGNED\", \"@id\":\"123\"}')\n o2 = Orientation.from_json('{\"referenceFrame\": \"EARTH_CENTERED_INERTIAL\", \"convention\": \"REF_FRAME_ALIGNED\", \"@id\":\"123\"}') \n self.assertFalse(o1==o2)\n\n # EULER convention\n o1 = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"EULER\",\"eulerAngle1\":10,\"eulerAngle2\":13.8, \\\n \"eulerAngle3\":-45,\"eulerSeq1\":3, \"eulerSeq2\":1, \"eulerSeq3\":3}')\n o2 = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"EULER\",\"eulerAngle1\":10,\"eulerAngle2\":13.8, \\\n \"eulerAngle3\":-45,\"eulerSeq1\":3, \"eulerSeq2\":1, \"eulerSeq3\":3, \"@id\":123}')\n self.assertTrue(o1==o2)\n o1 = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"EULER\",\"eulerAngle1\":10,\"eulerAngle2\":13.8, \\\n \"eulerAngle3\":-45,\"eulerSeq1\":3, \"eulerSeq2\":1, \"eulerSeq3\":3}')\n o2 = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"EULER\",\"eulerAngle1\":400,\"eulerAngle2\":1345.8, \\\n \"eulerAngle3\":-45,\"eulerSeq1\":1, \"eulerSeq2\":2, \"eulerSeq3\":3, \"@id\":123}')\n self.assertFalse(o1==o2)\n \n def test___repr__(self):\n # _id = None\n o = Orientation.from_json(\n '{\"referenceFrame\": \"SC_BODY_FIXED\", \"convention\": \"EULER\",\"eulerAngle1\":400,\"eulerAngle2\":20, \\\n \"eulerAngle3\":-458,\"eulerSeq1\":3, \"eulerSeq2\":1, \"eulerSeq3\":3}')\n self.assertEqual(o.__repr__(), \"Orientation(ref_frame='SC_BODY_FIXED',euler_angle1=40.0,euler_angle2=20.0,euler_angle3=262.0,euler_seq1=3,euler_seq2=1,euler_seq3=3,_id=None)\")\n # _id = 123 (integer)\n o = Orientation.from_json('{\"referenceFrame\": \"NADIR_POINTING\", \"convention\": \"REF_FRAME_ALIGNED\", \"@id\": 123}')\n self.assertEqual(o.__repr__(), \"Orientation(ref_frame='NADIR_POINTING',euler_angle1=0.0,euler_angle2=0.0,euler_angle3=0.0,euler_seq1=1,euler_seq2=2,euler_seq3=3,_id=123)\")\n # _id = 123 (string)\n o = Orientation.from_json('{\"convention\": \"SIDE_LOOK\",\"sideLookAngle\":-380,\"@id\":\"123\"}')\n self.assertEqual(o.__repr__(), \"Orientation(ref_frame='NADIR_POINTING',euler_angle1=0.0,euler_angle2=340.0,euler_angle3=0.0,euler_seq1=1,euler_seq2=2,euler_seq3=3,_id='123')\")\n\nclass TestSphericalGeometry(unittest.TestCase):\n\n def test_from_json_custom_specs(self):\n # Test for typical case\n o = SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [10,10,10,10,10] , \"customClockAnglesVector\": [30,60,180,220,30]}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.CUSTOM)\n self.assertEqual(o.cone_angle_vec, [10, 10, 10, 10, 10])\n self.assertEqual(o.clock_angle_vec, [30, 60, 180, 220, 30])\n self.assertIsNone(o.angle_height)\n self.assertIsNone(o.angle_width)\n self.assertIsNone(o.diameter)\n self.assertIsNone(o._id)\n\n o = SphericalGeometry.from_json(\n '{\"shape\": \"Custom\", \"customConeAnglesVector\": [10], \"@id\": 123}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.CUSTOM)\n self.assertEqual(o.cone_angle_vec, [10])\n self.assertIsNone(o.clock_angle_vec)\n self.assertIsNone(o.angle_height)\n 
self.assertIsNone(o.angle_width)\n self.assertIsNone(o.diameter)\n self.assertEqual(o._id, 123)\n\n o = SphericalGeometry.from_json(\n '{\"shape\": \"CusTOM\", \"customConeAnglesVector\": 10, \"@id\": \"123\"}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.CUSTOM)\n self.assertEqual(o.cone_angle_vec, [10])\n self.assertIsNone(o.clock_angle_vec)\n self.assertIsNone(o.angle_height)\n self.assertIsNone(o.angle_width)\n self.assertIsNone(o.diameter)\n self.assertEqual(o._id, \"123\")\n\n # test Exception is raised for invalid cone, clock angle inputs\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [10] , \"customClockAnglesVector\": [30]}')\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": 10 , \"customClockAnglesVector\": 30}')\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [10,10,100,10] , \"customClockAnglesVector\": [30,60,180,220,30]}') # number of entires in the cone angle vector is different from that in the clock angle vector\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [10,10,100] , \"customClockAnglesVector\": [30,60,180]}') # last vertex not same as the first vertex\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [10,20,10] , \"customClockAnglesVector\": [30,20,30]}') # minimum number of vertices not satisfied.\n\n def test_from_json_circular_specs(self):\n # Test for typical case\n o = SphericalGeometry.from_json(\n '{\"shape\": \"CIRCULAR\", \"diameter\": 30.56}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.CIRCULAR)\n self.assertEqual(o.cone_angle_vec, [15.28])\n self.assertIsNone(o.clock_angle_vec)\n self.assertEqual(o.diameter, 30.56)\n self.assertEqual(o.angle_height, 30.56)\n self.assertEqual(o.angle_width, 30.56)\n self.assertIsNone(o._id) \n\n o = SphericalGeometry.from_json(\n '{\"shape\": \"Circular\", \"diameter\": 15.4242, \"@id\":123}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.CIRCULAR)\n self.assertEqual(o.cone_angle_vec, [7.7121])\n self.assertIsNone(o.clock_angle_vec)\n self.assertIsNone(o.clock_angle_vec)\n self.assertEqual(o.diameter, 15.4242)\n self.assertEqual(o.angle_height, 15.4242)\n self.assertEqual(o.angle_width, 15.4242)\n self.assertEqual(o._id, 123)\n\n o = SphericalGeometry.from_json(\n '{\"shape\": \"CirCuLar\", \"diameter\": 25, \"@id\":\"123\"}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.CIRCULAR)\n self.assertEqual(o.cone_angle_vec, [12.5])\n self.assertIsNone(o.clock_angle_vec)\n self.assertIsNone(o.clock_angle_vec)\n self.assertEqual(o.diameter, 25)\n self.assertEqual(o.angle_height, 25)\n self.assertEqual(o.angle_width, 25)\n self.assertEqual(o._id, \"123\")\n # Test for incomplete specification\n with self.assertRaises(Exception):\n SphericalGeometry.from_json('{\"shape\": \"CIRCULAR\"}')\n # Test for full cone angle more than 180 deg\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": \"CIRCULAR\", \"diameter\": 230}')\n # Test for full cone angle less than 0 deg\n with self.assertRaises(Exception):\n 
SphericalGeometry.from_json(\n '{\"shape\": \"CIRCULAR\", \"diameter\": -10}') \n\n def test_from_json_rectangular_specs(self):\n # Test for typical cases\n o = SphericalGeometry.from_json(\n '{\"shape\": \"RECTANGULAR\", \"angleHeight\": 10 , \"angleWidth\": 30}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.RECTANGULAR)\n np.testing.assert_almost_equal(o.cone_angle_vec, [\n 15.79322415135941, 15.79322415135941, 15.79322415135941, 15.79322415135941, 15.79322415135941])\n np.testing.assert_almost_equal(o.clock_angle_vec, [\n 18.6768081421232, 161.3231918578768, 198.6768081421232, 341.3231918578768, 18.6768081421232])\n self.assertAlmostEqual(o.angle_height, 10)\n self.assertAlmostEqual(o.angle_width, 30)\n self.assertIsNone(o.diameter)\n self.assertIsNone(o._id)\n\n # Square FOV => Clock angle almost(?) 45 deg\n o = SphericalGeometry.from_json(\n '{\"shape\": \"RECTANGULAR\", \"angleHeight\": 15 , \"angleWidth\": 15, \"@id\": 123}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.RECTANGULAR)\n np.testing.assert_almost_equal(o.cone_angle_vec, [\n 10.591411134810208, 10.591411134810208, 10.591411134810208, 10.591411134810208, 10.591411134810208])\n np.testing.assert_almost_equal(o.clock_angle_vec, [\n 45.246138033024906, 134.75386196697508, 225.24613803302492, 314.7538619669751, 45.246138033024906])\n self.assertAlmostEqual(o.angle_height, 15)\n self.assertAlmostEqual(o.angle_width, 15)\n self.assertIsNone(o.diameter)\n self.assertEqual(o._id, 123)\n\n # angleWidth > angleHeight\n o = SphericalGeometry.from_json(\n '{\"shape\": \"RECTANGULAR\", \"angleHeight\": 30 , \"angleWidth\": 10}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.RECTANGULAR)\n np.testing.assert_almost_equal(o.cone_angle_vec, [\n 15.79322415135941, 15.79322415135941, 15.79322415135941, 15.79322415135941, 15.79322415135941])\n np.testing.assert_almost_equal(o.clock_angle_vec, [\n 71.98186515628623, 108.01813484371377, 251.98186515628623, 288.01813484371377, 71.98186515628623])\n self.assertAlmostEqual(o.angle_height, 30)\n self.assertAlmostEqual(o.angle_width, 10)\n self.assertIsNone(o.diameter)\n self.assertIsNone(o._id)\n\n # Test a edge case when the along-track fov is very small.\n o = SphericalGeometry.from_json(\n '{\"shape\": \"RECTANGULAR\", \"angleHeight\": 0.1 , \"angleWidth\": 50, \"@id\": \"123\"}')\n self.assertIsInstance(o, SphericalGeometry)\n self.assertEqual(o.shape, SphericalGeometry.Shape.RECTANGULAR)\n self.assertAlmostEqual(o.cone_angle_vec[0], 25, 2)\n self.assertAlmostEqual(o.clock_angle_vec[0], 0, delta=0.2)\n self.assertAlmostEqual(o.clock_angle_vec[1], 180, delta=0.2)\n self.assertAlmostEqual(o.clock_angle_vec[2], 180, delta=0.2)\n self.assertAlmostEqual(o.clock_angle_vec[3], 360, delta=0.2)\n self.assertAlmostEqual(o.clock_angle_vec[4], 0, delta=0.2)\n self.assertEqual(o.angle_height, 0.1)\n self.assertAlmostEqual(o.angle_width, 50)\n self.assertIsNone(o.diameter)\n self.assertEqual(o._id, \"123\")\n\n # Test case with incomplete specification\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": \"RECTANGULAR\", \"angleHeight\": 60 }')\n # Test for out-of-range specification\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": \"RECTANGULAR\", \"angleHeight\": 30 , \"angleWidth\": 210}')\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": 
\"RECTANGULAR\", \"angleHeight\": -10 , \"angleWidth\": 5}')\n with self.assertRaises(Exception):\n SphericalGeometry.from_json(\n '{\"shape\": \"RECTANGULAR\", \"angleHeight\": -1110 , \"angleWidth\": 50}')\n\n def test_get_rect_poly_specs_from_cone_clock_angles(self): \n # Square case\n o = SphericalGeometry.from_json('{\"shape\": \"RECTANGULAR\", \"angleHeight\": 15 , \"angleWidth\": 15}')\n [angle_height, angle_width] = SphericalGeometry.get_rect_poly_specs_from_cone_clock_angles(o.cone_angle_vec, o.clock_angle_vec)\n self.assertAlmostEqual(angle_height, 15)\n self.assertAlmostEqual(angle_width, 15)\n \n # Test edge case with small along-track fov\n o = SphericalGeometry.from_json(\n '{\"shape\": \"RECTANGULAR\", \"angleHeight\": 0.1 , \"angleWidth\": 30}')\n self.assertIsInstance(o, SphericalGeometry)\n [angle_height, angle_width] = SphericalGeometry.get_rect_poly_specs_from_cone_clock_angles(o.cone_angle_vec, o.clock_angle_vec)\n self.assertAlmostEqual(angle_height, 0.1)\n self.assertAlmostEqual(angle_width, 30)\n\n o = SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [30,30,30,30,30] , \"customClockAnglesVector\": [20,160,200,-20,20]}')\n [angle_height, angle_width] = SphericalGeometry.get_rect_poly_specs_from_cone_clock_angles(o.cone_angle_vec, o.clock_angle_vec)\n self.assertAlmostEqual(angle_height, 19.693103879668154)\n self.assertAlmostEqual(angle_width, 56.96247656267892)\n\n # Check for cases when the input cone, clock do not correspond to a rectangular fov\n with self.assertRaises(Exception):\n o.get_rect_poly_specs_from_cone_clock_angles([20], None)\n with self.assertRaises(Exception):\n o.get_rect_poly_specs_from_cone_clock_angles([10,10,10,10,10], [30,60,180,220,30])\n # slight tweaking of cone, clock angles corresponding to valid rectangular shape (15 deg x 15 deg) \n with self.assertRaises(Exception):\n o.get_rect_poly_specs_from_cone_clock_angles([10.591411134810208, 10.591411134810208, 10.591411134810208, 10.591411134810208, 10.591411134810208],\n [45.246138033024906, 134.75386196697508, 125.24613803302492, 314.7538619669751, 45.246138033024906])\n with self.assertRaises(Exception):\n o.get_rect_poly_specs_from_cone_clock_angles([10.591411134810208, 10.591411134810208, 10.591411134810208, 10.591411134810208, 10.591411134810208, 10.591411134810208], \n [45.246138033024906, 134.75386196697508, 225.24613803302492, 314.7538619669751, 45.246138033024906])\n with self.assertRaises(Exception):\n o.get_rect_poly_specs_from_cone_clock_angles([10.591411134810208, 20.591411134810208, 10.591411134810208, 10.591411134810208, 10.591411134810208], \n [45.246138033024906, 134.75386196697508, 225.24613803302492, 314.7538619669751, 45.246138033024906])\n\n def test_to_dict(self):\n # custom shape\n o = SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [10,10,10,10,10] , \"customClockAnglesVector\": [30,60,180,220,30]}')\n d = o.to_dict()\n self.assertEqual(d[\"shape\"], \"CUSTOM\")\n self.assertEqual(d[\"customConeAnglesVector\"], \"[10.0,10.0,10.0,10.0,10.0]\")\n self.assertEqual(d[\"customClockAnglesVector\"], \"[30.0,60.0,180.0,220.0,30.0]\")\n self.assertIsNone(d[\"@id\"])\n # circular shape\n o = SphericalGeometry.from_json(\n '{\"shape\": \"CIRCULAR\", \"diameter\": 30, \"@id\": 123}')\n d = o.to_dict()\n self.assertEqual(d[\"shape\"], \"CIRCULAR\")\n self.assertEqual(d[\"diameter\"], 30.0)\n self.assertEqual(d[\"@id\"], 123)\n \n # rectangular shape\n o = SphericalGeometry.from_json(\n '{\"shape\": 
\"RECTANGULAR\", \"angleHeight\": 10 , \"angleWidth\": 30, \"@id\": \"123\"}')\n d = o.to_dict()\n self.assertEqual(d[\"shape\"], \"RECTANGULAR\")\n self.assertAlmostEqual(d[\"angleHeight\"], 10.0)\n self.assertAlmostEqual(d[\"angleWidth\"], 30.0)\n self.assertEqual(d[\"@id\"], \"123\")\n \n def test___eq__(self):\n # custom shape\n o1 = SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [10,10,10,10,10] , \"customClockAnglesVector\": [30,60,180,220,30]}')\n o2 = SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [10,10,10,10,10] , \"customClockAnglesVector\": [30,60,180,220,30]}')\n self.assertTrue(o1==o2)\n\n o1 = SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [20,10,10,10,20] , \"customClockAnglesVector\": [30,60,180,220,30]}')\n o2 = SphericalGeometry.from_json(\n '{\"shape\": \"CUSTOM\", \"customConeAnglesVector\": [10,10,10,10,10] , \"customClockAnglesVector\": [30,60,180,220,30]}')\n self.assertFalse(o1==o2)\n\n # circular shape\n o1 = SphericalGeometry.from_json('{\"shape\": \"CIRCULAR\", \"diameter\": 30, \"@id\": 123}')\n o2 = SphericalGeometry.from_json('{\"shape\": \"CIRCULAR\", \"diameter\": 30, \"@id\": 123}')\n self.assertTrue(o1==o2)\n # @id may be different, but still satisfies the equality criteria\n o1 = SphericalGeometry.from_json('{\"shape\": \"CIRCULAR\", \"diameter\": 30, \"@id\": 245}')\n o2 = SphericalGeometry.from_json('{\"shape\": \"CIRCULAR\", \"diameter\": 30, \"@id\": 123}')\n self.assertTrue(o1==o2)\n \n o1 = SphericalGeometry.from_json('{\"shape\": \"CIRCULAR\", \"diameter\": 30, \"@id\": 123}')\n o2 = SphericalGeometry.from_json('{\"shape\": \"CIRCULAR\", \"diameter\": 20, \"@id\": 123}')\n self.assertFalse(o1==o2)\n\n # rectangular shape\n o1 = SphericalGeometry.from_json('{\"shape\": \"RECTANGULAR\", \"angleHeight\": 10 , \"angleWidth\": 30, \"@id\": \"xyz\"}')\n o2 = SphericalGeometry.from_json('{\"shape\": \"RECTANGULAR\", \"angleHeight\": 10 , \"angleWidth\": 30, \"@id\": \"123\"}')\n self.assertTrue(o1==o2)\n\n o1 = SphericalGeometry.from_json('{\"shape\": \"RECTANGULAR\", \"angleHeight\": 10 , \"angleWidth\": 30}')\n o2 = SphericalGeometry.from_json('{\"shape\": \"RECTANGULAR\", \"angleHeight\": 10 , \"angleWidth\": 40}')\n self.assertFalse(o1==o2)\n\n # test equality with object of another type\n o1 = SphericalGeometry.from_json('{\"shape\": \"RECTANGULAR\", \"angleHeight\": 10 , \"angleWidth\": 30}')\n o2 = [1,2,3]\n with self.assertRaises(Exception):\n self.assertTrue(o1==o2)\n\nclass TestViewGeometry(unittest.TestCase): #TODO\n pass\n\nclass TestManeuver(unittest.TestCase):\n \n def test_from_json_CIRCULAR(self):\n\n o = Maneuver.from_json('{\"maneuverType\": \"CIRCULAR\", \"diameter\":10.3}')\n self.assertIsInstance(o, Maneuver)\n self.assertEqual(o.maneuver_type, Maneuver.Type.CIRCULAR)\n self.assertEqual(o.diameter, 10.3)\n self.assertIsNone(o.A_roll_min)\n self.assertIsNone(o.A_roll_max)\n self.assertIsNone(o.B_roll_min)\n self.assertIsNone(o.B_roll_max)\n self.assertIsNone(o._id)\n\n o = Maneuver.from_json('{\"maneuverType\": \"circular\", \"diameter\":10, \"@id\":\"123\"}')\n self.assertIsInstance(o, Maneuver)\n self.assertEqual(o.maneuver_type, Maneuver.Type.CIRCULAR)\n self.assertIsInstance(o.diameter, float)\n self.assertIsNone(o.A_roll_min)\n self.assertIsNone(o.A_roll_max)\n self.assertIsNone(o.B_roll_min)\n self.assertIsNone(o.B_roll_max)\n self.assertEqual(o._id, \"123\")\n \n with self.assertRaises(Exception):\n o = 
Maneuver.from_json('{\"maneuverType\": \"CIRCULAR\"}')\n \n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"CIRCULAR\", \"diameter\":-10}')\n \n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"CIRCULAR\", \"diameter\":190}')\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"CIRC\", \"diameter\":10}')\n \n def test_from_json_SINGLE_ROLL_ONLY(self):\n\n o = Maneuver.from_json('{\"maneuverType\": \"SINGLE_ROLL_ONLY\", \"A_rollMin\":0, \"A_rollMax\": 30}')\n self.assertIsInstance(o, Maneuver)\n self.assertEqual(o.maneuver_type, Maneuver.Type.SINGLE_ROLL_ONLY)\n self.assertIsNone(o.diameter)\n self.assertEqual(o.A_roll_min, 0)\n self.assertEqual(o.A_roll_max, 30)\n self.assertIsNone(o.B_roll_min)\n self.assertIsNone(o.B_roll_max)\n self.assertIsNone(o._id)\n\n o = Maneuver.from_json('{\"maneuverType\": \"SINGLE_ROLL_ONLY\", \"A_rollMin\":-10, \"A_rollMax\": 0, \"@id\":123}')\n self.assertIsInstance(o, Maneuver)\n self.assertEqual(o.maneuver_type, Maneuver.Type.SINGLE_ROLL_ONLY)\n self.assertIsNone(o.diameter)\n self.assertEqual(o.A_roll_min, -10)\n self.assertEqual(o.A_roll_max, 0)\n self.assertIsNone(o.B_roll_min)\n self.assertIsNone(o.B_roll_max)\n self.assertEqual(o._id, 123)\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"SINGLE_ROLL_ONLY\"}')\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"SINGLE_ROLL_ONLY\", \"A_rollMin\":10}')\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"SINGLE_ROLL_ONLY\", \"A_rollMax\": 0}')\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"SINGLE_ROLL_ONLY\", \"A_rollMin\":190, \"A_rollMax\": 200}')\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"SINGLE_ROLL_ONLY\", \"A_rollMin\":0, \"A_rollMax\": -190}')\n \n def test_from_json_DOUBLE_ROLL_ONLY(self):\n\n o = Maneuver.from_json('{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":0, \"A_rollMax\": 30, \"B_rollMin\":-30, \"B_rollMax\": 0}')\n self.assertIsInstance(o, Maneuver)\n self.assertEqual(o.maneuver_type, Maneuver.Type.DOUBLE_ROLL_ONLY)\n self.assertIsNone(o.diameter)\n self.assertEqual(o.A_roll_min, 0)\n self.assertEqual(o.A_roll_max, 30)\n self.assertEqual(o.B_roll_min, -30)\n self.assertEqual(o.B_roll_max, 0)\n self.assertIsNone(o._id)\n\n o = Maneuver.from_json('{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":-10, \"A_rollMax\": 0, \"B_rollMin\":10, \"B_rollMax\": 60, \"@id\":\"123\"}')\n self.assertIsInstance(o, Maneuver)\n self.assertEqual(o.maneuver_type, Maneuver.Type.DOUBLE_ROLL_ONLY)\n self.assertIsNone(o.diameter)\n self.assertEqual(o.A_roll_min, -10)\n self.assertEqual(o.A_roll_max, 0)\n self.assertEqual(o.B_roll_min, 10)\n self.assertEqual(o.B_roll_max, 60)\n self.assertEqual(o._id, \"123\")\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"DOUBLE_ROLL_ONLY\"}')\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":10, \"A_rollMax\": 50}')\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"B_rollMin\":10, \"B_rollMax\": 60}')\n\n with self.assertRaises(Exception):\n o = Maneuver.from_json('{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":30, \"A_rollMax\": 0, \"B_rollMin\":-30, \"B_rollMax\": 0}')\n\n with self.assertRaises(Exception):\n 
o = Maneuver.from_json('{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":0, \"A_rollMax\": 30, \"B_rollMin\":-30, \"B_rollMax\": -60}')\n\n def test_to_dict(self):\n \n # CIRCULAR\n o = Maneuver.from_json('{\"maneuverType\": \"circular\", \"diameter\":10, \"@id\":\"123\"}')\n d = o.to_dict()\n self.assertEqual(d[\"maneuverType\"], \"CIRCULAR\")\n self.assertEqual(d[\"diameter\"], 10.0)\n self.assertEqual(d[\"@id\"], \"123\")\n\n # SINGLE_ROLL_ONLY\n o = Maneuver.from_json(\n '{\"maneuverType\": \"single_Roll_only\", \"A_rollMin\":0, \"A_rollMax\": 30, \"@id\": 123}')\n d = o.to_dict()\n self.assertEqual(d[\"maneuverType\"], \"SINGLE_ROLL_ONLY\")\n self.assertEqual(d[\"A_rollMin\"], 0.0)\n self.assertEqual(d[\"A_rollMax\"], 30.0)\n self.assertEqual(d[\"@id\"], 123)\n\n # DOUBLE_ROLL_ONLY\n o = Maneuver.from_json(\n '{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":-10, \"A_rollMax\": 0, \"B_rollMin\":10, \"B_rollMax\": 60}')\n d = o.to_dict()\n self.assertEqual(d[\"maneuverType\"], \"DOUBLE_ROLL_ONLY\")\n self.assertEqual(d[\"A_rollMin\"], -10.0)\n self.assertEqual(d[\"A_rollMax\"], 0.0)\n self.assertEqual(d[\"B_rollMin\"], 10.0)\n self.assertEqual(d[\"B_rollMax\"], 60.0)\n self.assertIsNone(d[\"@id\"])\n \n def test___eq__(self):\n\n # CIRCULAR\n o1 = Maneuver.from_json('{\"maneuverType\": \"circular\", \"diameter\":10, \"@id\":\"123\"}')\n o2 = Maneuver.from_json('{\"maneuverType\": \"circular\", \"diameter\":10, \"@id\":\"abc\"}')\n self.assertTrue(o1==o2)\n o1 = Maneuver.from_json('{\"maneuverType\": \"circular\", \"diameter\":20, \"@id\":\"123\"}')\n o2 = Maneuver.from_json('{\"maneuverType\": \"circular\", \"diameter\":10, \"@id\":\"123\"}')\n self.assertFalse(o1==o2)\n\n # SINGLE_ROLL_ONLY\n o1 = Maneuver.from_json(\n '{\"maneuverType\": \"single_Roll_only\", \"A_rollMin\":0, \"A_rollMax\": 30}')\n o2 = Maneuver.from_json(\n '{\"maneuverType\": \"single_Roll_only\", \"A_rollMin\":0, \"A_rollMax\": 30}')\n self.assertTrue(o1==o2)\n o1 = Maneuver.from_json(\n '{\"maneuverType\": \"single_Roll_only\", \"A_rollMin\":0, \"A_rollMax\": 30}')\n o2 = Maneuver.from_json(\n '{\"maneuverType\": \"single_Roll_only\", \"A_rollMin\":0, \"A_rollMax\": 20.1}')\n self.assertFalse(o1==o2)\n\n # DOUBLE_ROLL_ONLY\n o1 = Maneuver.from_json(\n '{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":-10, \"A_rollMax\": 0, \"B_rollMin\":10, \"B_rollMax\": 60}')\n o2 = Maneuver.from_json(\n '{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":-10, \"A_rollMax\": 0, \"B_rollMin\":10, \"B_rollMax\": 60}')\n self.assertTrue(o1==o2)\n o1 = Maneuver.from_json(\n '{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":-10, \"A_rollMax\": 10, \"B_rollMin\":10, \"B_rollMax\": 60}')\n o2 = Maneuver.from_json(\n '{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":-10, \"A_rollMax\": 0, \"B_rollMin\":10, \"B_rollMax\": 60}')\n self.assertFalse(o1==o2)\n\n def test_calc_field_of_regard(self):\n\n # CIRCULAR Maneuver\n # circular input fov\n o = Maneuver.from_json('{\"maneuverType\": \"circular\", \"diameter\":10, \"@id\":\"123\"}')\n x = o.calc_field_of_regard(SphericalGeometry.from_dict({\"shape\":\"Circular\", \"diameter\":5}))\n self.assertEqual(x[0], ViewGeometry.from_dict({\"orientation\":{\"referenceFrame\":\"Nadir_pointing\", \"convention\":\"REF_FRAME_ALIGNED\"}, \n \"sphericalGeometry\":{\"shape\":\"Circular\", \"diameter\":15}}))\n\n # rectangular input fov\n o = Maneuver.from_json('{\"maneuverType\": \"circular\", \"diameter\":10, \"@id\":\"123\"}')\n x = 
o.calc_field_of_regard(SphericalGeometry.from_dict({\"shape\":\"rectangular\", \"angleWidth\":5, \"angleHeight\":5}))\n self.assertEqual(x[0].orien, Orientation(ref_frame=\"Nadir_pointing\"))\n proxy_fov_geom = x[0].sph_geom\n valid_fov_geom = SphericalGeometry.from_dict({\"shape\":\"Circular\", \"diameter\":17.069945578430072})\n self.assertAlmostEqual(proxy_fov_geom.diameter, valid_fov_geom.diameter)\n \n # SINGLE_ROLL_ONLY\n # circular input fov\n o = Maneuver.from_json('{\"maneuverType\": \"single_Roll_only\", \"A_rollMin\":10, \"A_rollMax\": 30}')\n x = o.calc_field_of_regard(SphericalGeometry.from_dict({\"shape\":\"Circular\", \"diameter\":5}))\n self.assertEqual(x[0], ViewGeometry.from_dict({\"orientation\":{\"referenceFrame\":\"Nadir_pointing\", \"convention\": \"SIDE_LOOK\", \"sideLookAngle\": 20}, \n \"sphericalGeometry\":{\"shape\":\"rectangular\", \"angleWidth\":25, \"angleHeight\":5}}))\n\n # rectangular input fov\n o = Maneuver.from_json('{\"maneuverType\": \"single_Roll_only\", \"A_rollMin\":10, \"A_rollMax\": 30}')\n x = o.calc_field_of_regard(SphericalGeometry.from_dict({\"shape\":\"rectangular\", \"angleWidth\":5, \"angleHeight\":5}))\n self.assertEqual(x[0].orien, Orientation.from_dict({\"referenceFrame\":\"Nadir_pointing\", \"convention\": \"SIDE_LOOK\", \"sideLookAngle\": 20}))\n proxy_fov_geom = x[0].sph_geom\n valid_fov_geom = SphericalGeometry.from_dict({\"shape\":\"rectangular\", \"angleWidth\":25, \"angleHeight\":5})\n self.assertAlmostEqual(proxy_fov_geom.angle_width, valid_fov_geom.angle_width)\n self.assertAlmostEqual(proxy_fov_geom.angle_height, valid_fov_geom.angle_height)\n\n # DOUBLE_ROLL_ONLY\n # circular input fov\n o = Maneuver.from_json('{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":-10, \"A_rollMax\": 0, \"B_rollMin\":10, \"B_rollMax\": 60}')\n x = o.calc_field_of_regard(SphericalGeometry.from_dict({\"shape\":\"Circular\", \"diameter\":7.5}))\n self.assertEqual(x[0], ViewGeometry.from_dict({\"orientation\":{\"referenceFrame\":\"Nadir_pointing\", \"convention\": \"SIDE_LOOK\", \"sideLookAngle\": -5}, \n \"sphericalGeometry\":{\"shape\":\"rectangular\", \"angleWidth\":17.5, \"angleHeight\":7.5}}))\n self.assertEqual(x[1], ViewGeometry.from_dict({\"orientation\":{\"referenceFrame\":\"Nadir_pointing\", \"convention\": \"SIDE_LOOK\", \"sideLookAngle\": 35}, \n \"sphericalGeometry\":{\"shape\":\"rectangular\", \"angleWidth\":57.5, \"angleHeight\":7.5}}))\n\n # rectangular input fov\n o = Maneuver.from_json('{\"maneuverType\": \"DOUBLE_ROLL_ONLY\", \"A_rollMin\":-10, \"A_rollMax\": 0, \"B_rollMin\":10, \"B_rollMax\": 60}')\n x = o.calc_field_of_regard(SphericalGeometry.from_dict({\"shape\":\"rectangular\", \"angleWidth\":7.5, \"angleHeight\":7.5}))\n self.assertEqual(x[0].orien, Orientation.from_dict({\"referenceFrame\":\"Nadir_pointing\", \"convention\": \"SIDE_LOOK\", \"sideLookAngle\": -5}))\n self.assertEqual(x[1].orien, Orientation.from_dict({\"referenceFrame\":\"Nadir_pointing\", \"convention\": \"SIDE_LOOK\", \"sideLookAngle\": 35}))\n proxy_fov_geom = x[0].sph_geom\n valid_fov_geom = SphericalGeometry.from_dict({\"shape\":\"rectangular\", \"angleWidth\":17.5, \"angleHeight\":7.5})\n self.assertAlmostEqual(proxy_fov_geom.angle_width, valid_fov_geom.angle_width)\n self.assertAlmostEqual(proxy_fov_geom.angle_height, valid_fov_geom.angle_height)\n proxy_fov_geom = x[1].sph_geom\n valid_fov_geom = SphericalGeometry.from_dict({\"shape\":\"rectangular\", \"angleWidth\":57.5, \"angleHeight\":7.5})\n self.assertAlmostEqual(proxy_fov_geom.angle_width, 
valid_fov_geom.angle_width)\n self.assertAlmostEqual(proxy_fov_geom.angle_height, valid_fov_geom.angle_height)\n\nclass TestSyntheticDataConfiguration(unittest.TestCase):\n def test_from_json(self):\n o = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": [ \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f001.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f002.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f003.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f004.nc\"], \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"SCIPY_LINEAR\" }'\n )\n self.assertIsInstance(o, SyntheticDataConfiguration)\n self.assertIsInstance(o.sourceFilePaths, list)\n self.assertEqual(o.sourceFilePaths[0], \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\")\n self.assertEqual(o.sourceFilePaths[1], \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f001.nc\")\n self.assertEqual(o.sourceFilePaths[2], \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f002.nc\")\n self.assertEqual(o.sourceFilePaths[3], \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f003.nc\")\n self.assertEqual(o.sourceFilePaths[4], \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f004.nc\")\n self.assertEqual(o.geophysicalVar, \"TMP_P0_L1_GLL0\")\n self.assertEqual(o.interpolMethod, SyntheticDataConfiguration.InterpolationMethod.SCIPY_LINEAR)\n self.assertIsNone(o._id)\n # only 1 source file \n o = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": [ \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\"], \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"METPY_LINEAR\", \\\n \"@id\": 123 }'\n )\n self.assertIsInstance(o, SyntheticDataConfiguration)\n self.assertIsInstance(o.sourceFilePaths, list)\n self.assertEqual(o.sourceFilePaths[0], \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\")\n self.assertEqual(o.geophysicalVar, \"TMP_P0_L1_GLL0\")\n self.assertEqual(o.interpolMethod, SyntheticDataConfiguration.InterpolationMethod.METPY_LINEAR)\n self.assertEqual(o._id, 123)\n # only 1 source file (not in a list)\n o = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\", \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"METPY_LINEAR\", \\\n \"@id\": \"123\" }'\n )\n self.assertIsInstance(o, SyntheticDataConfiguration)\n self.assertIsInstance(o.sourceFilePaths, list)\n self.assertEqual(o.sourceFilePaths[0], \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\")\n self.assertEqual(o.geophysicalVar, \"TMP_P0_L1_GLL0\")\n self.assertEqual(o.interpolMethod, SyntheticDataConfiguration.InterpolationMethod.METPY_LINEAR)\n self.assertEqual(o._id, \"123\")\n\n def test_to_dict(self):\n \n o = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": [ \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f001.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f002.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f003.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f004.nc\"], \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"SCIPY_LINEAR\" }')\n d = o.to_dict()\n self.assertEqual(d[\"sourceFilePaths\"], [\"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\", \\\n 
\"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f001.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f002.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f003.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f004.nc\"])\n self.assertEqual(d[\"geophysicalVar\"], \"TMP_P0_L1_GLL0\")\n self.assertEqual(d[\"interpolMethod\"], \"SCIPY_LINEAR\")\n self.assertIsNone(d[\"@id\"])\n\n o = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\", \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"METPY_LINEAR\", \\\n \"@id\": \"123\" }'\n )\n d = o.to_dict()\n self.assertEqual(d[\"sourceFilePaths\"], [\"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\"])\n self.assertEqual(d[\"geophysicalVar\"], \"TMP_P0_L1_GLL0\")\n self.assertEqual(d[\"interpolMethod\"], \"METPY_LINEAR\")\n self.assertEqual(d[\"@id\"], \"123\")\n \n def test_get_interpolator(self):\n\n o = SyntheticDataConfiguration.from_json(' {\"interpolMethod\": \"SCIPY_LINEAR\" }')\n self.assertEqual(o.get_interpolator(), SyntheticDataInterpolator.scipy_linear)\n o = SyntheticDataConfiguration.from_json(' {\"interpolMethod\": \"METPY_LINEAR\" }')\n self.assertEqual(o.get_interpolator(), SyntheticDataInterpolator.metpy_linear)\n \n def test___eq__(self):\n\n o1 = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": [ \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f001.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f002.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f003.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f004.nc\"], \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"SCIPY_LINEAR\" }')\n o2 = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": [ \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f001.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f002.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f003.nc\", \\\n \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f004.nc\"], \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"SCIPY_LINEAR\" }')\n \n self.assertTrue(o1==o2)\n\n o1 = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": [ \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\"], \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"METPY_LINEAR\", \\\n \"@id\": 123 }'\n )\n o2 = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": \"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\", \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"METPY_LINEAR\", \\\n \"@id\": \"abc\" }'\n )\n self.assertTrue(o1==o2)\n\n o1 = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": [\"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\"], \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"SCIPY_LINEAR\", \\\n \"@id\": 123 }'\n )\n o2 = SyntheticDataConfiguration.from_json(' {\"sourceFilePaths\": [\"C:/workspace/gfs_forecast_data/gfs.t12z.pgrb2.0p25.f000.nc\"], \\\n \"geophysicalVar\": \"TMP_P0_L1_GLL0\", \\\n \"interpolMethod\": \"METPY_LINEAR\", \\\n \"@id\": 123 }'\n )\n self.assertFalse(o1==o2)\n\nclass TestSyntheticDataInterpolator(unittest.TestCase):\n \n def test_scipy_linear(self):\n \n x = 
np.arange(-2, 2, 0.1)\n y = np.arange(-2, 2, 0.1)\n lons, lats = np.meshgrid(x, y)\n \n # uniform field \n var_data = np.sin(lons**2+lats**2)*0 + 1\n pixel_center_pos = [{\"lon[deg]\": 0, \"lat[deg]\": 0}, {\"lon[deg]\": 0.5, \"lat[deg]\": 0.5}, {\"lon[deg]\": -0.5, \"lat[deg]\": 0.5}, {\"lon[deg]\": 0.5, \"lat[deg]\": -0.5}, {\"lon[deg]\": -0.5, \"lat[deg]\": -0.5}]\n\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[0], 1)\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[1], 1)\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[2], 1)\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[3], 1)\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[4], 1)\n\n # sine field\n var_data = np.sin(lons**2+lats**2)\n pixel_center_pos = [{\"lon[deg]\": 0, \"lat[deg]\": 0}, {\"lon[deg]\": 0.5, \"lat[deg]\": 0.5}, {\"lon[deg]\": 1.5, \"lat[deg]\": 0.5}, {\"lon[deg]\": 0.5, \"lat[deg]\": 1}, {\"lon[deg]\": -1.5, \"lat[deg]\": 0}]\n\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[0], 0)\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[1], np.sin(0.5**2 + 0.5**2))\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[2], np.sin(1.5**2 + 0.5**2))\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[3], np.sin(0.5**2 + 1**2))\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[4], np.sin(1.5**2 + 0**2))\n\n \n def test_metpy_linear(self):\n \n x = np.arange(-2, 2, 0.1)\n y = np.arange(-2, 2, 0.1)\n lons, lats = np.meshgrid(x, y)\n var_data = np.sin(lons**2+lats**2)*0 + 1\n \n # uniform field \n var_data = np.sin(lons**2+lats**2)*0 + 1 \n pixel_center_pos = [{\"lon[deg]\": 0, \"lat[deg]\": 0}, {\"lon[deg]\": 0.5, \"lat[deg]\": 0.5}, {\"lon[deg]\": -0.5, \"lat[deg]\": 0.5}, {\"lon[deg]\": 0.5, \"lat[deg]\": -0.5}, {\"lon[deg]\": -0.5, \"lat[deg]\": -0.5}]\n\n self.assertAlmostEqual(SyntheticDataInterpolator.metpy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[0], 1)\n self.assertAlmostEqual(SyntheticDataInterpolator.metpy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[1], 1)\n self.assertAlmostEqual(SyntheticDataInterpolator.metpy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[2], 1)\n self.assertAlmostEqual(SyntheticDataInterpolator.metpy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[3], 1)\n self.assertAlmostEqual(SyntheticDataInterpolator.metpy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[4], 1)\n\n # sine field\n var_data = np.sin(lons**2+lats**2)\n pixel_center_pos = [{\"lon[deg]\": 0, \"lat[deg]\": 0}, {\"lon[deg]\": 0.5, \"lat[deg]\": 0.5}, {\"lon[deg]\": 1.5, \"lat[deg]\": 0.5}, {\"lon[deg]\": 0.5, \"lat[deg]\": 1}, {\"lon[deg]\": -1.5, \"lat[deg]\": 0}]\n\n 
self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[0], 0)\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[1], np.sin(0.5**2 + 0.5**2))\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[2], np.sin(1.5**2 + 0.5**2))\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[3], np.sin(0.5**2 + 1**2))\n self.assertAlmostEqual(SyntheticDataInterpolator.scipy_linear(lons.flatten(),lats.flatten(),var_data.flatten(), pixel_center_pos)[4], np.sin(1.5**2 + 0**2))\n\nclass TestConstants(unittest.TestCase):\n def test_radiusOfEarthInKM(self):\n self.assertEqual(Constants.radiusOfEarthInKM, 6378.137)\n\n def test_speedOfLight(self):\n self.assertEqual(Constants.speedOfLight, 299792458)\n\n def test_Boltzmann(self):\n self.assertEqual(Constants.Boltzmann, 1.380649e-23)\n\n def test_angularSpeedOfEarthInRadPerSec(self):\n self.assertAlmostEqual(\n Constants.angularSpeedOfEarthInRadPerSec, 2*np.pi / 86400.0, places=5)\n\n def test_Planck(self):\n self.assertEqual(Constants.Planck, 6.62607015e-34)\n\n def test_SunBlackBodyTemperature(self):\n self.assertEqual(Constants.SunBlackBodyTemperature, 6000)\n\nclass TestMathUtilityFunctions(unittest.TestCase):\n\n def test_normalize(self):\n\n # Test if returned vector has unit magnitude\n self.assertAlmostEqual(np.linalg.norm(\n MathUtilityFunctions.normalize([2, 4, 6])), 1)\n self.assertAlmostEqual(np.linalg.norm(\n MathUtilityFunctions.normalize([-2, 4, 65])), 1)\n self.assertAlmostEqual(np.linalg.norm(\n MathUtilityFunctions.normalize([2, 0, 0])), 1)\n self.assertAlmostEqual(np.linalg.norm(\n MathUtilityFunctions.normalize([2, -4, -42343])), 1)\n\n with self.assertRaises(Exception):\n MathUtilityFunctions.normalize([0, 0, 0])\n\n def test_angle_between_vectors(self):\n\n # Test trivial cases\n self.assertAlmostEqual(MathUtilityFunctions.angle_between_vectors(\n [100, 0, 0], [0, 1, 0]), np.pi/2)\n self.assertAlmostEqual(MathUtilityFunctions.angle_between_vectors(\n [100, 0, 0], [-5000, 0, 0]), np.pi)\n self.assertAlmostEqual(MathUtilityFunctions.angle_between_vectors(\n [45, 45, 45], [-45, -45, -45]), np.pi)\n self.assertAlmostEqual(MathUtilityFunctions.angle_between_vectors(\n [45, 45, 45], [45, 45, 45]), 0)\n self.assertAlmostEqual(MathUtilityFunctions.angle_between_vectors(\n [.5, .5, 0], [1, 0, 0]), np.pi/4)\n\nclass TestGeoUtilityFunctions(unittest.TestCase):\n\n @staticmethod\n def compute_satellite_footprint_speed_with_EF_vectors(_gmat_r_ef, _gmat_v_ef):\n \"\"\" GMAT does not directly output the ground-speed. Compute using EF position vector and EF velocity vector available from GMAT. \n In this case no compensation needs to be performed for Earth-rotation (unlike in the case of instrupy.GeoUtilityFunctions.compute_satellite_footprint_speed(r,v) \n where the input vectors (r,v) are in ECI frame) since the (r,v) vectors are taken in EF frame. 
\n\n \"\"\"\n _gmat_omega = np.cross(\n _gmat_r_ef, _gmat_v_ef) / (np.linalg.norm(_gmat_r_ef)**2)\n _gmat_fp_speed = np.linalg.norm(_gmat_omega)*6378100\n return _gmat_fp_speed\n\n def test_compute_satellite_footprint_speed(self):\n\n # Validating using 'GMAT R2018a 64bit' as external reference.\n # Test 1 (low inclination orbit)\n _gmat_r_eci = [7100, 0, 1300]\n _gmat_v_eci = [0, 7.35, 1]\n _gmat_r_ef = [1272.929354832122, 6984.992046762509, 1299.821897134824]\n _gmat_v_ef = [-6.721571319063451,\n 1.224987254217343, 0.9997979087785365]\n _gmat_fp_speed = TestGeoUtilityFunctions.compute_satellite_footprint_speed_with_EF_vectors(\n _gmat_r_ef, _gmat_v_ef)\n fp_speed = GeoUtilityFunctions.compute_satellite_footprint_speed(\n _gmat_r_eci, _gmat_v_eci)\n # acceptable error limits of 10 m/s\n self.assertAlmostEqual(_gmat_fp_speed, fp_speed, delta=10)\n\n # Test 2 (mid inclination orbit)\n _gmat_r_eci = [-5436.533450168191, -\n 3053.079465330414, 3181.636343704307]\n _gmat_v_eci = [1.114632787950382, -\n 6.244419534847031, -4.087510077679621]\n _gmat_r_ef = [2028.820780817868, -5895.733640536318, 3181.856545975942]\n _gmat_v_ef = [5.913247918270616, -\n 0.1710549493218195, -4.087366758963451]\n _gmat_fp_speed = TestGeoUtilityFunctions.compute_satellite_footprint_speed_with_EF_vectors(\n _gmat_r_ef, _gmat_v_ef)\n fp_speed = GeoUtilityFunctions.compute_satellite_footprint_speed(\n _gmat_r_eci, _gmat_v_eci)\n # acceptable error limits of 10 m/s\n self.assertAlmostEqual(_gmat_fp_speed, fp_speed, delta=10)\n\n # Test (retrograde high inclination orbit)\n _gmat_r_eci = [-2138.840731205298, -\n 4957.003244328315, 4455.724313987103]\n _gmat_v_eci = [-3.12197717174031, -3.798411634168159, -5.7243556677441]\n _gmat_r_ef = [4493.108372866067, -\n 2992.792499467348, 4455.914070627137]\n _gmat_v_ef = [2.959015709181303, -\n 4.08021854816618, -5.724173608651788]\n _gmat_fp_speed = TestGeoUtilityFunctions.compute_satellite_footprint_speed_with_EF_vectors(\n _gmat_r_ef, _gmat_v_ef)\n fp_speed = GeoUtilityFunctions.compute_satellite_footprint_speed(\n _gmat_r_eci, _gmat_v_eci)\n # acceptable error limits of 10 m/s\n self.assertAlmostEqual(_gmat_fp_speed, fp_speed, delta=10)\n\n # Test (retrograde mid-incinaton orbit)\n _gmat_r_eci = [74.22234833534203, -6234.715809034727, 3181.63634370431]\n _gmat_v_eci = [-5.965142343040525, -\n 2.15690945716741, -4.087510077679617]\n _gmat_r_ef = [6146.925885818154, -1044.708013320701, 3181.80566992428]\n _gmat_v_ef = [0.9763805839389828, -\n 6.703558863610055, -4.087302022035398]\n _gmat_fp_speed = TestGeoUtilityFunctions.compute_satellite_footprint_speed_with_EF_vectors(\n _gmat_r_ef, _gmat_v_ef)\n fp_speed = GeoUtilityFunctions.compute_satellite_footprint_speed(\n _gmat_r_eci, _gmat_v_eci)\n # acceptable error limits of 10 m/s\n self.assertAlmostEqual(_gmat_fp_speed, fp_speed, delta=10)\n\n def test_latlonalt_To_Cartesian(self):\n\n # Test, trivial case with point at (0 deg,0 deg,0 km)\n p_vec = GeoUtilityFunctions.latlonalt_To_Cartesian(0, 0, 0)\n self.assertAlmostEqual(p_vec[0], 6378.137, delta=1)\n self.assertAlmostEqual(p_vec[1], 0)\n self.assertAlmostEqual(p_vec[2], 0)\n\n # Test, trivial case with point at (0 deg, 90 deg,0 km)\n p_vec = GeoUtilityFunctions.latlonalt_To_Cartesian(0, 90, 0)\n self.assertAlmostEqual(p_vec[0], 0)\n self.assertAlmostEqual(p_vec[1], 6378.137, delta=1)\n self.assertAlmostEqual(p_vec[2], 0)\n\n # Test, trivial case with point at (0 deg, -90 deg,0 km)\n p_vec = GeoUtilityFunctions.latlonalt_To_Cartesian(0, -90, 0)\n 
self.assertAlmostEqual(p_vec[0], 0)\n self.assertAlmostEqual(p_vec[1], -6378.137, delta=1)\n self.assertAlmostEqual(p_vec[2], 0)\n\n # Test, trivial case with point at (0 deg, -90 deg,100 km)\n p_vec = GeoUtilityFunctions.latlonalt_To_Cartesian(0, -90, 100)\n self.assertAlmostEqual(p_vec[0], 0)\n self.assertAlmostEqual(p_vec[1], -6378.137 - 100, delta=1)\n self.assertAlmostEqual(p_vec[2], 0)\n\n # Test, trivial case with point at (90 deg, 90 deg,100 km)\n p_vec = GeoUtilityFunctions.latlonalt_To_Cartesian(90, 90, 500)\n self.assertAlmostEqual(p_vec[0], 0)\n self.assertAlmostEqual(p_vec[1], 0)\n self.assertAlmostEqual(p_vec[2], 6378.137 + 500, delta=1)\n\n # Test, trivial case with point at (40 deg, 270 deg,100 km) and point at (40 deg, -90 deg,100 km) (both coords a off the same point)\n p_vec1 = GeoUtilityFunctions.latlonalt_To_Cartesian(40, 270, 100)\n p_vec2 = GeoUtilityFunctions.latlonalt_To_Cartesian(40, -90, 100)\n self.assertAlmostEqual(p_vec1[0], p_vec2[0])\n self.assertAlmostEqual(p_vec1[1], p_vec2[1])\n self.assertAlmostEqual(p_vec1[2], p_vec2[2])\n\n def test_latlonaltGeodetic_To_Cartesian(self):\n \"\"\" Validating using 'GMAT R2018a 64bit' as external reference. \n Generate some latitude, longitude and corresponding [X,Y,Z] position (in EF frame) of a object in GMAT. \n Check with the generated Cartesian position vector of the instrupy.GeoUtilityFunctions.latlonalt_To_Cartesian(...).\n\n \"\"\"\n # Test case 1, positive lat, positive lon\n p_vec = GeoUtilityFunctions.latlonaltGeodetic_To_Cartesian(\n 65.8772312763883, 102.0971826243001, 643.382903131308)\n _gmat_p_vec = [-602.9227650102, 2813.05830480065, 6385.563708800454]\n # acceptable error limits of 1 km\n self.assertAlmostEqual(p_vec[0], _gmat_p_vec[0], delta=1)\n self.assertAlmostEqual(p_vec[1], _gmat_p_vec[1], delta=1)\n self.assertAlmostEqual(p_vec[2], _gmat_p_vec[2], delta=1)\n\n # Test case 2, positive lat, negative lon\n p_vec = GeoUtilityFunctions.latlonaltGeodetic_To_Cartesian(\n 39.70771912759876, -33.66699237384308, 630.5515018592132)\n _gmat_p_vec = [4493.108372866067, -\n 2992.792499467348, 4455.914070627137]\n # acceptable error limits of 1 km\n self.assertAlmostEqual(p_vec[0], _gmat_p_vec[0], delta=1)\n self.assertAlmostEqual(p_vec[1], _gmat_p_vec[1], delta=1)\n self.assertAlmostEqual(p_vec[2], _gmat_p_vec[2], delta=1)\n\n # Test case 3, negative lat, positive lon\n p_vec = GeoUtilityFunctions.latlonaltGeodetic_To_Cartesian(\n -15.57990628688177, 128.1073197882878, 631.6338423255838)\n _gmat_p_vec = [-4167.949699384632,\n 5314.18497094466, -1871.641779273967]\n # acceptable error limits of 1 km\n self.assertAlmostEqual(p_vec[0], _gmat_p_vec[0], delta=1)\n self.assertAlmostEqual(p_vec[1], _gmat_p_vec[1], delta=1)\n self.assertAlmostEqual(p_vec[2], _gmat_p_vec[2], delta=1)\n\n # Test case 4, negative lat, negative lon\n p_vec = GeoUtilityFunctions.latlonaltGeodetic_To_Cartesian(\n -68.9408803669571, -93.36510673726006, 640.3777836151294)\n _gmat_p_vec = [-148.4295520342744, -\n 2524.319956760156, -6527.213550924283]\n # acceptable error limits of 1 km\n self.assertAlmostEqual(p_vec[0], _gmat_p_vec[0], delta=1)\n self.assertAlmostEqual(p_vec[1], _gmat_p_vec[1], delta=1)\n self.assertAlmostEqual(p_vec[2], _gmat_p_vec[2], delta=1)\n\n def test_geo2eci(self):\n \"\"\" Truth data from `IDL Astronomy Users Library <https://idlastro.gsfc.nasa.gov/ftp/pro/astro/geo2eci.pro>`_, on which the \n python function being tested is also written.\n \"\"\"\n # intersection of the equator and Greenwich's meridian on 2002/03/09 
21:21:21.021\n sample = GeoUtilityFunctions.geo2eci([0, 0, 0], 2452343.38982663)\n truth = [-3902.9606, 5044.5548, 0.0000000]\n self.assertAlmostEqual(sample[0], truth[0], places=3)\n self.assertAlmostEqual(sample[1], truth[1], places=3)\n self.assertAlmostEqual(sample[2], truth[2], places=3)\n\n def test_compute_sun_zenith(self):\n \"\"\" Truth data from https://www.esrl.noaa.gov/gmd/grad/solcalc/\n \"\"\"\n\n time_JDUT1 = 2452343.38982663 # 2002/03/09 21:21:21.021\n pos_km = GeoUtilityFunctions.geo2eci([0.0, 0.0, 0.0], time_JDUT1)\n self.assertIsNone(GeoUtilityFunctions.compute_sun_zenith(\n time_JDUT1, pos_km)[0]) # cause it is night\n\n time_JDUT1 = 2452343.000000 # 2002/03/09 12:00:00.000\n pos_km = GeoUtilityFunctions.geo2eci([0.0, 0.0, 0.0], time_JDUT1)\n self.assertAlmostEqual(GeoUtilityFunctions.compute_sun_zenith(\n time_JDUT1, pos_km)[0], np.deg2rad(90-84.82), places=2)\n\n time_JDUT1 = 2458619.133333 # A.D. 2019 May 15 15:12:00.0\n pos_km = GeoUtilityFunctions.geo2eci(\n [61.217, -149.9, 0.0], time_JDUT1)\n self.assertAlmostEqual(GeoUtilityFunctions.compute_sun_zenith(\n time_JDUT1, pos_km)[0], np.deg2rad(90-11.44), places=2) \n\n def test_JD2GMST(self):\n \"\"\" Truth data is from David A. Vallado,\"Fundamentals of Astrodynamics and Applications\", 4th ed, page after index titled julian Data Values.\n The table gives the GMST in degrees, hence is converted into hours for testing the intrupy.GeoUtilityFunctions.JD2GMST(...) function.\n Note that in the table the JD is specified at 12h UT, while GMST is at 0h UT. Take this into account.\n For example, for the date 2000 Jan 1 12h UT, the table reads the JD as 2451545. Since the GMST is specified at 0h UT (on the same day), the corresponding\n JD to be input is 2451544.5, corresponding to the date 2000 Jan 1 0h UT.\n \"\"\"\n self.assertAlmostEqual(GeoUtilityFunctions.JD2GMST(\n 2451544.5), 99.9677947 * (24/360), places=3) # row corresponding to year 2000\n\n self.assertAlmostEqual(GeoUtilityFunctions.JD2GMST(\n 2415020.5), 100.1837764 * (24/360), places=3) # row corresponding to year 1900\n self.assertAlmostEqual(GeoUtilityFunctions.JD2GMST(\n 2458119.5), 100.5992406 * (24/360), places=3) # row corresponding to year 2018\n self.assertAlmostEqual(GeoUtilityFunctions.JD2GMST(\n 2466154.5), 100.2728782 * (24/360), places=3) # row corresponding to year 2040 (Leap year)\n\n \"\"\" Truth data from https://www.celnav.de/longterm.htm.\n\n \"\"\"\n self.assertAlmostEqual(GeoUtilityFunctions.JD2GMST(\n 2458542.127859), 1.5460, places=3) # 2019/02/27 15:04:07 UT, after 12h UT test\n self.assertAlmostEqual(GeoUtilityFunctions.JD2GMST(\n 2459823.582662), 0.665478055555556, places=3) # 2022/09/01 01:59:02 UT, before 12h UT test\n\n def test_find_closest_value_in_array(self):\n\n self.assertEqual(MathUtilityFunctions.find_closest_value_in_array(\n [10, 45, 3, -10], 9), [10, 0])\n self.assertEqual(MathUtilityFunctions.find_closest_value_in_array(\n [10, 45, 3, -10], 25), [10, 0])\n self.assertEqual(MathUtilityFunctions.find_closest_value_in_array(\n [10, 45, 3, -10], 0), [3, 2])\n\n def test_SunVector_ECIeq(self):\n \"\"\" Truth data from running Matlab script :code:`sun.m` to compute Sun vector available as companion to \n David A.Vallado, Fundamental of Astrodynamics and Applications, 4th ed.\n Acceptable deviation is kept as 0.1km.\n \"\"\"\n # A.D. 
2006 Apr 2, 0 UT1\n self.assertAlmostEqual(GeoUtilityFunctions.SunVector_ECIeq(\n 2453827.5)[0], 146186212.986846, delta=0.1)\n self.assertAlmostEqual(GeoUtilityFunctions.SunVector_ECIeq(\n 2453827.5)[1], 28788976.3117029, delta=0.1)\n self.assertAlmostEqual(GeoUtilityFunctions.SunVector_ECIeq(\n 2453827.5)[2], \t12481063.6450839, delta=0.1)\n\n # A.D. 2019 Feb 27, 15:56:00.0 UT1\n self.assertAlmostEqual(GeoUtilityFunctions.SunVector_ECIeq(\n 2458542.163889)[0], 138092570.424158, delta=0.1)\n self.assertAlmostEqual(GeoUtilityFunctions.SunVector_ECIeq(\n 2458542.163889)[1], -49238069.9169012, delta=0.1)\n self.assertAlmostEqual(GeoUtilityFunctions.SunVector_ECIeq(\n 2458542.163889)[2],\t-21344772.5319679, delta=0.1)\n\n def test_checkLOSavailability(self):\n \"\"\" Truth data from running Matlab script :code:`sight.m` to compute SUn vector available as companion to \n David A.Vallado, Fundamental of Astrodynamics and Applications, 4th ed. \n \"\"\"\n self.assertFalse(GeoUtilityFunctions.checkLOSavailability(\n [-4464.696, -5102.509, 0], [5740.323, 3189.068, 0], 6378.137))\n self.assertTrue(GeoUtilityFunctions.checkLOSavailability(\n [-4464.696, -5102.509, 0], [-4464.696, -5102.509, 100], 6378.137))\n self.assertTrue(GeoUtilityFunctions.checkLOSavailability(\n [-4464.696, -5102.509, 0], [-7464.696, 102.509, 100], 6378.137))\n\n def test_calculate_derived_satellite_coords(self):\n\n # Test for cases where the input satellite position, time is equal to derived satellite position, time\n\n tObs_JDUT1 = 100.0\n obs_position_km = [6378+700, 0, 0]\n obs_vel_vec_kmps = [0, 6.5, 0]\n target_position_km = [6378, 0, 0]\n derived_coords = GeoUtilityFunctions.calculate_derived_satellite_coords(\n tObs_JDUT1, obs_position_km, obs_vel_vec_kmps, target_position_km)\n self.assertEqual(derived_coords[\"derived_obsTime_JDUT1\"], 100)\n self.assertEqual(\n derived_coords[\"derived_obs_pos_km\"], [6378+700, 0, 0])\n self.assertEqual(derived_coords[\"derived_range_vec_km\"], [-700, 0, 0])\n self.assertAlmostEqual(derived_coords[\"derived_alt_km\"], 700, delta=1)\n self.assertEqual(derived_coords[\"derived_incidence_angle_rad\"], 0)\n\n tObs_JDUT1 = 100.0\n obs_position_km = [6378+700, 100, 0]\n obs_vel_vec_kmps = [0, 6.5, 0]\n target_position_km = [6378, 100, 0]\n derived_coords = GeoUtilityFunctions.calculate_derived_satellite_coords(\n tObs_JDUT1, obs_position_km, obs_vel_vec_kmps, target_position_km)\n self.assertEqual(derived_coords[\"derived_obsTime_JDUT1\"], 100)\n self.assertEqual(derived_coords[\"derived_obs_pos_km\"], [\n 6378+700, 100, 0])\n self.assertEqual(derived_coords[\"derived_range_vec_km\"], [-700, 0, 0])\n self.assertAlmostEqual(derived_coords[\"derived_alt_km\"], 700, delta=1)\n self.assertEqual(\n derived_coords[\"derived_incidence_angle_rad\"], 0.015679201843266006)\n\n tObs_JDUT1 = 100.0\n obs_position_km = [6378+700, 0, 0]\n obs_vel_vec_kmps = [0, -6.5, 0]\n target_position_km = [6378, 0, 0]\n derived_coords = GeoUtilityFunctions.calculate_derived_satellite_coords(\n tObs_JDUT1, obs_position_km, obs_vel_vec_kmps, target_position_km)\n self.assertEqual(derived_coords[\"derived_obsTime_JDUT1\"], 100)\n self.assertEqual(\n derived_coords[\"derived_obs_pos_km\"], [6378+700, 0, 0])\n self.assertEqual(derived_coords[\"derived_range_vec_km\"], [-700, 0, 0])\n self.assertAlmostEqual(derived_coords[\"derived_alt_km\"], 700, delta=1)\n self.assertEqual(derived_coords[\"derived_incidence_angle_rad\"], 0)\n\n tObs_JDUT1 = 100.0\n obs_position_km = [6378+700, 0, 0]\n obs_vel_vec_kmps = [0, -6.5, 0]\n 
target_position_km = [6378, 0, 100]\n derived_coords = GeoUtilityFunctions.calculate_derived_satellite_coords(\n tObs_JDUT1, obs_position_km, obs_vel_vec_kmps, target_position_km)\n self.assertEqual(derived_coords[\"derived_obsTime_JDUT1\"], 100)\n self.assertEqual(\n derived_coords[\"derived_obs_pos_km\"], [6378+700, 0, 0])\n self.assertEqual(\n derived_coords[\"derived_range_vec_km\"], [-700, 0, 100])\n self.assertAlmostEqual(derived_coords[\"derived_alt_km\"], 700, delta=1)\n self.assertAlmostEqual(derived_coords[\"derived_incidence_angle_rad\"], (np.arcsin(np.arctan2(\n 100, 700) * (6378+700)/6378.137)), places=2) # note that look angle of truth data is approximate\n\n # Test for cases where the input satellite position, time is different from derived satellite position, time\n\n tObs_JDUT1 = 100.0\n obs_position_km = [6378+700, 0, 0]\n obs_vel_vec_kmps = [0, 6.5, 0]\n target_position_km = [6378, 6.5, 0]\n derived_coords = GeoUtilityFunctions.calculate_derived_satellite_coords(\n tObs_JDUT1, obs_position_km, obs_vel_vec_kmps, target_position_km)\n self.assertEqual(derived_coords[\"derived_obsTime_JDUT1\"], 101)\n self.assertEqual(derived_coords[\"derived_obs_pos_km\"], [\n 6378+700, 6.5, 0])\n self.assertEqual(derived_coords[\"derived_range_vec_km\"], [-700, 0, 0])\n self.assertAlmostEqual(derived_coords[\"derived_alt_km\"], 700, delta=1)\n # approximate truth data since Earth curvature is ignored\n self.assertAlmostEqual(\n derived_coords[\"derived_incidence_angle_rad\"], 0, places=2)\n\n tObs_JDUT1 = 100.0\n obs_position_km = [6378+700, 0, 0]\n obs_vel_vec_kmps = [0, 6.5, 0]\n target_position_km = [6378, 13.0, 100]\n derived_coords = GeoUtilityFunctions.calculate_derived_satellite_coords(\n tObs_JDUT1, obs_position_km, obs_vel_vec_kmps, target_position_km)\n self.assertEqual(derived_coords[\"derived_obsTime_JDUT1\"], 102)\n self.assertEqual(derived_coords[\"derived_obs_pos_km\"], [\n 6378+700, 13.0, 0])\n self.assertEqual(\n derived_coords[\"derived_range_vec_km\"], [-700, 0, 100])\n self.assertAlmostEqual(derived_coords[\"derived_alt_km\"], 700, delta=1)\n self.assertAlmostEqual(derived_coords[\"derived_incidence_angle_rad\"], (np.arcsin(\n np.arctan2(100, 700) * (6378+700)/6378.137)), places=2)\n\n\nclass TestFileUtilityFunctions(unittest.TestCase):\n def test_from_json(self):\n\n # Test empty JSON string\n o = FileUtilityFunctions.from_json('{}')\n self.assertEqual(o, {})\n\n # Test one string field\n o = FileUtilityFunctions.from_json('{\"name\": \"Maple\"}')\n self.assertEqual(o[\"name\"], \"Maple\")\n self.assertIsInstance(o[\"name\"], str)\n\n # Test erroneous JSON format\n with self.assertRaises(Exception):\n FileUtilityFunctions.from_json('{\"name\": }')\n\n # Test two string fields\n o = FileUtilityFunctions.from_json(\n '{\"name\": \"Maple\", \"@type\": \"Syrup\"}')\n self.assertEqual(o[\"name\"], \"Maple\")\n self.assertEqual(o[\"@type\"], \"Syrup\")\n\n # Test two string fields, one number field\n o = FileUtilityFunctions.from_json(\n '{\"name\": \"Maple\", \"@type\": \"Syrup\", \"volume\": 10.4}')\n self.assertEqual(o[\"name\"], \"Maple\")\n self.assertEqual(o[\"@type\"], \"Syrup\")\n self.assertEqual(o[\"volume\"], 10.4)\n self.assertIsInstance(o[\"volume\"], float)\n\n # Test two string fields, one number field, one list (str) field\n o = FileUtilityFunctions.from_json(\n '{\"name\": \"Maple\", \"@type\": \"Syrup\", \"volume\": 10.4, \"places\": [\"CA\",\"TX\",\"WA\"]}')\n self.assertEqual(o[\"name\"], \"Maple\")\n self.assertEqual(o[\"@type\"], \"Syrup\")\n 
self.assertEqual(o[\"volume\"], 10.4)\n self.assertEqual(o[\"places\"], [\"CA\", \"TX\", \"WA\"])\n self.assertIsInstance(o[\"places\"], list)\n\n # Test two string fields, one number field, one list (value) field\n o = FileUtilityFunctions.from_json(\n '{\"name\": \"Maple\", \"@type\": \"Syrup\", \"volume\": 10.4, \"batches\": [2011,2018,2023]}')\n self.assertEqual(o[\"name\"], \"Maple\")\n self.assertEqual(o[\"@type\"], \"Syrup\")\n self.assertEqual(o[\"volume\"], 10.4)\n self.assertEqual(o[\"batches\"], [2011, 2018, 2023])\n self.assertIsInstance(o[\"batches\"], list)\n\n # Test passing of a dictionary\n data = {'name': 'Maple', '@type': 'Syrup',\n 'batches': [2011, 2018, 2023]}\n o = FileUtilityFunctions.from_json(data)\n self.assertEqual(o[\"name\"], \"Maple\")\n self.assertEqual(o[\"@type\"], \"Syrup\")\n self.assertEqual(o[\"batches\"], [2011, 2018, 2023])\n\n # Test nested json fields\n o = FileUtilityFunctions.from_json(\n '{\"name\": \"Maple\", \"@type\": \"Syrup\", \"volume\": 10.4, \"nutrition\": { \"Fat\": 0, \"Sodium\": 2, \"Protein\": 0}}')\n self.assertEqual(o[\"name\"], \"Maple\")\n self.assertEqual(o[\"@type\"], \"Syrup\")\n self.assertEqual(o[\"nutrition\"], {\n 'Fat': 0, 'Sodium': 2, 'Protein': 0})\n self.assertEqual(o[\"nutrition\"][\"Fat\"], 0)\n \nclass TestAntenna(unittest.TestCase): #TODO\n pass\n" ]
[ [ "numpy.arange", "numpy.linalg.norm", "numpy.sin", "numpy.arctan2", "numpy.testing.assert_almost_equal", "numpy.deg2rad", "numpy.cross", "numpy.meshgrid" ] ]
Psirus/Nimfem
[ "6cd97997e92a62ff720d8a0073e072247320d251" ]
[ "benchmark/comparison.py" ]
[ "import os\nimport subprocess\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nelements = np.logspace(2, 7, 6)\n\ntime_nim = []\ntime_fenics = []\n\nfenics_env = os.environ.copy()\nfenics_env[\"OMP_NUM_THREADS\"] = \"1\"\n\nos.makedirs(\"timings\", exist_ok=True)\nrun = True\nfor n in [int(np.sqrt(n / 2)) for n in elements]:\n print(n)\n\n if run:\n subprocess.run([\"/usr/bin/time\", \"-o\", f\"timings/nim{n}.txt\", \"./poisson\", f\"{n}\"])\n subprocess.run(\n [\n \"/usr/bin/time\",\n \"-o\",\n f\"timings/fenics{n}.txt\",\n \"python3\",\n \"poisson.py\",\n f\"{n}\",\n ],\n env=fenics_env,\n )\n\n with open(f\"timings/nim{n}.txt\") as f:\n line = f.readline()\n user_time = line.split(\"user\")[0]\n time_nim.append(float(user_time))\n\n with open(f\"timings/fenics{n}.txt\") as f:\n line = f.readline()\n user_time = line.split(\"user\")[0]\n time_fenics.append(float(user_time))\n\ntime_nim = np.array(time_nim)\ntime_fenics = np.array(time_fenics)\nplt.loglog(elements, time_nim, marker=\".\", label=\"Nimfem\")\nplt.loglog(elements, time_fenics, marker=\".\", label=\"FEniCS\")\nplt.legend()\nplt.xlabel(\"Number of Elements\")\nplt.ylabel(\"Runtime [s]\")\n\nplt.savefig(\"poisson_comparison.png\")\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.logspace", "matplotlib.pyplot.loglog", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
usnistgov/chebby
[ "75dbccfd9a029e91cbfdfd263befc51b893822ea" ]
[ "scripts/test_Basu.py" ]
[ "\"\"\"\nA driver script to test the implementation in cheb2d_Basu for correctness and speed (in Python at least)\n\"\"\"\nimport timeit\nfrom cheb2d_Basu import *\nimport matplotlib.pyplot as plt\nimport pandas\n\nx_ = 0.7\ny_ = 0.7\n\n# f = lambda x, y: np.sin(x)*np.cos(-0.01*y**2)\nf = lambda x, y: x**3*np.sin(-x**2)*np.cos(x)*np.exp(-x**2-y**2)\n\no = []\nfor M in [4,6,8,10,12,14,16]:\n print(M)\n a = get_coeff_mat(f, m=M, n=M)\n\n N = 100\n\n tic = timeit.default_timer()\n for i in range(N):\n eval_naive(amat=a, x=x_, y=y_)\n toc = timeit.default_timer()\n time_naive = (toc-tic)/N*1e6\n print(time_naive, 'us/call for naive implementation')\n\n tic = timeit.default_timer()\n for i in range(N):\n eval_Clenshaw(amat=a, x=x_, y=y_)\n toc = timeit.default_timer()\n time_Clenshaw = (toc-tic)/N*1e6\n print(time_Clenshaw, 'us/call for Clenshaw implementation in 2D')\n\n assert(abs(eval_Clenshaw(amat=a, x=x_, y=y_) - eval_naive(amat=a, x=x_, y=y_)) < 1e-14)\n\n print(\n eval_Clenshaw(amat=a, x=x_, y=y_), \n eval_naive(amat=a, x=x_, y=y_), \n f(x_, y_)\n )\n o.append({\n 'M': M,\n 'naive': time_naive,\n 'Clenshaw': time_Clenshaw\n })\n\ndf = pandas.DataFrame(o)\nplt.plot(df['M'], df['naive'], 'o-')\nplt.plot(df['M'], df['Clenshaw'], 'o-')\nplt.yscale('log')\nplt.gca().set(xlabel='$M$, of $M^2$ matrix', ylabel='time / s')\nplt.show()" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.yscale", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.show" ] ]
Jitesh17/ortools_projects
[ "475ec8fd24bd5826d968b1165c8cafa0553ca290" ]
[ "test/yanagi1.py" ]
[ "from __future__ import annotations\n\nimport os\nimport sys\n\nimport numpy as np\nimport openpyxl\nimport pandas as pd\nimport printj\nfrom ortools.sat.python import cp_model\nfrom printj import ColorText as ct\n\n# from typing import Union\n\n\nclass TimeVar:\n def __init__(self, hours: int, minutes: int):\n while minutes > 60:\n minutes -= 60\n hours += 1\n self.hours = hours\n self.minutes = minutes\n self.time_str = f'{hours}:{minutes}'\n\n def __str__(self):\n return self.time_str\n\n def __add__(self, added_time: TimeVar):\n return TimeVar(self.hours + added_time.hours, self.minutes + added_time.minutes)\n\n @classmethod\n def by_string(cls, time: str):\n time_split_hour_min = time.split(\":\")\n hours = int(time_split_hour_min[0])\n minutes = int(time_split_hour_min[1])\n return cls(hours, minutes)\n# # function to get unique values\n# def unique(list1):\n\n# # insert the list to the set\n# list_set = set(list1)\n# # convert the set to the list\n# unique_list = (list(list_set))\n# # for x in unique_list:\n# # print x,\n# return unique_list\n\nclass Scheduler:\n def __init__(self, input_data_package, input_data_worker, input_data_location,\n time_shifts,\n num_vehicles: int = 4, ):\n self.input_data_package = input_data_package\n self.input_data_worker = input_data_worker\n self.input_data_location = input_data_location\n self.time_shifts = time_shifts\n self.num_vehicles = num_vehicles\n\n\n def solution_printer(self, shifts):\n\n alphabets = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"\"\"\n data = []\n for p in range(num_packages):\n # print('Package %i' % p)\n data_i = []\n for s in range(num_shifts):\n s_val = ct.white('0 ')\n for w in range(num_workers):\n is_working = False\n for v in range(num_vehicles):\n if solver.Value(shifts[(w, p, v, s)]) == 1:\n is_working = True\n # print(' Worker %i works shift %i' % (w, s))\n text_worker = ct.green(\n f'Worker {alphabets[w]}')\n # text_shift = ct.purple(f'shift {[\"9:00\", \"10:00\", \"11:00\", \"12:00\", ][s]}')\n text_shift = ct.purple(f'shift {time_shifts[s]}')\n # text_shift = ct.purple(f'shift {s}')\n text_package = ct.cyan(f'package-{p}')\n text_vehicle = ct.yellow(\n f'vehicle {v+1}')\n # text_keiro = ct.yellow(\n # f'keiro {[\"Main2\", \"Main1\", \"SUB\", ][v]}')\n # if p in [2, 4]:\n # print(\n # f' {text_worker} at {text_shift} moves {text_package} using {text_vehicle}')\n s_val = ct.green(f'{alphabets[w]}{v+1} ')\n data_i.append(s_val)\n data.append(data_i)\n # data = pd.DataFrame(data, columns=time_shifts)\n data = pd.DataFrame(data, columns=[ct.yellow(f' {s}') for s in time_shifts])\n \"\"\"\n data = []\n data_moved = []\n for p in range(self.num_packages):\n # print('Package %i' % p)\n num_packages_moved = 0\n data_i = []\n for s in range(self.num_shifts):\n s_val = '0 '\n for w in range(self.num_workers):\n is_working = False\n for v in range(self.num_vehicles):\n # print(\"self.solver.Value(shifts[(w, p, v, s)])\", self.solver.Value(shifts[(w, p, v, s)]))\n if self.solver.Value(shifts[(w, p, v, s)]) == 1:\n is_working = True\n # print(' Worker %i works shift %i' % (w, s))\n text_worker = f'Worker {alphabets[w]}'\n # text_shift = ct.purple(f'shift {[\"9:00\", \"10:00\", \"11:00\", \"12:00\", ][s]}')\n text_shift = f'shift {self.time_shifts[s]}'\n # text_shift = ct.purple(f'shift {s}')\n text_package = f'package-{p}'\n text_vehicle = f'vehicle {v+1}'\n # text_keiro = ct.yellow(\n # f'keiro {[\"Main2\", \"Main1\", \"SUB\", ][v]}')\n # if p in [2, 4]:\n # print(\n # f' {text_worker} at {text_shift} moves {text_package} using 
{text_vehicle}')\n s_val = f'{alphabets[w]}{v+1} '\n num_packages_moved += 1\n data_i.append(s_val)\n data.append(data_i)\n data_moved.append([\n num_packages_moved, \n self.input_data_package.quantity[p] - num_packages_moved, \n self.input_data_package.yesterday[p] + num_packages_moved - self.input_data_package.decay[p]*self.num_shifts])\n # data = pd.DataFrame(data, columns=time_shifts)\n data = pd.DataFrame(data, columns=[f' {s}' for s in self.time_shifts])\n\n data_moved = pd.DataFrame(data_moved, columns=['moved', 'not_moved', 'q_at_destination'])\n print(data_moved)\n \n data = pd.concat([\n data,\n data_moved], axis=1)\n data.index = [f'Package-{p}' for p in range(self.num_packages)]\n print()\n print(data)\n return data\n\n\n def solution_writer(self):\n output_path = 'test/xl.xlsx'\n print()\n self.output_data = self.output_data.reset_index(drop=True)\n self.input_data_package_orig = pd.concat([\n self.input_data_package_orig[['package', 'quantity', 'decay',\n 'location', 'vehicle', 'next', 'yesterday']],\n self.output_data], axis=1)\n print(self.input_data_package_orig)\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter(output_path, engine='xlsxwriter')\n # Write each dataframe to a different worksheet.\n self.input_data_package_orig.to_excel(\n writer, sheet_name='Sheet_package', index=False)\n self.input_data_worker_orig.to_excel(writer, sheet_name='Sheet_worker', index=False)\n self.input_data_location_orig.to_excel(\n writer, sheet_name='Sheet_location', index=False)\n # output_data.to_excel(writer, sheet_name='Sheet_schedule')\n writer.save()\n\n\n def run(self):\n # Data.\n # package_to_table = [\n # [1, 0, 0, 0, 0, 0],\n # [1, 1, 0, 0, 0, 0],\n # [0, 0, 1, 1, 0, 0],\n # [0, 0, 0, 0, 1, 0],\n # [0, 0, 0, 1, 0, 1],\n # [1, 1, 1, 1, 1, 1],\n # ]\n # workers_to_table = [\n # [1, 1, 1, 1, 0, 1],\n # [1, 1, 1, 1, 1, 0],\n # [1, 1, 1, 1, 0, 1],\n # [1, 1, 1, 1, 1, 0],\n # ]\n self.input_data_package.dropna(subset = [\"package\"], inplace=True)\n self.input_data_package_orig, self.input_data_worker_orig, self.input_data_location_orig = self.input_data_package.copy(\n ), self.input_data_worker.copy(), self.input_data_location.copy()\n printj.yellow('::::::::::::::::::: preprocess :::::::::::::::::::')\n\n print(self.input_data_package)\n self.input_data_package.vehicle = [\n [int(i) for i in v.split(\",\")] for v in self.input_data_package.vehicle]\n self.input_data_package.next = [v if isinstance(\n v, int) else None for v in self.input_data_package.next]\n self.input_data_worker.location = [\n [int(i) for i in v.split(\",\")] for v in self.input_data_worker.location]\n self.input_data_worker.vehicle = [\n [int(i) for i in v.split(\",\")] for v in self.input_data_worker.vehicle]\n self.num_locations = len(self.input_data_location.location)\n # package_to_location = pd.crosstab(\n # index=self.input_data_package['package'], columns=self.input_data_package['location']).to_numpy()\n package_to_location = pd.DataFrame({p: [1 if l in [location_list] else 0 for l in range(self.num_locations)]\n for p, location_list in enumerate(self.input_data_package.location)}).T.to_numpy() # num_location \n package_to_vehicle = pd.DataFrame({p: [1 if (v+1) in vehicles_list else 0 for v in range(self.num_vehicles)]\n for p, vehicles_list in enumerate(self.input_data_package.vehicle)}).T.to_numpy() # num_vehicle = 4\n worker_to_vehicle = pd.DataFrame({p: [1 if (v+1) in vehicles_list else 0 for v in range(self.num_vehicles)]\n for p, vehicles_list in 
enumerate(self.input_data_worker.vehicle)}).T.to_numpy() # num_vehicle = 4\n location_to_worker = pd.DataFrame({p: [1 if v in worker_list else 0 for v in range(\n self.num_locations)] for p, worker_list in enumerate(self.input_data_worker.location)}).to_numpy() # num_keiro = 6\n package_orders = [[i, int(next_i)] for (i, next_i) in zip(\n self.input_data_package.package, self.input_data_package.next) if pd.notna(next_i)]\n print(\"package_to_vehicle\\n\", package_to_vehicle)\n print(\"worker_to_vehicle\\n\", worker_to_vehicle)\n print(\"package_to_location\\n\", package_to_location)\n print(\"location_to_worker\\n\", location_to_worker)\n print(\"package_orders\\n\", package_orders)\n print()\n print()\n # print(package_to_location.to_numpy())\n # sys.exit()\n\n # package_orders = [[0, 1], [1, 2], ]\n # main2, main1, sub\n # package_to_vehicle = np.array([\n # [1, 1, 1, 1],\n # [1, 0, 0, 0],\n # [1, 0, 0, 0],\n # [0, 1, 1, 0],\n # [0, 0, 1, 1],\n # [0, 0, 1, 1],\n # ])\n # package_to_location = np.array([\n # [1, 0, 0],\n # [1, 0, 0],\n # [1, 0, 0],\n # [0, 1, 0],\n # [1, 0, 0],\n # [0, 0, 1],\n # ])\n # workers_to_keiro = np.array([\n # [1, 0, 1],\n # [1, 1, 0],\n # [1, 0, 1],\n # [1, 1, 0],\n # ])\n self.num_workers = len(self.input_data_worker.worker) # 4\n self.num_packages = len(self.input_data_package.package) # 5\n self.num_shifts = len(self.time_shifts)\n # num_tables = 6\n all_workers = range(self.num_workers)\n all_packages = range(self.num_packages)\n all_shifts = range(self.num_shifts)\n all_vehicles = range(self.num_vehicles)\n all_locations = range(self.num_locations)\n\n # print(all_vehicles)\n print(\n f'\\nNo. of package {self.num_packages}, No. of workers {self.num_workers}')\n alphabets = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"\"\"\n available_workers_per_package = []\n for i, item in enumerate(package_to_vehicle):\n available_workers_list = []\n for j, table in enumerate(item):\n if table == 1:\n available_workers_list += [k for k in range(len(workers_to_keiro)) if workers_to_keiro[k][j]==1]\n available_workers_list = unique(available_workers_list)\n print(f'Package-{i} can be moved by workers {\"\".join(alphabets[l] for l in available_workers_list)}')\n available_workers_per_package.append(available_workers_list)\n\n print(available_workers_per_package)\n print(np.array(available_workers_per_package))\n \"\"\"\n # package_to_worker = np.matmul(package_to_location, workers_to_keiro.T)\n # print(package_to_location.shape, location_to_worker.shape)\n package_to_worker = np.matmul(package_to_location, location_to_worker)\n available_workers_per_package = [\n [i for i, ll in enumerate(l) if ll == 1] for l in package_to_worker]\n available_vehicles_per_package = [\n [i for i, ll in enumerate(l) if ll == 1] for l in package_to_vehicle]\n available_packages_per_location = [\n [i for i, ll in enumerate(l) if ll == 1] for l in package_to_location.T]\n available_vehicles_per_worker = [\n [i for i, ll in enumerate(l) if ll == 1] for l in worker_to_vehicle]\n\n # print()\n # for p, item in enumerate(available_workers_per_package):\n # text_worker = ct.green(\n # f'workers {\"\".join(alphabets[l] for l in item)}')\n # text_package = ct.cyan(f'Package-{p}')\n # print(f'{text_package} can be moved by {text_worker}')\n print()\n for w, item in enumerate(available_vehicles_per_worker):\n text_vehicle = ct.green(\n f'vehicle {\", \".join(f\"{l+1}\" for l in item)}')\n text_worker = ct.cyan(f'worker {alphabets[w]}')\n print(f'{text_worker} can use {text_vehicle}')\n print()\n # for p, item in 
enumerate(available_vehicles_per_package):\n # text_vehicle = ct.yellow(\n # f'vehicle {\" \".join([\"Main2\", \"Main1\", \"SUB\", ][l] for l in item)}')\n # text_package = ct.cyan(f'Package-{p}')\n # print(f'{text_package} can be moved to {text_vehicle}')\n # print()\n for p, (workers, vehicles) in enumerate(zip(available_workers_per_package, available_vehicles_per_package)):\n text_worker = ct.green(\n f'workers {\", \".join(alphabets[l] for l in workers)}')\n text_vehicle = ct.yellow(\n f'vehicle {\", \".join(str(v) for v in vehicles)}')\n text_package = ct.cyan(f'Package-{p}')\n print(f'{text_package} can be moved by \\t{text_worker}\\tusing {text_vehicle}')\n print()\n for l, item in enumerate(available_packages_per_location):\n text_package = ct.cyan(f'package {\", \".join(f\"{i}\" for i in item)}')\n text_location = ct.green(\n f'location {l}')\n print(f'{text_location} carries {text_package}')\n print()\n\n # vehicle_to_worker = np.matmul(package_to_vehicle.T, package_to_worker)\n # sys.exit()\n # Creates the model.\n model = cp_model.CpModel()\n\n # Creates shift variables.\n # shifts[(w, p, v, s)]: nurse 'n' works shift 's' on package 'd'.\n shifts = {}\n for w in all_workers:\n for p in all_packages:\n for v in all_vehicles:\n for s in all_shifts:\n shifts[(w, p, v, s)] = model.NewBoolVar(\n 'shift_w%ip%iv%is%i' % (w, p, v, s))\n package_quantity = 1\n for pi, p in enumerate(all_packages):\n package_quantity = self.input_data_package.quantity[pi]\n\n # package_quantity = min(package_quantity, )\n # 1 worker needed per package\n\n model.Add(sum(sum(sum(shifts[(w, p, v, s)] for v in all_vehicles)\n for s in all_shifts) for w in all_workers) <= package_quantity)\n # 1 available worker per package\n model.Add(sum(sum(sum(shifts[(w, p, v, s)] for v in all_vehicles)\n for s in all_shifts) for w in available_workers_per_package[p]) <= package_quantity)\n # 1 available vehicle per package\n model.Add(sum(sum(sum(shifts[(w, p, v, s)] for w in all_workers)\n for s in all_shifts) for v in available_vehicles_per_package[p]) <= package_quantity)\n\n for s in all_shifts:\n model.Add(sum(sum(shifts[(w, p, v, s)]\n for v in all_vehicles) for w in all_workers) <= 1)\n\n # Capacity constraints\n # location_filled = dict.fromkeys(self.input_data_location.location, 0)\n for l in all_locations:\n # total_quantity = sum(self.input_data_package.quantity[p] for p in available_packages_per_location[l])\n # print(total_quantity)\n # location_filled[l] += sum(sum(sum(sum(shifts[(w, p, v, s)]for v in all_vehicles) for s in all_shifts) for w in all_workers) for p in available_packages_per_location[l])\n capacity = self.input_data_location.capacity[l]\n # decay = self.input_data_location.decay[l]\n # current_empty_space = capacity # 10 = 3 nimotsu + 7 empty_space = 2 nimotsu + 8 empty_space = 10 empty_space\n # empty_space = max(current_empty_space + decay*1, capacity) # using max: empty space can't be more than the capacity of the shelf/location\n # empty_space = min(total_quantity, empty_space) # using min:\n # model.Add(location_filled[l]==empty_space)\n for si in all_shifts:\n for p in available_packages_per_location[l]:\n constant = 1 # Use/ change when decay is a fraction like 0.5\n decay = self.input_data_package.decay[p]*constant\n # sum_package = sum(sum(sum(sum(shifts[(w, p, v, s)]for v in all_vehicles) for w in all_workers) for s in range(si+1)))\n sum_package = sum(sum(sum(shifts[(\n w, p, v, s)]for v in all_vehicles) for w in all_workers) for s in range(si+1))\n sum_package += 
self.input_data_package.yesterday[p]\n model.Add(sum_package*constant-decay*(si+1) <= capacity*constant)\n model.Add(sum_package-decay*(si+1)*constant >= 0)\n # print(capacity, sum_package, decay*(si+1))\n # print()\n\n # 1 W, V, S for 1 package\n for s in all_shifts:\n for w in all_workers:\n for v in all_vehicles:\n model.Add(sum(shifts[(w, p, v, s)] for p in all_packages) <= 1)\n\n printj.red(f'all_vehicles: {list(all_vehicles)}')\n printj.red(\n f'available_vehicles_per_worker: {available_vehicles_per_worker}')\n for w in all_workers:\n for v in all_vehicles:\n # 1 available vehicle per worker\n if v in available_vehicles_per_worker[w]:\n model.Add(sum(sum(shifts[(w, p, v, s)] for p in all_packages)\n for s in all_shifts) >= 0)\n else:\n model.Add(sum(sum(shifts[(w, p, v, s)] for p in all_packages)\n for s in all_shifts) == 0)\n\n # package_order # s(p=2) < s(p=4)\n for package_order in package_orders:\n shift_before = 0\n for s in all_shifts:\n for w in all_workers:\n for v in all_vehicles:\n # s = {0, 1, 2, 3}\n shift_before += shifts[(w, package_order[0], v, s)]\n shift_after = 0\n # for s2 in range(s, num_shifts):\n for s2 in range(s+2):\n if s2 < self.num_shifts:\n for w2 in all_workers:\n for v2 in all_vehicles:\n # (4 - {0, 1, 2, 3})\n shift_after += shifts[(w2,\n package_order[1], v2, s2)]\n # model.Add(shift_before <= shift_after)\n model.Add(shift_before == shift_after).OnlyEnforceIf(\n shifts[(w, package_order[0], v, s)])\n model.Add(shift_before == shift_after).OnlyEnforceIf(\n shifts[(w, package_order[1], v, s)])\n\n # # pylint: disable=g-complex-comprehension\n \n objective = sum(sum(sum(sum(sum(shifts[(w, p, v, s)] for v in all_vehicles) for w in all_workers) for s in range(si+1)) for p in all_packages) for s in all_shifts)\n model.Maximize(objective)\n\n printj.yellow('::::::::::::::::::::: Output :::::::::::::::::::::')\n # Creates the solver and solve.\n self.solver = cp_model.CpSolver()\n self.status = self.solver.Solve(model)\n if self.status == cp_model.OPTIMAL:\n self.output_data = self.solution_printer(shifts)\n self.solution_writer()\n else:\n print(\"No solutions\")\n # Statistics.\n print()\n print('Statistics')\n # print(' - Number of shift requests met = %i' % solver.ObjectiveValue(),\n # '(out of', num_nurses * min_shifts_per_nurse, ')')\n print(' - wall time : %f s' % self.solver.WallTime())\n # assert solution_printer.solution_count() == 5\n\n\n def bool2int(x):\n y = 0\n for i, j in enumerate(x):\n y += j << i\n return y\n\n\ndef main():\n\n printj.yellow('::::::::::::::::::::: Input :::::::::::::::::::::')\n path = \"test/xl.xlsx\"\n input_data_package = pd.read_excel(open(path, 'rb'),\n sheet_name='Sheet_package')\n input_data_worker = pd.read_excel(open(path, 'rb'),\n sheet_name='Sheet_worker')\n input_data_location = pd.read_excel(open(path, 'rb'),\n sheet_name='Sheet_location')\n num_vehicles = 4\n num_shifts = 20\n time_shifts = [TimeVar(6, 30) + TimeVar(0, 20*i)\n for i in range(num_shifts)]\n \"\"\"\n input_data_package = pd.DataFrame({\n \"package\": [0, 1],\n \"quantity\": [2, 2], \n \"location\": [0, 0],\n \"vehicle\": [[1, 2, 3, 4], [1]],\n # \"next\": [None, 2, 3, 4, 5, None],\n \"next\": [None, None], # Only work if the quantity is same,\n \"yesterday\": [1, 2], \n })\n input_data_worker = pd.DataFrame({\n \"worker\": list(\"ABCD\"),\n \"location\": [[0, 2], [0, 1], [0, 2], [0, 1]],\n \"vehicle\": [[1 ], [1, 2, 3, 4], [1], [1, 2, 3, 4]],\n })\n input_data_location = pd.DataFrame({\n \"location\": list(range(3)),\n # \"decay\": [1, 1, 1], # 
per shift\n \"capacity\": [4, 3, 3],\n }) # 4 - num_pack_loc0_shift + f(decay*(0, shift))\n \"\"\"\n \"\"\"\n input_data_package = pd.DataFrame({\n \"package\": [0, 1, 2, 3, 4, 5],\n \"quantity\": [20000, 20000, 20000, 20000, 20000, 200000], \n \"decay\": [1, 1, 1, 1, 1, 1],\n \"location\": [0, 0, 0, 1, 0, 2],\n \"vehicle\": [[1, 2, 3, 4], [1], [1], [2, 3], [3, 4], [1]],\n # \"next\": [None, 2, 3, 4, 5, None],\n \"next\": [None, None, None, None, None, None], # Only work if the quantity is same,\n \"yesterday\": [0, 0, 0, 0, 0, 0], \n })\n input_data_worker = pd.DataFrame({\n \"worker\": list(\"ABCD\"),\n \"location\": [[0, 2], [0, 1], [0, 2], [0, 1]],\n \"vehicle\": [[1 ], [1, 2, 3, 4], [1], [1, 2, 3, 4]],\n })\n input_data_location = pd.DataFrame({\n \"location\": list(range(3)),\n # \"decay\": [1, 1, 1], # per shift\n \"capacity\": [10, 10, 10],\n }) # 4 - num_pack_loc0_shift + f(decay*(0, shift))\n \"\"\"\n \"\"\"\n input_data_package = pd.DataFrame({\n \"package\": [0],\n \"quantity\": [6], \n \"location\": [0],\n \"vehicle\": [[1, 2, 3, 4]],\n # \"next\": [None, 2, 3, 4, 5, None],\n \"next\": [None], # Only work if the quantity is same\n \"yesterday\": [1], \n })\n input_data_worker = pd.DataFrame({\n \"worker\": list(\"ABCD\"),\n \"location\": [[0], [0], [0], [0]],\n \"vehicle\": [[1 ], [1, 2, 3, 4], [1], [1, 2, 3, 4]],\n })\n input_data_location = pd.DataFrame({\n \"location\": list(range(1)),\n # \"decay_rate\": [1, 1, 1], # per shift\n \"capacity\": [1],\n }) # 4 - num_pack_loc0_shift + f(decay*(0, shift))\n \"\"\"\n # \"\"\"\n print(input_data_package)\n print(input_data_worker)\n print(input_data_location)\n print()\n Scheduler(input_data_package, input_data_worker, input_data_location,\n time_shifts,\n num_vehicles).run()\n \n # \"\"\"\n \"\"\" \n wb = openpyxl.Workbook()\n sheet = wb.active\n\n sheet_title = sheet.title\n wb.save(path)\n\n print(\"active sheet title: \" + sheet_title)\n \"\"\"\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.notna", "pandas.concat", "numpy.matmul", "pandas.DataFrame", "pandas.ExcelWriter" ] ]
smithis7/tardis
[ "02a816b66ba0e9cd778ce598a7d0afa4a25370c0" ]
[ "tardis/montecarlo/montecarlo_numba/formal_integral.py" ]
[ "import sys\nimport warnings\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as sp\nfrom scipy.interpolate import interp1d\nfrom astropy import units as u\nfrom tardis import constants as const\nfrom numba import njit, char, float64, int64, typeof, byte, prange\nfrom numba.experimental import jitclass\nimport pdb\n\nfrom tardis.montecarlo.montecarlo_numba.numba_config import SIGMA_THOMSON\nfrom tardis.montecarlo.montecarlo_numba import njit_dict, njit_dict_no_parallel\nfrom tardis.montecarlo.montecarlo_numba.numba_interface import \\\n (numba_plasma_initialize, NumbaModel, NumbaPlasma)\n\nfrom tardis.montecarlo.spectrum import TARDISSpectrum\n\nC_INV = 3.33564e-11\nM_PI = np.arccos(-1)\nKB_CGS = 1.3806488e-16\nH_CGS = 6.62606957e-27\n\nclass IntegrationError(Exception):\n pass\n\n\n@njit(**njit_dict)\ndef numba_formal_integral(model, plasma, iT, inu, inu_size, att_S_ul, Jred_lu, Jblue_lu, tau_sobolev, electron_density, N):\n '''\n model, plasma, and estimator are the numba variants\n '''\n # todo: add all the original todos\n # Initialize the output which is shared among threads\n L = np.zeros(inu_size, dtype=np.float64)\n # global read-only values\n size_line, size_shell = tau_sobolev.shape\n size_tau = size_line * size_shell\n R_ph = model.r_inner[0] # make sure these are cgs\n R_max = model.r_outer[size_shell - 1]\n pp = np.zeros(N, dtype=np.float64) # check\n exp_tau = np.zeros(size_tau, dtype=np.float64)\n exp_tau = np.exp(-tau_sobolev.T.ravel()) # maybe make this 2D?\n pp[::] = calculate_p_values(R_max, N)\n line_list_nu = plasma.line_list_nu\n # done with instantiation\n # now loop over wavelength in spectrum\n for nu_idx in prange(inu_size):\n I_nu = np.zeros(N, dtype=np.float64)\n z = np.zeros(2 * size_shell, dtype=np.float64)\n shell_id = np.zeros(2 * size_shell, dtype=np.int64)\n offset = 0\n size_z = 0\n idx_nu_start = 0\n direction = 0\n first = 0\n i = 0\n p = 0.0\n nu_start = 0.0\n nu_end = 0.0\n nu = 0.0\n zstart = 0.0\n zend = 0.0\n escat_contrib = 0.0\n escat_op = 0.0\n Jkkp = 0.0\n pexp_tau = 0\n patt_S_ul = 0\n pJred_lu = 0\n pJblue_lu = 0\n pline = 0\n\n\n nu = inu[nu_idx]\n # now loop over discrete values along line\n for p_idx in range(1, N):\n escat_contrib = 0\n p = pp[p_idx]\n\n # initialize z intersections for p values\n size_z = populate_z(model, p, z, shell_id) # check returns\n # initialize I_nu\n if p <= R_ph:\n I_nu[p_idx] = intensity_black_body(nu * z[0], iT)\n else:\n I_nu[p_idx] = 0\n\n # find first contributing lines\n nu_start = nu * z[0]\n nu_end = nu * z[1]\n idx_nu_start = line_search(plasma.line_list_nu,\n nu_start, size_line)\n offset = shell_id[0] * size_line\n # start tracking accumulated e-scattering optical depth\n zstart = model.time_explosion / C_INV * (1. - z[0])\n # Initialize \"pointers\"\n pline = int(idx_nu_start)\n pexp_tau = int(offset + idx_nu_start)\n patt_S_ul = int(offset + idx_nu_start)\n pJred_lu = int(offset + idx_nu_start)\n pJblue_lu = int(offset + idx_nu_start)\n\n # flag for first contribution to integration on current p-ray\n first = 1\n nu_ends = nu * z[1:]\n nu_ends_idxs = size_line - np.searchsorted(\n line_list_nu[::-1], \n nu_ends, \n side='right'\n )\n # loop over all interactions \n for i in range(size_z - 1):\n escat_op = electron_density[int(shell_id[i])] * SIGMA_THOMSON\n nu_end = nu_ends[i]\n nu_end_idx = nu_ends_idxs[i]\n for _ in range(max(nu_end_idx-pline,0)):\n\n # calculate e-scattering optical depth to next resonance point\n zend = model.time_explosion / C_INV * (1. 
- line_list_nu[pline] / nu) # check\n\n if first == 1:\n # first contribution to integration\n # NOTE: this treatment of I_nu_b (given\n # by boundary conditions) is not in Lucy 1999;\n # should be re-examined carefully\n escat_contrib += (zend - zstart) * escat_op * (\n Jblue_lu[pJblue_lu] - I_nu[p_idx]);\n first = 0;\n else:\n # Account for e-scattering, c.f. Eqs 27, 28 in Lucy 1999\n Jkkp = 0.5 * (Jred_lu[pJred_lu] + Jblue_lu[pJblue_lu]);\n escat_contrib += (zend - zstart) * escat_op * (\n Jkkp - I_nu[p_idx])\n # this introduces the necessary ffset of one element between\n # pJblue_lu and pJred_lu\n pJred_lu += 1\n I_nu[p_idx] += escat_contrib\n # // Lucy 1999, Eq 26\n I_nu[p_idx] *= (exp_tau[pexp_tau])\n I_nu[p_idx] += att_S_ul[patt_S_ul] \n\n # // reset e-scattering opacity\n escat_contrib = 0\n zstart = zend\n\n pline += 1\n pexp_tau += 1\n patt_S_ul += 1\n pJblue_lu += 1\n\n # calculate e-scattering optical depth to grid cell boundary\n\n Jkkp = 0.5 * (Jred_lu[pJred_lu] + Jblue_lu[pJblue_lu])\n zend = model.time_explosion / C_INV * (1. - nu_end / nu) # check\n escat_contrib += (zend - zstart) * escat_op * (\n Jkkp - I_nu[p_idx])\n zstart = zend\n\n # advance pointers\n direction = int((shell_id[i+1] - shell_id[i]) * size_line)\n pexp_tau += direction\n patt_S_ul += direction\n pJred_lu += direction\n pJblue_lu += direction\n I_nu[p_idx] *= p\n L[nu_idx] = 8 * M_PI * M_PI * trapezoid_integration(I_nu, R_max / N)\n\n return L\n\n\n\nintegrator_spec = [\n ('model', NumbaModel.class_type.instance_type),\n ('plasma', NumbaPlasma.class_type.instance_type),\n ('points', int64)\n]\n@jitclass(integrator_spec)\nclass NumbaFormalIntegrator(object):\n '''\n Helper class for performing the formal integral\n with numba.\n '''\n def __init__(self, model, plasma, points=1000):\n\n self.model = model\n self.plasma = plasma\n self.points = points\n\n def formal_integral(self, iT, inu, inu_size, att_S_ul, Jred_lu, Jblue_lu, tau_sobolev, electron_density, N):\n '''simple wrapper for the numba implementation of the formal integral'''\n return numba_formal_integral(self.model, self.plasma, iT, inu, inu_size, att_S_ul, Jred_lu, Jblue_lu, tau_sobolev, electron_density, N)\n\n\nclass FormalIntegrator(object):\n '''\n Class containing the formal integrator\n '''\n\n def __init__(self, model, plasma, runner, points=1000):\n\n self.model = model\n self.runner = runner \n self.points = points\n if plasma:\n self.plasma = numba_plasma_initialize(\n plasma, runner.line_interaction_type\n )\n self.atomic_data = plasma.atomic_data\n self.original_plasma = plasma\n\n def generate_numba_objects(self):\n '''instantiate the numba interface objects\n needed for computing the formal integral'''\n self.numba_model = NumbaModel(\n self.runner.r_inner_i,\n self.runner.r_outer_i,\n self.model.time_explosion.to(\"s\").value,\n )\n self.numba_plasma = numba_plasma_initialize(\n self.original_plasma, \n self.runner.line_interaction_type\n )\n\n self.numba_integrator = NumbaFormalIntegrator(\n self.numba_model, \n self.numba_plasma, \n self.points\n )\n\n\n def check(self, raises=True):\n \"\"\"\n A method that determines if the formal integral can be performed with\n the current configuration settings\n\n The function returns False if the configuration conflicts with the\n required settings. 
If raises evaluates to True, then a\n IntegrationError is raised instead \n \"\"\"\n\n def raise_or_return(message):\n if raises:\n raise IntegrationError(message)\n else:\n warnings.warn(message)\n return False\n\n for obj in (self.model, self.plasma, self.runner):\n if obj is None:\n return raise_or_return(\n \"The integrator is missing either model, plasma or \"\n \"runner. Please make sure these are provided to the \"\n \"FormalIntegrator.\"\n )\n\n if not self.runner.line_interaction_type in [\"downbranch\", \"macroatom\"]:\n return raise_or_return(\n \"The FormalIntegrator currently only works for \"\n 'line_interaction_type == \"downbranch\"'\n 'and line_interaction_type == \"macroatom\"'\n )\n\n return True\n\n def calculate_spectrum(\n self, frequency, points=None, interpolate_shells=0, raises=True\n ):\n # Very crude implementation\n # The c extension needs bin centers (or something similar)\n # while TARDISSpectrum needs bin edges\n self.check(raises)\n N = points or self.points\n if interpolate_shells == 0: # Default Value\n interpolate_shells = max(2 * self.model.no_of_shells, 80)\n warnings.warn(\n \"The number of interpolate_shells was not \"\n f\"specified. The value was set to {interpolate_shells}.\"\n )\n self.interpolate_shells = interpolate_shells\n frequency = frequency.to(\"Hz\", u.spectral())\n\n luminosity = u.Quantity(self.formal_integral(frequency, N), \"erg\") * (\n frequency[1] - frequency[0]\n )\n\n # Ugly hack to convert to 'bin edges'\n frequency = u.Quantity(\n np.concatenate(\n [\n frequency.value,\n [frequency.value[-1] + np.diff(frequency.value)[-1]],\n ]\n ),\n frequency.unit,\n )\n\n return TARDISSpectrum(frequency, luminosity)\n\n def make_source_function(self):\n \"\"\"\n Calculates the source function using the line absorption rate estimator `Edotlu_estimator`\n\n Formally it calculates the expression ( 1 - exp(-tau_ul) ) S_ul but this product is what we need later,\n so there is no need to factor out the source function explicitly.\n\n Parameters\n ----------\n model : tardis.model.Radial1DModel\n\n Returns\n -------\n Numpy array containing ( 1 - exp(-tau_ul) ) S_ul ordered by wavelength of the transition u -> l\n \"\"\"\n\n model = self.model\n runner = self.runner\n\n macro_ref = self.atomic_data.macro_atom_references\n macro_data = self.atomic_data.macro_atom_data\n\n no_lvls = len(self.atomic_data.levels)\n no_shells = len(model.w)\n\n if runner.line_interaction_type == \"macroatom\":\n internal_jump_mask = (macro_data.transition_type >= 0).values\n ma_int_data = macro_data[internal_jump_mask]\n internal = self.original_plasma.transition_probabilities[\n internal_jump_mask\n ]\n\n source_level_idx = ma_int_data.source_level_idx.values\n destination_level_idx = ma_int_data.destination_level_idx.values\n\n Edotlu_norm_factor = 1 / (runner.time_of_simulation * model.volume)\n exptau = 1 - np.exp(-self.original_plasma.tau_sobolevs)\n Edotlu = Edotlu_norm_factor * exptau * runner.Edotlu_estimator\n\n # The following may be achieved by calling the appropriate plasma\n # functions\n Jbluelu_norm_factor = (\n (\n const.c.cgs\n * model.time_explosion\n / (4 * np.pi * runner.time_of_simulation * model.volume)\n )\n .to(\"1/(cm^2 s)\")\n .value\n )\n # Jbluelu should already by in the correct order, i.e. 
by wavelength of\n # the transition l->u\n Jbluelu = runner.j_blue_estimator * Jbluelu_norm_factor\n\n upper_level_index = self.atomic_data.lines.index.droplevel(\n \"level_number_lower\"\n )\n e_dot_lu = pd.DataFrame(Edotlu, index=upper_level_index)\n e_dot_u = e_dot_lu.groupby(level=[0, 1, 2]).sum()\n e_dot_u_src_idx = macro_ref.loc[e_dot_u.index].references_idx.values\n\n if runner.line_interaction_type == \"macroatom\":\n C_frame = pd.DataFrame(\n columns=np.arange(no_shells), index=macro_ref.index\n )\n q_indices = (source_level_idx, destination_level_idx)\n for shell in range(no_shells):\n Q = sp.coo_matrix(\n (internal[shell], q_indices), shape=(no_lvls, no_lvls)\n )\n inv_N = sp.identity(no_lvls) - Q\n e_dot_u_vec = np.zeros(no_lvls)\n e_dot_u_vec[e_dot_u_src_idx] = e_dot_u[shell].values\n C_frame[shell] = sp.linalg.spsolve(inv_N.T, e_dot_u_vec)\n\n e_dot_u.index.names = [\n \"atomic_number\",\n \"ion_number\",\n \"source_level_number\",\n ] # To make the q_ul e_dot_u product work, could be cleaner\n transitions = self.original_plasma.atomic_data.macro_atom_data[\n self.original_plasma.atomic_data.macro_atom_data.transition_type\n == -1\n ].copy()\n transitions_index = transitions.set_index(\n [\"atomic_number\", \"ion_number\", \"source_level_number\"]\n ).index.copy()\n tmp = self.original_plasma.transition_probabilities[\n (self.atomic_data.macro_atom_data.transition_type == -1).values\n ]\n q_ul = tmp.set_index(transitions_index)\n t = model.time_explosion.value\n lines = self.atomic_data.lines.set_index(\"line_id\")\n wave = lines.wavelength_cm.loc[\n transitions.transition_line_id\n ].values.reshape(-1, 1)\n if runner.line_interaction_type == \"macroatom\":\n e_dot_u = C_frame.loc[e_dot_u.index]\n att_S_ul = wave * (q_ul * e_dot_u) * t / (4 * np.pi)\n\n result = pd.DataFrame(\n att_S_ul.values, index=transitions.transition_line_id.values\n )\n att_S_ul = result.loc[lines.index.values].values\n\n # Jredlu should already by in the correct order, i.e. 
by wavelength of\n # the transition l->u (similar to Jbluelu)\n Jredlu = Jbluelu * np.exp(-self.original_plasma.tau_sobolevs) + att_S_ul\n if self.interpolate_shells > 0:\n (\n att_S_ul,\n Jredlu,\n Jbluelu,\n e_dot_u,\n ) = self.interpolate_integrator_quantities(\n att_S_ul, Jredlu, Jbluelu, e_dot_u\n )\n else:\n runner.r_inner_i = runner.r_inner_cgs\n runner.r_outer_i = runner.r_outer_cgs\n runner.tau_sobolevs_integ = self.original_plasma.tau_sobolevs.values\n runner.electron_densities_integ = self.original_plasma.electron_densities.values\n\n return att_S_ul, Jredlu, Jbluelu, e_dot_u\n\n def interpolate_integrator_quantities(\n self, att_S_ul, Jredlu, Jbluelu, e_dot_u\n ):\n runner = self.runner\n plasma = self.original_plasma\n nshells = self.interpolate_shells\n r_middle = (runner.r_inner_cgs + runner.r_outer_cgs) / 2.0\n\n r_integ = np.linspace(\n runner.r_inner_cgs[0], runner.r_outer_cgs[-1], nshells\n )\n runner.r_inner_i = r_integ[:-1]\n runner.r_outer_i = r_integ[1:]\n\n r_middle_integ = (r_integ[:-1] + r_integ[1:]) / 2.0\n\n runner.electron_densities_integ = interp1d(\n r_middle,\n plasma.electron_densities,\n fill_value=\"extrapolate\",\n kind=\"nearest\",\n )(r_middle_integ)\n # Assume tau_sobolevs to be constant within a shell\n # (as in the MC simulation)\n runner.tau_sobolevs_integ = interp1d(\n r_middle,\n plasma.tau_sobolevs,\n fill_value=\"extrapolate\",\n kind=\"nearest\",\n )(r_middle_integ)\n att_S_ul = interp1d(r_middle, att_S_ul, fill_value=\"extrapolate\")(\n r_middle_integ\n )\n Jredlu = pd.DataFrame(interp1d(r_middle, Jredlu, fill_value=\"extrapolate\")(\n r_middle_integ\n ))\n Jbluelu = interp1d(r_middle, Jbluelu, fill_value=\"extrapolate\")(\n r_middle_integ\n )\n e_dot_u = interp1d(r_middle, e_dot_u, fill_value=\"extrapolate\")(\n r_middle_integ\n )\n\n # Set negative values from the extrapolation to zero\n att_S_ul = att_S_ul.clip(0.0)\n Jbluelu = Jbluelu.clip(0.0)\n Jredlu = Jredlu.clip(0.0)\n e_dot_u = e_dot_u.clip(0.0)\n return att_S_ul, Jredlu, Jbluelu, e_dot_u\n\n def formal_integral(self, nu, N):\n '''Do the formal integral with the numba\n routines'''\n # TODO: get rid of storage later on\n\n res = self.make_source_function()\n\n att_S_ul = res[0].flatten(order='F')\n Jred_lu = res[1].values.flatten(order='F')\n Jblue_lu = res[2].flatten(order='F')\n\n self.generate_numba_objects()\n L = self.numba_integrator.formal_integral(\n self.model.t_inner,\n nu,\n nu.shape[0],\n att_S_ul,\n Jred_lu,\n Jblue_lu,\n self.runner.tau_sobolevs_integ,\n self.runner.electron_densities_integ,\n N\n )\n return np.array(L, np.float64)\n\n@njit(**njit_dict_no_parallel)\ndef populate_z(model, p, oz, oshell_id):\n \"\"\"Calculate p line intersections\n\n This function calculates the intersection points of the p-line with\n each shell\n\n Inputs:\n :p: (double) distance of the integration line to the center\n :oz: (array of doubles) will be set with z values. 
the array is truncated\n by the value `1`.\n :oshell_id: (int64) will be set with the corresponding shell_ids\n \"\"\"\n # abbreviations\n r = model.r_outer\n N = len(model.r_inner) # check\n #print(N)\n inv_t = 1/model.time_explosion\n z = 0\n offset = N\n\n if p <= model.r_inner[0]:\n # intersect the photosphere\n for i in range(N):\n oz[i] = 1 - calculate_z(r[i], p, inv_t)\n oshell_id[i] = i\n return N\n else:\n # no intersection with photosphere\n # that means we intersect each shell twice\n for i in range(N):\n z = calculate_z(r[i], p, inv_t)\n if z == 0:\n continue\n if offset == N:\n offset = i\n # calculate the index in the resulting array\n i_low = N - i - 1 # the far intersection with the shell\n i_up = N + i - 2 * offset # the nearer intersection with the shell\n\n # setting the arrays; check return them?\n oz[i_low] = 1 + z\n oshell_id[i_low] = i\n oz[i_up] = 1 - z\n oshell_id[i_up] = i\n return 2 * (N - offset)\n\n\n@njit(**njit_dict_no_parallel)\ndef calculate_z(r, p, inv_t):\n \"\"\"Calculate distance to p line\n\n Calculate half of the length of the p-line inside a shell\n of radius r in terms of unit length (c * t_exp).\n If shell and p-line do not intersect, return 0.\n\n Inputs:\n :r: (double) radius of the shell\n :p: (double) distance of the p-line to the center of the supernova\n :inv_t: (double) inverse time_explosio is needed to norm to unit-length\n \"\"\"\n if r > p:\n return np.sqrt(r * r - p * p) * C_INV * inv_t\n else:\n return 0\n\n\nclass BoundsError(ValueError):\n pass\n\n\n@njit(**njit_dict_no_parallel)\ndef line_search(nu, nu_insert, number_of_lines):\n \"\"\"\n Insert a value in to an array of line frequencies\n\n Inputs:\n :nu: (array) line frequencies\n :nu_insert: (int) value of nu key\n :number_of_lines: (int) number of lines in the line list\n\n Outputs:\n index of the next line ot the red.\n If the key value is redder\n than the reddest line returns number_of_lines.\n \"\"\"\n # TODO: fix the TARDIS_ERROR_OK\n # tardis_error_t ret_val = TARDIS_ERROR_OK # check\n imin = 0\n imax = number_of_lines - 1\n if nu_insert > nu[imin]:\n result = imin\n elif nu_insert < nu[imax]:\n result = imax + 1\n else:\n result = reverse_binary_search(nu, nu_insert, imin, imax)\n result = result + 1\n return result\n\n\n@njit(**njit_dict_no_parallel)\ndef reverse_binary_search(x, x_insert, imin, imax):\n \"\"\"Look for a place to insert a value in an inversely sorted float array.\n\n Inputs:\n :x: (array) an inversely (largest to lowest) sorted float array\n :x_insert: (value) a value to insert\n :imin: (int) lower bound\n :imax: (int) upper bound\n\n Outputs:\n index of the next boundary to the left\n \"\"\"\n # ret_val = TARDIS_ERROR_OK # check\n if (x_insert > x[imin]) or (x_insert < x[imax]):\n raise BoundsError # check\n return len(x) - 1 - np.searchsorted(x[::-1], x_insert, side='right')\n\n@njit(**njit_dict_no_parallel)\ndef trapezoid_integration(array, h):\n '''in the future, let's just replace\n this with the numpy trapz\n since it is numba compatable\n '''\n return np.trapz(array, dx=h)\n\n\n@njit(**njit_dict_no_parallel)\ndef intensity_black_body(nu, T):\n '''Get the black body intensity at frequency nu\n and temperature T '''\n if nu == 0:\n return np.nan # to avoid ZeroDivisionError\n beta_rad = 1 / (KB_CGS * T)\n coefficient = 2 * H_CGS * C_INV * C_INV\n return coefficient * nu * nu * nu / (np.exp(H_CGS * nu * beta_rad) - 1)\n\n@njit(**njit_dict_no_parallel)\ndef calculate_p_values(R_max, N):\n '''This can probably be replaced with a simpler function'''\n 
return np.arange(N).astype(np.float64) * R_max / (N - 1)\n" ]
[ [ "scipy.sparse.coo_matrix", "numpy.sqrt", "numpy.linspace", "scipy.sparse.linalg.spsolve", "numpy.arange", "numpy.arccos", "pandas.DataFrame", "scipy.interpolate.interp1d", "scipy.sparse.identity", "numpy.diff", "numpy.searchsorted", "numpy.exp", "numpy.array", "numpy.zeros", "numpy.trapz" ] ]
meracan/netcdf-swan
[ "f7548129331d34323ac960f315ca592d9511086a" ]
[ "test/test_netcdfswan.py" ]
[ "import os\nimport json\nfrom netcdfswan import NetCDFSWAN\nimport numpy as np\nimport logging\nfrom dataTest import elem,time,lat,lon,bed,slat,slon,freq,dir,spcgroup,variables, stations\n\n\ndef test_NetCDFSWAN_write():\n swanFolder=\"../s3/swandata\"\n jsonFile='./test/json/demo.json'\n input=NetCDFSWAN.prepareInputJSON(jsonFile,swanFolder,year=2000,month=1)\n swan=NetCDFSWAN(input)\n\n # Write\n # swan.uploadStatic(year=2000)\n swan.uploadS()\n swan.uploadT()\n # swan.uploadSpc()\n\n\ndef test_NetCDFSWAN():\n\n \n input={\n \"name\":\"swan-test1\",\n \"bucket\":\"uvic-bcwave\",\n \"cacheLocation\":\"../s3\",\n \"localOnly\":True\n }\n \n swan=NetCDFSWAN(input)\n\n # Read\n np.testing.assert_array_equal(swan[\"nodes\",\"bed\"], bed)\n np.testing.assert_array_equal(swan[\"elem\",\"elem\"], elem)\n np.testing.assert_array_equal(swan[\"time\",\"time\"], time)\n np.testing.assert_array_equal(swan[\"nodes\",\"lat\"], lat)\n np.testing.assert_array_equal(swan[\"nodes\",\"lon\"], lon)\n\n np.testing.assert_array_equal(swan[\"freq\",\"freq\"], freq)\n np.testing.assert_array_equal(swan[\"dir\",\"dir\"], dir)\n np.testing.assert_array_equal(swan[\"snodes\",\"slon\",0:11], [0,1,2,3,4,5,6,6,7,7,8])\n np.testing.assert_array_equal(swan[\"snodes\",\"slat\",0:11], [0,0,0,0,0,0,0,1,0,1,0])\n np.testing.assert_array_equal(swan[\"snodes\",\"stationid\",0:11], [0,1,2,3,4,5,6,6,7,7,8])\n np.testing.assert_array_equal(swan[\"stations\",\"name\",0:2], [\"beverly\",\"brooks\"])\n \n\n np.testing.assert_array_equal(swan[\"s\",\"u10\"], variables['WIND']['Windv_x'])\n np.testing.assert_array_equal(swan[\"s\",\"v10\"], variables['WIND']['Windv_y'])\n np.testing.assert_array_equal(swan[\"s\",\"hs\"], variables['HS']['Hsig'])\n np.testing.assert_array_equal(swan[\"s\",\"tps\"], variables['TPS']['TPsmoo'])\n np.testing.assert_array_equal(swan[\"s\",\"tmm10\"], variables['TMM10']['Tm_10'])\n np.testing.assert_array_equal(swan[\"s\",\"tm01\"], variables['TM01']['Tm01'])\n np.testing.assert_array_equal(swan[\"s\",\"tm02\"], variables['TM02']['Tm02'])\n np.testing.assert_array_equal(swan[\"s\",\"pdir\"], variables['PDIR']['Pdir'])\n np.testing.assert_array_equal(swan[\"s\",\"dir\"], variables['DIR']['Dir'])\n np.testing.assert_array_equal(swan[\"s\",\"dspr\"], variables['DSPR']['Dspr'])\n np.testing.assert_array_equal(swan[\"s\",\"qp\"], variables['QP']['Qp'])\n np.testing.assert_array_equal(swan[\"s\",\"transpx\"], variables['TRANSP']['Transp_x'])\n np.testing.assert_array_equal(swan[\"s\",\"transpy\"], variables['TRANSP']['Transp_y'])\n\n np.testing.assert_array_equal(swan[\"t\",\"u10\"], variables['WIND']['Windv_x'].T)\n np.testing.assert_array_equal(swan[\"t\",\"v10\"], variables['WIND']['Windv_y'].T)\n np.testing.assert_array_equal(swan[\"t\",\"hs\"], variables['HS']['Hsig'].T)\n\n np.testing.assert_array_equal(swan[\"t\",\"tps\"], variables['TPS']['TPsmoo'].T)\n np.testing.assert_array_equal(swan[\"t\",\"tmm10\"], variables['TMM10']['Tm_10'].T)\n np.testing.assert_array_equal(swan[\"t\",\"tm01\"], variables['TM01']['Tm01'].T)\n np.testing.assert_array_equal(swan[\"t\",\"tm02\"], variables['TM02']['Tm02'].T)\n np.testing.assert_array_equal(swan[\"t\",\"pdir\"], variables['PDIR']['Pdir'].T)\n np.testing.assert_array_equal(swan[\"t\",\"dir\"], variables['DIR']['Dir'].T)\n np.testing.assert_array_equal(swan[\"t\",\"dspr\"], variables['DSPR']['Dspr'].T)\n np.testing.assert_array_equal(swan[\"t\",\"qp\"], variables['QP']['Qp'].T)\n np.testing.assert_array_equal(swan[\"t\",\"transpx\"], 
variables['TRANSP']['Transp_x'].T)\n np.testing.assert_array_equal(swan[\"t\",\"transpy\"], variables['TRANSP']['Transp_y'].T)\n\n for name in swan.stations:\n \n id = swan.stations[name]['id']\n \n sIndex=swan.stations[name]['start']\n eIndex=swan.stations[name]['end']\n np.testing.assert_array_equal(swan[\"spc\", \"spectra\",sIndex:eIndex], spcgroup[\"spectra\"][sIndex:eIndex])\n \n\ndef test_NetCDFSWAN_logger():\n logging.basicConfig(\n filename=os.path.join('./data',\"progress.log\"),\n level=logging.DEBUG,\n format=\"%(levelname)s %(asctime)s %(message)s\"\n )\n logger = logging.getLogger()\n \n try:\n swanFolder=\"../s3/swandata\"\n jsonFile='./test/json/demo.json'\n input=NetCDFSWAN.prepareInputJSON(jsonFile,swanFolder,year=2000,month=1)\n swan=NetCDFSWAN(input,logger=logger)\n swan.uploadStatic()\n swan.uploadS()\n swan.uploadT()\n swan.uploadSpc()\n except Exception as err:\n logger.error(err)\n \n \nif __name__ == \"__main__\":\n test_NetCDFSWAN_write()\n test_NetCDFSWAN()\n \n" ]
[ [ "numpy.testing.assert_array_equal" ] ]
hlibe/FinTech-of-Networks
[ "64c00951ea192adc69166ecdc0a08668a4a40858" ]
[ "LR.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 27 15:19:34 2021\n\n@author: HaoLI\n\"\"\"\n# evaluate gradient boosting algorithm for classification\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom numpy import mean\nfrom numpy import std\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import roc_curve, auc, roc_auc_score ###计算roc和auc\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import StandardScaler\nimport datetime\nimport time\nfrom imblearn.over_sampling import RandomOverSampler\n\n# check and set the working directory\nos.getcwd()\n#os.chdir('/Users/HaoLI/Dropbox/FinTech/raw_data')\nos.chdir('/Users/HaoLI/Stata/credit/data')\ndf = pd.read_csv('data1210rename_use.csv')\ncol_names = list(df.columns.values[3:30]) \ncol_names.remove('default_geq_1') #X中不能包含目标函数y\ncol_names.remove('default_geq_2')\ncol_names.remove('default_geq_3')\nbase_col_names = col_names[0:13] # for baseline model 仅仅包含银行数据+早中晚,而不包含消费数据\ndf_fillna = df.fillna(0) # fill NA with 0. 无消费以0计\nX = df_fillna[col_names]\ny = df_fillna.default_geq_1 # Target variable\n\nX_base = df_fillna[base_col_names]\ny_base = df_fillna.default_geq_1 # Target variable\n\npenalty='none'\n\nlist_rec = [] #记录参数\nfor random_state in range(0,20):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.30)\n X_base_train, X_base_test, y_base_train, y_base_test = train_test_split(X_base, y_base, test_size = 0.30, random_state=random_state)\n ros = RandomOverSampler(random_state=0)\n X_train, y_train = ros.fit_resample(X_train, y_train)\n X_base_train, y_base_train = ros.fit_resample(X_base_train, y_base_train)\n #sc = StandardScaler()\n #X_train = sc.fit_transform(X_train)\n #X_test = sc.fit_transform(X_test)\n # define the model\n classifier = LogisticRegression(penalty= 'none', dual=False,\n tol=0.0001, C=1.0, fit_intercept=True, intercept_scaling=1, \n class_weight=None, random_state=None, solver='saga', \n max_iter=100, verbose=0, \n warm_start=False, n_jobs=None, l1_ratio=None) #'none' for no penalty\n classifier.fit(X_train, y_train)\n\n # use trained model and testing data to predict\n y_train_pred = classifier.decision_function(X_train)\n y_test_pred = classifier.decision_function(X_test)\n fullmodelperc = np.percentile(y_test_pred,[5,10,20,30,40,50] )\n\n classifier.fit(X_base_train, y_base_train)\n y_base_train_pred = classifier.decision_function(X_base_train)\n y_base_test_pred = classifier.decision_function(X_base_test)#可以加weight 0.5\n basemodelperc = np.percentile(y_base_test_pred,[5,10,20,30,40,50] )\n #print(\"full model percentile[5,10,20,30,40,50]: %s\"%fullmodelperc )# get percentile of array y_test_pred\n #print(\"baseline model percentile[5,10,20,30,40,50]: %s\"%basemodelperc )# get percentile of array y_test_pred\n\n\n #### ROC curve and Area-Under-Curve (AUC)\n train_fpr, train_tpr, tr_thresholds = roc_curve(y_train, y_train_pred)\n test_fpr, test_tpr, te_thresholds = roc_curve(y_test, y_test_pred)\n print(auc(train_fpr, train_tpr))\n print(auc(test_fpr, test_tpr))\n \n plt.grid()\n plt.plot(train_fpr, train_tpr, label=\" AUC TRAIN =\"+str(auc(train_fpr, train_tpr)))\n plt.plot(test_fpr, test_tpr, label=\" AUC TEST 
=\"+str(auc(test_fpr, test_tpr)))\n plt.plot([0,1],[0,1],'g--')\n plt.legend()\n plt.xlabel(\"True Positive Rate\")\n plt.ylabel(\"False Positive Rate\")\n plt.title(\"AUC(LR ROC curve)\")\n plt.grid(color='black', linestyle='-', linewidth=0.5)\n time1 = datetime.datetime.now()\n #对现在时间格式化,以此作为文件名\n time2 = time1.strftime('%Y-%m-%d-%H%M%S')\n plt.savefig(\"/Users/HaoLI/Stata/credit/out/ROC figure/Figure_\"+time2+\".png\", bbox_inches = 'tight') \n plt.show()\n list_rec.append([auc(train_fpr, train_tpr), auc(test_fpr, test_tpr)])\n\nlist_rec_1 = list_rec\ndf = pd.DataFrame(list_rec, columns = ['IS_AUC','OOS_AUC'])\ndf.to_csv('LR'+penalty+'_AUC_parameter_record.csv')\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "matplotlib.pyplot.title", "sklearn.metrics.auc", "sklearn.model_selection.train_test_split", "numpy.percentile", "pandas.DataFrame", "sklearn.metrics.roc_curve", "matplotlib.pyplot.plot", "matplotlib.pyplot.savefig", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
Shahzaib1999/Python
[ "25d163fb2db3001814e373814781d6ff6a4675d3" ]
[ "start.py" ]
[ "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random \r\nx=np.linspace(-4,4,9)\r\ny=np.linspace(-5,5,11)\r\n#print(x)\r\n#print(y)\r\nxx,yy=np.meshgrid(x,y)\r\n#print(yy)\r\n#ellipse=xx**2.0+4.0*yy**2.0\r\nrand=np.random.random_sample((11, 9))\r\nplt.contourf(xx,yy,rand,cmap=\"jet\")\r\nplt.colorbar()\r\nplt.show()\r\n\r\n\r\n\r\n" ]
[ [ "matplotlib.pyplot.contourf", "numpy.linspace", "numpy.random.random_sample", "matplotlib.pyplot.colorbar", "numpy.meshgrid", "matplotlib.pyplot.show" ] ]
YBZh/MetaFGNet
[ "a9c2128f5d2a6cdffa25d49291b037270f3de166", "a9c2128f5d2a6cdffa25d49291b037270f3de166" ]
[ "Sample_Selection/models/resnet0.py", "MetaFGNet_with_Sample_Selection/main.py" ]
[ "import torch.nn as nn\nfrom torch.legacy import nn as torchnn\nimport copy\nimport math\nimport torch.utils.model_zoo as model_zoo\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AvgPool2d(7)\n # self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n # x = self.fc(x)\n\n return x\n\n\nclass Two_stream_classifier(nn.Module):\n def __init__(self, resnet_conv, resnet_classifier, convout_dimension, args):\n super(Two_stream_classifier, self).__init__()\n self.resnet_conv = resnet_conv\n self.resnet_classifier = resnet_classifier\n self.target_classifier = nn.Linear(convout_dimension, 120)\n self.source_softmax = nn.Softmax()\n self.target_softmax = nn.Softmax()\n self.args = args\n # self.cub_classifier.weight.data.normal_(0.0, 0.02)\n # self.cub_classifier.bias.data.normal_(0)\n\n def forward(self, x):\n x = self.resnet_conv(x)\n x = [x.narrow(0, 0, self.args.batch_size_source), x.narrow(0, self.args.batch_size_source, self.args.batch_size)]\n # x = x.chunk(2, 0) # here should be the torch.tensor operation.\n x = [self.resnet_classifier(x[0]), self.target_classifier(x[1])]\n x = [self.source_softmax(x[0]), self.target_softmax(x[1])]\n\n return x\n\n\nclass Share_convs(nn.Module):\n def __init__(self, resnet_conv, convout_dimension, num_class):\n super(Share_convs, self).__init__()\n self.resnet_conv = resnet_conv\n self.fc = nn.Linear(convout_dimension, num_class)\n # self.logsoftmax = nn.LogSoftmax()\n\n def forward(self, x):\n x = self.resnet_conv(x)\n x = self.fc(x)\n # x = self.logsoftmax(x)\n return x\n\n\ndef resnet18(pretrained=False, args=1, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n # modify the structure of the model.\n num_of_feature_map = model.fc.in_features\n model.fc = nn.Linear(num_of_feature_map, 120)\n model.fc.weight.data.normal_(0.0, 0.02)\n model.fc.bias.data.normal_(0)\n return model\n\n\ndef resnet34(pretrained=False, args=1, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n # model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n # if pretrained:\n # print('load the imagenet pretrained model')\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n # # target_model = model\n # target_model = copy.deepcopy(model)\n # # modify the structure of the model.\n # # pretrained_fc_temp = model.fc # the fc layer for the imagenet classifier\n # # print(pretrained_fc_temp)\n # num_of_feature_map = model.fc.in_features # the channels of feature map\n # # model.fc = nn.Linear(10, 1)\n # # model = Two_stream_classifier(model, pretrained_fc_temp, num_of_feature_map, args)\n # target_model.fc = nn.Linear(num_of_feature_map, 200)\n 
# print(id(model.layer1))\n # print(id(target_model.layer1))\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n pretrained_dict = 1\n if pretrained:\n print('load the imagenet pretrained model')\n pretrained_dict = model_zoo.load_url(model_urls['resnet34'])\n model_dict = model.state_dict()\n pretrained_dict_temp = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict_temp)\n model.load_state_dict(model_dict)\n\n target_model = Share_convs(model, 512, 120)\n source_model = Share_convs(model, 512, 1000)\n source_model_dict = source_model.state_dict()\n pretrained_dict_temp1 = {k: v for k, v in pretrained_dict.items() if k in source_model_dict}\n source_model_dict.update(pretrained_dict_temp1)\n source_model.load_state_dict(source_model_dict)\n # print(id(source_model.resnet_conv)) # the memory is shared here\n # print(id(target_model.resnet_conv))\n\n # print(id(source_model.fc)) # the memory id shared here.\n # print(id(target_model.fc))\n\n\n return source_model , target_model\n\n\ndef resnet50(pretrained=False, args=1, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n # modify the structure of the model.\n num_of_feature_map = model.fc.in_features\n model.fc = nn.Linear(num_of_feature_map, 120)\n model.fc.weight.data.normal_(0.0, 0.02)\n model.fc.bias.data.normal_(0)\n return model\n\n\ndef resnet101(pretrained=False, args=1,**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n # modify the structure of the model.\n num_of_feature_map = model.fc.in_features\n model.fc = nn.Linear(num_of_feature_map, 120)\n model.fc.weight.data.normal_(0.0, 0.02)\n model.fc.bias.data.normal_(0)\n return model\n\n\ndef resnet152(pretrained=False, args=1, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n # modify the structure of the model.\n num_of_feature_map = model.fc.in_features\n model.fc = nn.Linear(num_of_feature_map, 120)\n model.fc.weight.data.normal_(0.0, 0.02)\n model.fc.bias.data.normal_(0)\n return model\n\n\ndef resnet(args, pretrained=False, **kwargs):\n print(\"==> creating model '{}' \".format(args.arch))\n if args.arch == 'resnet18':\n return resnet18(pretrained, args)\n elif args.arch == 'resnet34':\n return resnet34(pretrained, args)\n elif args.arch == 'resnet50':\n return resnet50(pretrained, args)\n elif args.arch == 'resnet101':\n return resnet101(pretrained, args)\n elif args.arch == 'resnet152':\n return resnet152(pretrained, args)\n else:\n raise ValueError('Unrecognized model architecture', arch)\n", "##############################################################################\n#\n# All the codes about the model constructing should be kept in the folder ./models/\n# All the codes about the data process should be kept in the folder ./data/\n# The file ./opts.py stores the options.\n# The file ./trainer.py stores the training and test strategy\n# The ./main.py 
should be simple\n#\n##############################################################################\nimport os\nimport json\nimport shutil\nimport torch.optim\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport random\nimport numpy as np\nimport torch.backends.cudnn as cudnn\nfrom models.resnet import resnet # The model construction\nfrom trainer import train # For the training process\nfrom trainer import validate # For the validate (test) process\nfrom opts import opts # The options for the project\nfrom data.prepare_data import generate_dataloader # Prepare the data and dataloader\nimport ipdb\n\nbest_prec1 = 0\n\ndef main():\n global args, best_prec1\n args = opts()\n # ipdb.set_trace()\n # args = parser.parse_args()\n model_source, model_target = resnet(args)\n # define-multi GPU\n model_source = torch.nn.DataParallel(model_source).cuda()\n model_target = torch.nn.DataParallel(model_target).cuda()\n print('the memory id should be same for the shared feature extractor:')\n print(id(model_source.module.resnet_conv)) # the memory is shared here\n print(id(model_target.module.resnet_conv))\n print('the memory id should be different for the different classifiers:')\n print(id(model_source.module.fc)) # the memory id shared here.\n print(id(model_target.module.fc))\n # define loss function(criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda()\n\n np.random.seed(1) ### fix the random data.\n random.seed(1)\n # optimizer = torch.optim.SGD(model.parameters(),\n # To apply different learning rate to different layer\n if args.meta_sgd:\n meta_train_lr = []\n for param in model_target.parameters():\n meta_train_lr.append(torch.FloatTensor(param.data.size()).fill_(args.meta_train_lr).cuda())\n if args.pretrained:\n print('the pretrained setting of optimizer')\n if args.auxiliary_dataset == 'imagenet':\n optimizer = torch.optim.SGD([\n {'params': model_source.module.resnet_conv.parameters(), 'name': 'pre-trained'},\n {'params': model_source.module.fc.parameters(), 'name': 'pre-trained'},\n {'params': model_target.module.fc.parameters(), 'name': 'new-added'},\n ],\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n elif args.auxiliary_dataset == 'l_bird':\n optimizer = torch.optim.SGD([\n {'params': model_source.module.resnet_conv.parameters(), 'name': 'pre-trained'},\n {'params': model_source.module.fc.parameters(), 'name': 'pre-trained'},\n {'params': model_target.module.fc.parameters(), 'name': 'new-added'},\n ],\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n else:\n print('the from scratch setting of optimizer')\n optimizer = torch.optim.SGD([\n {'params': model_source.module.resnet_conv.parameters(), 'name': 'new-added'},\n {'params': model_source.module.fc.parameters(), 'name': 'new-added'},\n {'params': model_target.module.fc.parameters(), 'name': 'new-added'},\n ],\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n #optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n # raise ValueError('the resume function is not finished')\n print(\"==> loading checkpoints '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n if args.meta_sgd:\n meta_train_lr = checkpoint['meta_train_lr']\n best_prec1 = checkpoint['best_prec1']\n model_source.load_state_dict(checkpoint['source_state_dict'])\n model_target.load_state_dict(checkpoint['target_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n 
print(\"==> loaded checkpoint '{}'(epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n else:\n raise ValueError('The file to be resumed from is not exited', args.resume)\n\n if not os.path.isdir(args.log):\n os.makedirs(args.log)\n log = open(os.path.join(args.log, 'log.txt'), 'w')\n state = {k: v for k, v in args._get_kwargs()}\n log.write(json.dumps(state) + '\\n')\n log.close()\n\n cudnn.benchmark = True\n # process the data and prepare the dataloaders.\n dataloader_returned = generate_dataloader(args)\n dataloader_number_returned = len(dataloader_returned)\n print('the number of dataloader number returned is: ', dataloader_number_returned)\n if dataloader_number_returned != 2:\n train_loader_source, val_loader_source, train_loader_target, val_loader_target = dataloader_returned\n else:\n train_loader_target, val_loader_target = dataloader_returned\n train_loader_source = None\n # train_loader, val_loader = generate_dataloader(args)\n # test only\n if args.test_only:\n if dataloader_number_returned == 2:\n validate(None, val_loader_target, model_source, model_target, criterion, 0, args)\n else:\n validate(val_loader_source, val_loader_target, model_source, model_target, criterion, 0, args)\n # if args.auxiliary_dataset == 'imagenet':\n # validate(val_loader_source, val_loader_target, model_source, model_target, criterion, 0, args)\n # else:\n # validate(None, val_loader_target, model_source, model_target, criterion, 0, args)\n return\n\n print('begin training')\n if train_loader_source:\n train_loader_source_batch = enumerate(train_loader_source)\n else:\n train_loader_source_batch = None\n train_loader_target_batch = enumerate(train_loader_target)\n for epoch in range(args.start_epoch, args.epochs):\n # train for one epoch\n if args.meta_sgd:\n train_loader_source_batch, train_loader_target_batch, meta_train_lr = train(train_loader_source, train_loader_source_batch, train_loader_target,train_loader_target_batch, model_source, model_target, criterion, optimizer, epoch, args, meta_train_lr)\n else:\n train_loader_source_batch, train_loader_target_batch = train(train_loader_source, train_loader_source_batch, train_loader_target,train_loader_target_batch, model_source, model_target, criterion, optimizer, epoch, args, None)\n # train(train_loader, model, criterion, optimizer, epoch, args)\n # evaluate on the val data\n if (epoch + 1) % args.test_freq == 0 or (epoch + 1) % args.epochs == 0:\n if dataloader_number_returned == 2:\n prec1 = validate(None, val_loader_target, model_source, model_target, criterion, epoch, args)\n else:\n prec1 = validate(val_loader_source, val_loader_target, model_source, model_target, criterion, epoch, args)\n # prec1 = 1\n # record the best prec1 and save checkpoint\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n if is_best:\n log = open(os.path.join(args.log, 'log.txt'), 'a')\n log.write(' \\nTarget_T1 acc: %3f' % (best_prec1))\n log.close()\n if args.meta_sgd:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'meta_train_lr': meta_train_lr,\n 'arch': args.arch,\n 'source_state_dict': model_source.state_dict(),\n 'target_state_dict': model_target.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer': optimizer.state_dict(),\n }, is_best, args, epoch)\n else:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'source_state_dict': model_source.state_dict(),\n 'target_state_dict': model_target.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer': optimizer.state_dict(),\n }, is_best, args, epoch + 1)\n\n\ndef 
save_checkpoint(state, is_best, args, epoch):\n filename = str(epoch) + 'checkpoint.pth.tar'\n dir_save_file = os.path.join(args.log, filename)\n torch.save(state, dir_save_file)\n if is_best:\n shutil.copyfile(dir_save_file, os.path.join(args.log, 'model_best.pth.tar'))\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Softmax", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.utils.model_zoo.load_url" ], [ "torch.nn.CrossEntropyLoss", "numpy.random.seed" ] ]
cjauvin/RavenPy
[ "d9671b5a71004bb0501ab64e0e6efbd06d2fa465" ]
[ "ravenpy/models/base.py" ]
[ "\"\"\"\nBase classes\n------------\n\nThe `Raven` class is the base class that implements model setup, execution and output retrieval, while the `Ostrich`\nclass is the base class adapting `Raven` to work with the Ostrich calibration tool.\n\n\"\"\"\nimport csv\nimport datetime as dt\nimport operator\nimport os\nimport shutil\nimport stat\nimport subprocess\nimport tempfile\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import Union\n\nimport numpy as np\nimport xarray as xr\n\nimport ravenpy\n\nfrom .rv import (\n RV,\n RVI,\n Ost,\n RavenNcData,\n RVFile,\n get_states,\n isinstance_namedtuple,\n parse_solution,\n)\n\nRAVEN_EXEC_PATH = os.getenv(\"RAVENPY_RAVEN_BINARY_PATH\") or shutil.which(\"raven\")\nOSTRICH_EXEC_PATH = os.getenv(\"RAVENPY_OSTRICH_BINARY_PATH\") or shutil.which(\"ostrich\")\n\n\nclass Raven:\n \"\"\"RAVEN hydrological model wrapper.\n\n This class is used to run the RAVEN model from user-provided configuration files. It can also be subclassed with\n configuration templates for emulated models, allowing direct calls to the models.\n\n r = Raven('/tmp/testdir')\n r.configure()\n \"\"\"\n\n identifier = \"generic-raven\"\n templates = ()\n\n # Allowed configuration file extensions\n _rvext = (\"rvi\", \"rvp\", \"rvc\", \"rvh\", \"rvt\")\n\n # Dictionary of potential variable names, keyed by CF standard name.\n # http://cfconventions.org/Data/cf-standard-names/60/build/cf-standard-name-table.html\n # PET is the potential evapotranspiration, while evspsbl is the actual evap.\n # TODO: Check we're not mixing precip and rainfall.\n _variable_names = {\n \"tasmin\": [\"tasmin\", \"tmin\"],\n \"tasmax\": [\"tasmax\", \"tmax\"],\n \"tas\": [\"tas\", \"t2m\"],\n \"rainfall\": [\"rainfall\", \"rain\"],\n \"pr\": [\"pr\", \"precip\", \"prec\", \"precipitation\", \"tp\"],\n \"prsn\": [\"prsn\", \"snow\", \"snowfall\", \"solid_precip\"],\n \"evspsbl\": [\"pet\", \"evap\", \"evapotranspiration\"],\n \"water_volume_transport_in_river_channel\": [\n \"qobs\",\n \"discharge\",\n \"streamflow\",\n \"dis\",\n ],\n }\n\n _parallel_parameters = [\n \"params\",\n \"hru_state\",\n \"basin_state\",\n \"nc_index\",\n \"name\",\n \"area\",\n \"elevation\",\n \"latitude\",\n \"longitude\",\n \"region_id\",\n ]\n\n def __init__(self, workdir: Union[str, Path] = None):\n \"\"\"Initialize the RAVEN model.\n\n Directory for the model configuration and outputs. 
If None, a temporary directory will be created.\n \"\"\"\n\n if not RAVEN_EXEC_PATH:\n raise RuntimeError(\n \"Could not find raven binary in PATH, and RAVENPY_RAVEN_BINARY_PATH env variable is not set\"\n )\n\n if not OSTRICH_EXEC_PATH:\n raise RuntimeError(\n \"Could not find ostrich binary in PATH, and RAVENPY_OSTRICH_BINARY_PATH env variable is not set\"\n )\n\n self.raven_exec = RAVEN_EXEC_PATH\n self.ostrich_exec = OSTRICH_EXEC_PATH\n\n workdir = workdir or tempfile.mkdtemp()\n self._rvs = []\n\n self.rvi = RV()\n self.rvp = RV()\n self.rvc = RV()\n self.rvt = RV()\n self.rvh = RV()\n self.rvd = RV() # rvd is for derived parameters\n\n self.workdir = Path(workdir)\n self.ind_outputs = {} # Individual files for all simulations\n self.outputs = {} # Aggregated files\n self.singularity = False # Set to True to launch Raven with singularity.\n self.raven_simg = None # ravenpy.raven_simg\n self._name = None\n self._defaults = {}\n self.rvfiles = {}\n\n # Configuration file extensions + rvd for derived parameters.\n self._rvext = self._rvext + (\"rvd\",)\n\n # For subclasses where the configuration file templates are known in advance.\n if self.templates:\n self.configure(self.templates)\n\n # Directory logic\n # Top directory inside workdir. This is where Ostrich and its config and templates are stored.\n self.model_dir = \"model\" # Path to the model configuration files.\n self.final_dir = \"final\"\n self.output_dir = \"output\"\n\n self.exec_path = self.workdir / \"exec\"\n self.final_path = self.workdir / self.final_dir\n self._psim = 0\n self._pdim = None # Parallel dimension (either initparam, params or region)\n\n @property\n def output_path(self):\n return self.model_path / self.output_dir\n\n @property\n def model_path(self):\n return self.exec_path / self.model_dir / \"p{:02}\".format(self.psim)\n\n @property\n def raven_cmd(self):\n \"\"\"Path to the Raven executable.\"\"\"\n return self.model_path / \"raven\"\n\n @property\n def version(self):\n import re\n\n out = subprocess.check_output(\n [\n self.raven_exec,\n ],\n input=b\"\\n\",\n )\n match = re.search(r\"Version (\\S+) \", out.decode(\"utf-8\"))\n if match:\n return match.groups()[0]\n else:\n raise AttributeError(\"Version not found: {}\".format(out))\n\n @property\n def psim(self):\n return self._psim\n\n @psim.setter\n def psim(self, value):\n if not isinstance(value, int):\n raise ValueError\n if isinstance(self.rvi, RVI):\n self.rvi.run_index = value\n self._psim = value\n\n @property\n def cmd(self):\n \"\"\"This is the main executable.\"\"\"\n return self.raven_cmd\n\n @property\n def bash_cmd(self):\n \"\"\"Bash command arguments.\"\"\"\n return [self.cmd, self.name, \"-o\", str(self.output_path)]\n\n @property\n def singularity_cmd(self):\n \"\"\"Run Singularity container.\"\"\"\n return [\n \"singularity\",\n \"run\",\n \"--bind\",\n \"{}:/data\".format(self.model_path),\n \"--bind\",\n \"{}:/data_out:rw\".format(self.output_path),\n self.raven_simg,\n self.name,\n ]\n\n @property\n def cmd_path(self):\n \"\"\"This is the main executable.\"\"\"\n return self.model_path\n\n @property\n def name(self):\n \"\"\"Name of the model configuration.\"\"\"\n return self._name\n\n @name.setter\n def name(self, x):\n self._name = x\n\n @property\n def configuration(self):\n \"\"\"Configuration dictionaries.\"\"\"\n return {ext: OrderedDict(getattr(self, ext).items()) for ext in self._rvext}\n\n @property\n def parameters(self):\n \"\"\"Dictionary storing all parameters.\"\"\"\n params = {}\n for key, val in 
self.configuration.items():\n params.update(val)\n return params\n\n @property\n def rvobjs(self):\n \"\"\"Generator for (ext, rv object).\"\"\"\n return {ext: getattr(self, ext) for ext in self._rvext}\n\n def configure(self, fns):\n \"\"\"Read configuration files.\"\"\"\n for fn in fns:\n rvf = RVFile(fn)\n if rvf.ext not in self._rvext + (\"txt\",):\n raise ValueError(\n \"rv contains unrecognized configuration file keys : {}.\".format(\n rvf.ext\n )\n )\n else:\n if rvf.ext.startswith(\"rv\"):\n setattr(self, \"name\", rvf.stem)\n self.rvfiles[rvf.ext] = rvf\n elif rvf.ext == \"txt\":\n self.rvfiles[rvf.stem] = rvf\n else:\n raise ValueError\n\n def assign(self, key, value):\n \"\"\"Assign parameter to rv object that has a key with the same name.\"\"\"\n\n assigned = False\n for ext, obj in self.rvobjs.items():\n if hasattr(obj, key):\n att = getattr(obj, key)\n\n # If att is a namedtuple, we get its class and try to instantiate it with the values passed.\n if isinstance_namedtuple(att) and isinstance(\n value, (list, tuple, np.ndarray)\n ):\n p = att.__class__(*value)\n setattr(obj, key, p)\n # If att is a RavenNcData, we expect a dict\n elif isinstance(att, RavenNcData):\n att.update(value)\n else:\n setattr(obj, key, value)\n assigned = True\n\n if not assigned:\n raise AttributeError(\"No configuration key named {}\".format(key))\n\n def derived_parameters(self):\n \"\"\"Subclassed by emulators. Defines model parameters that are a function of other parameters.\"\"\"\n return\n\n def _dump_rv(self):\n \"\"\"Write configuration files to disk.\"\"\"\n\n params = self.parameters\n\n for rvf in self.rvfiles.values():\n p = self.exec_path if rvf.is_tpl else self.model_path\n if (\n rvf.stem == \"OstRandomNumbers\"\n and isinstance(self.txt, Ost)\n and self.txt.random_seed == \"\"\n ):\n continue\n fn = rvf.write(p, **params)\n self._rvs.append(fn)\n\n def setup(self, overwrite=False):\n \"\"\"Create directory structure to store model input files, executable and output results.\n\n Model configuration files and time series inputs are stored directly in the working directory.\n\n workdir/ # Created by PyWPS. 
Is considered the model path.\n model/\n output/\n\n \"\"\"\n if overwrite:\n if self.model_path.exists():\n shutil.rmtree(str(self.exec_path))\n if self.final_path.exists():\n shutil.rmtree(str(self.final_path))\n\n # Create general subdirectories\n if not self.exec_path.exists():\n os.makedirs(str(self.exec_path)) # workdir/exec\n if not self.final_path.exists():\n os.makedirs(str(self.final_path)) # workdir/final\n\n def setup_model_run(self, ts):\n \"\"\"Create directory structure to store model input files, executable and output results.\n\n Parameters\n ----------\n ts : sequence\n Paths to input forcing files.\n index : int\n Run index.\n \"\"\"\n # Create configuration information from input files\n ncvars = self._assign_files(ts)\n self.rvt.update(ncvars)\n self.check_units()\n self.check_inputs()\n\n # Compute derived parameters\n self.derived_parameters()\n\n # Write configuration files in model directory\n if not self.model_path.exists():\n os.makedirs(self.model_path)\n os.makedirs(self.output_path)\n self._dump_rv()\n\n # Create symbolic link to input files\n for fn in ts:\n if not (self.model_path / Path(fn).name).exists():\n os.symlink(str(fn), str(self.model_path / Path(fn).name))\n\n # Create symbolic link to Raven executable\n if not self.raven_cmd.exists():\n os.symlink(self.raven_exec, str(self.raven_cmd))\n\n # Shell command to run the model\n if self.singularity:\n cmd = self.singularity_cmd\n else:\n cmd = self.bash_cmd\n\n return cmd\n\n def run(self, ts, overwrite=False, **kwds):\n \"\"\"Run the model.\n\n Parameters\n ----------\n ts : path or sequence\n Sequence of input file paths. Symbolic links to those files will be created in the model directory.\n overwrite : bool\n Whether or not to overwrite existing model and output files.\n **kwds : dict\n Raven parameters used to fill configuration file templates.\n\n Create a work directory with a model/ and output/ subdirectories, write the configuration files in model/ and\n launch the Raven executable. If the configuration files are templates, values can be formatted by passing\n dictionaries keyed by their extension.\n\n Examples\n --------\n >>> r = Raven()\n >>> r.configure(rvi='path to template', rvp='...'}\n >>> r.run(ts, start_date=dt.datetime(2000, 1, 1), area=1000, X1=67)\n\n \"\"\"\n if isinstance(ts, (str, Path)):\n ts = [ts]\n\n # Case for potentially parallel parameters\n pdict = {}\n for p in self._parallel_parameters:\n a = kwds.pop(p, None)\n\n if a is not None and p in [\"params\"]:\n pdict[p] = np.atleast_2d(a)\n else:\n pdict[p] = np.atleast_1d(a)\n\n # Number of parallel loops is dictated by the number of parallel parameters or nc_index.\n plen = {pp: len(pdict[pp]) for pp in self._parallel_parameters + [\"nc_index\"]}\n\n # Find the longest parallel array and its length\n longer, nloops = max(plen.items(), key=operator.itemgetter(1))\n\n # Assign the name of the parallel dimension\n # nbasins is set by RavenC++\n if nloops > 1:\n self._pdim = {\n \"params\": \"params\",\n \"hru_state\": \"state\",\n \"basin_state\": \"state\",\n \"nc_index\": \"nbasins\",\n }[longer]\n\n for key, val in pdict.items():\n if len(val) not in [1, nloops]:\n raise ValueError(\n \"Parameter {} has incompatible dimension: {}. 
\"\n \"Should be 1 or {}.\".format(key, len(val), nloops)\n )\n\n # Resize parallel parameters to the largest size\n for key, val in pdict.items():\n if len(val) == 1:\n pdict[key] = val.repeat(nloops, axis=0)\n\n # Update non-parallel parameter objects\n for key, val in kwds.items():\n\n if key in self._rvext:\n obj = getattr(self, key)\n if isinstance(val, dict):\n obj.update(val)\n elif isinstance(val, RV):\n setattr(self, key, val)\n else:\n raise ValueError(\n \"A dictionary or an RV instance is expected to update the values \"\n \"for {}.\".format(key)\n )\n else:\n self.assign(key, val)\n\n if self.rvi:\n self.handle_date_defaults(ts)\n self.set_calendar(ts)\n\n # Loop over parallel parameters - sets self.rvi.run_index\n procs = []\n for self.psim in range(nloops):\n for key, val in pdict.items():\n if val[self.psim] is not None:\n self.assign(key, val[self.psim])\n\n cmd = self.setup_model_run(tuple(map(Path, ts)))\n procs.append(\n subprocess.Popen(cmd, cwd=self.cmd_path, stdout=subprocess.PIPE)\n )\n\n return procs\n\n def __call__(self, ts, overwrite=False, **kwds):\n self.setup(overwrite)\n procs = self.run(ts, overwrite, **kwds)\n\n for proc in procs:\n proc.wait()\n # Julie: For debugging\n # for line in iter(proc.stdout.readline, b''):\n # print(line)\n try:\n self.parse_results()\n err = self.parse_errors()\n if \"ERROR\" in err:\n raise UserWarning(\"Simulation error\")\n\n except UserWarning as e:\n err = self.parse_errors()\n msg = \"\"\"\n **************************************************************\n Path : {dir}\n **************************************************************\n {err}\n \"\"\".format(\n dir=self.cmd_path, err=err\n )\n print(msg)\n raise e\n\n def resume(self, solution=None):\n \"\"\"Set the initial state to the state at the end of the last run.\n\n Parameters\n ----------\n solution : str, Path\n Path to solution file. If None, will use solution from last model run if any.\n \"\"\"\n if solution is None:\n fn = self.outputs[\"solution\"]\n else:\n fn = solution\n\n self.rvc.parse(Path(fn).read_text())\n\n def parse_results(self, path=None, run_name=None):\n \"\"\"Store output files in the self.outputs dictionary.\"\"\"\n # Output files default names. 
The actual output file names will be composed of the run_name and the default\n # name.\n path = path or self.exec_path\n run_name = run_name or getattr(self.rvi, \"run_name\", \"\")\n patterns = {\n \"hydrograph\": f\"{run_name}*Hydrographs.nc\",\n \"storage\": f\"{run_name}*WatershedStorage.nc\",\n \"solution\": f\"{run_name}*solution.rvc\",\n \"diagnostics\": f\"{run_name}*Diagnostics.csv\",\n }\n\n for key, pattern in patterns.items():\n # There are no diagnostics if a streamflow time series is not provided.\n try:\n fns = self._get_output(pattern, path=path)\n except UserWarning as exc:\n if key != \"diagnostics\":\n raise exc\n else:\n continue\n\n fns.sort()\n self.ind_outputs[key] = fns\n self.outputs[key] = self._merge_output(fns, pattern[1:])\n\n self.outputs[\"rv_config\"] = self._merge_output(self.rvs, \"rv.zip\")\n\n def _merge_output(self, files, name):\n \"\"\"Merge multiple output files into one if possible, otherwise return a list of files.\"\"\"\n import zipfile\n\n # If there is only one file, return its name directly.\n if len(files) == 1:\n return files[0]\n\n # Otherwise try to create a new file aggregating all files.\n outfn = self.final_path / name\n\n if name.endswith(\".nc\") and not isinstance(\n self, ravenpy.models.RavenMultiModel\n ):\n ds = [xr.open_dataset(fn) for fn in files]\n try:\n # We aggregate along the pdim dimensions.\n out = xr.concat(ds, self._pdim, data_vars=\"all\")\n out.to_netcdf(outfn)\n return outfn\n except (ValueError, KeyError):\n pass\n\n # Let's zip the files that could not be merged.\n outfn = outfn.with_suffix(\".zip\")\n\n # Find the lower file parts level at which there are differences among files.\n i = get_diff_level(files)\n\n # Try to create a zip file\n with zipfile.ZipFile(outfn, \"w\") as f:\n for fn in files:\n len(fn.parts)\n f.write(fn, arcname=fn.relative_to(Path(*fn.parts[:i])))\n\n return outfn\n\n def parse_errors(self):\n files = self._get_output(\"Raven_errors.txt\", self.exec_path)\n out = \"\"\n for f in files:\n out += f.read_text()\n return out\n\n def _assign_files(self, fns):\n \"\"\"Find for each variable the file storing it's data and the name of the netCDF variable.\n\n Parameters\n ----------\n fns : sequence\n Paths to netCDF files.\n\n Returns\n -------\n dict\n A dictionary keyed by variable storing the `RavenNcData` instance storing each variable's configuration\n information.\n \"\"\"\n ncvars = {}\n for fn in fns:\n if \".nc\" in fn.suffix:\n with xr.open_dataset(fn) as ds:\n for var, alt_names in self._variable_names.items():\n # Check that the emulator is expecting that variable.\n if var not in self.rvt.keys():\n continue\n\n # Check if any alternate variable name is in the file.\n for alt_name in alt_names:\n if alt_name in ds.data_vars:\n ncvars[var] = dict(\n var=var,\n path=fn,\n var_name=alt_name,\n dimensions=ds[alt_name].dims,\n units=ds[alt_name].attrs.get(\"units\"),\n )\n if \"GRIB_stepType\" in ds[alt_name].attrs:\n ncvars[var][\"deaccumulate\"] = (\n ds[alt_name].attrs[\"GRIB_stepType\"] == \"accum\"\n )\n break\n return ncvars\n\n def _get_output(self, pattern, path):\n \"\"\"Match actual output files to known expected files.\n\n Return a dictionary of file paths for each expected input.\n \"\"\"\n files = list(path.rglob(pattern))\n\n if len(files) == 0:\n if not (isinstance(self.rvi, RVI) and self.rvi.suppress_output):\n raise UserWarning(\"No output files for {} in {}.\".format(pattern, path))\n\n return [f.absolute() for f in files]\n\n @staticmethod\n def start_end_date(fns):\n 
\"\"\"Return the common starting and ending date and time of netCDF files.\n\n Parameters\n ----------\n fns : sequence\n Sequence of netCDF file names for forcing data.\n\n Returns\n -------\n start : datetime\n The first datetime of the forcing files.\n end : datetime\n The last datetime of the forcing files.\n \"\"\"\n\n ds = xr.open_mfdataset(fns, combine=\"by_coords\")\n return ds.indexes[\"time\"][0], ds.indexes[\"time\"][-1]\n\n @staticmethod\n def get_calendar(fns):\n \"\"\"Return the calendar.\"\"\"\n ds = xr.open_mfdataset(fns, combine=\"by_coords\")\n return ds.time.encoding.get(\"calendar\", \"standard\")\n\n def set_calendar(self, ts):\n \"\"\"Set the calendar in the RVI configuration.\"\"\"\n self.rvi.calendar = self.get_calendar(ts)\n\n def handle_date_defaults(self, ts):\n # Get start and end date from file\n start, end = self.start_end_date(ts)\n\n rvi = self.rvi\n if rvi.start_date in [None, dt.datetime(1, 1, 1)]:\n rvi.start_date = start\n\n if rvi.end_date in [None, dt.datetime(1, 1, 1)]:\n rvi.end_date = end\n\n @property\n def rvs(self):\n return self._rvs\n\n @property\n def q_sim(self):\n \"\"\"Return a view of the hydrograph time series.\n\n This view will be overwritten by successive calls to `run`. To make a copy of this DataArray that will\n persist in memory, use `q_sim.copy(deep=True)`.\n \"\"\"\n if isinstance(self.hydrograph, list):\n return [h.q_sim for h in self.hydrograph]\n\n return self.hydrograph.q_sim\n\n @property\n def hydrograph(self):\n \"\"\"Return a view of the current output file.\n\n If the model is run multiple times, hydrograph will point to the latest version. To store the results of\n multiple runs, either create different model instances or explicitly copy the file to another disk location.\n \"\"\"\n if self.outputs[\"hydrograph\"].suffix == \".nc\":\n return xr.open_dataset(self.outputs[\"hydrograph\"])\n elif self.outputs[\"hydrograph\"].suffix == \".zip\":\n return [xr.open_dataset(fn) for fn in self.ind_outputs[\"hydrograph\"]]\n else:\n raise ValueError\n\n @property\n def storage(self):\n if self.outputs[\"storage\"].suffix == \".nc\":\n return xr.open_dataset(self.outputs[\"storage\"])\n elif self.outputs[\"storage\"].suffix == \".zip\":\n return [xr.open_dataset(fn) for fn in self.ind_outputs[\"storage\"]]\n else:\n raise ValueError\n\n @property\n def solution(self):\n if self.outputs[\"solution\"].suffix == \".rvc\":\n return parse_solution(self.outputs[\"solution\"].read_text())\n elif self.outputs[\"solution\"].suffix == \".zip\":\n return [\n parse_solution(fn.read_text()) for fn in self.ind_outputs[\"solution\"]\n ]\n\n def get_final_state(self, hru_index=1, basin_index=1):\n \"\"\"Return model state at the end of simulation.\n\n Parameters\n ----------\n hru_index : None, int\n Set index value or None to get all HRUs.\n basin_index : None, int\n Set index value or None to get all basin states.\n \"\"\"\n solution = self.solution\n if isinstance(solution, dict):\n return get_states(solution, hru_index, basin_index)\n else:\n return zip(*[get_states(s, hru_index, basin_index) for s in solution])\n\n @property\n def diagnostics(self):\n diag = []\n for fn in self.ind_outputs[\"diagnostics\"]:\n with open(fn) as f:\n reader = csv.reader(f.readlines())\n header = next(reader)\n content = next(reader)\n\n out = dict(zip(header, content))\n out.pop(\"\")\n\n for key, val in out.items():\n if \"DIAG\" in key:\n out[key] = float(val)\n diag.append(out)\n\n return diag if len(diag) > 1 else diag[0]\n\n @property\n def tags(self):\n 
\"\"\"Return a list of tags within the templates.\"\"\"\n out = []\n for rvf in self.rvfiles.values():\n out.extend(rvf.tags)\n\n return out\n\n @staticmethod\n def split_ext(fn):\n \"\"\"Return the name and rv key of the configuration file.\"\"\"\n if isinstance(fn, str):\n fn = Path(fn)\n\n return fn.stem, fn.suffix[1:]\n\n def check_units(self):\n \"\"\"Check that the input file units match expectations.\"\"\"\n for var, nc in self.rvt.items():\n if isinstance(nc, RavenNcData) and nc.var is not None:\n nc._check_units()\n\n def check_inputs(self):\n \"\"\"Check that necessary variables are defined.\"\"\"\n has_file = {key for key, val in self.rvt.items() if val is not None}\n vars = list(self.rvt.keys())\n\n for var in vars:\n if var not in has_file and var != \"nc_index\":\n if var in [\"tasmin\", \"tasmax\"] and \"tas\" in has_file:\n pass # This is OK\n if var == \"tas\" and has_file.issuperset([\"tasmin\", \"tasmax\"]):\n pass\n elif var in [\"prsn\"]:\n pass # Ok, can be guessed from temp ?\n elif var in [\"evspsbl\"]:\n pass # Ok, can be computed by Oudin ?\n elif var in [\"water_volume_transport_in_river_channel\"]:\n pass # Ok, not strictly necessary for simulations ?\n else:\n raise ValueError(\"{} not found in files.\".format(var))\n\n\nclass Ostrich(Raven):\n \"\"\"Wrapper for OSTRICH calibration of RAVEN hydrological model.\n\n This class is used to calibrate RAVEN model using OSTRICH from user-provided configuration files. It can also be\n subclassed with configuration templates for emulated models, allowing direct calls to the models.\n\n Parameters\n ----------\n conf:\n The rv configuration files + Ostrict ostIn.txt.\n tpl:\n The Ostrich templates.\n\n Examples\n --------\n >>> r = Ostrich('/tmp/testdir')\n >>> r.configure()\n \"\"\"\n\n identifier = \"generic-ostrich\"\n _rvext = (\"rvi\", \"rvp\", \"rvc\", \"rvh\", \"rvt\", \"txt\")\n txt = RV()\n\n @property\n def model_path(self):\n return self.exec_path / self.model_dir\n\n @staticmethod\n def _allowed_extensions():\n return Raven._allowed_extensions() + (\"txt\",)\n\n @property\n def ostrich_cmd(self):\n \"\"\"OSTRICH executable path.\"\"\"\n return self.exec_path / \"ostrich\"\n\n @property\n def cmd(self):\n \"\"\"OSTRICH executable path.\"\"\"\n return self.ostrich_cmd\n\n @property\n def cmd_path(self):\n \"\"\"This is the main executable.\"\"\"\n return self.exec_path\n\n @property\n def proc_path(self):\n \"\"\"Path to Ostrich parallel process directory.\"\"\"\n return self.exec_path / \"processor_0\" # /'model' / 'output' ?\n\n def write_save_best(self):\n fn = self.exec_path / \"save_best.sh\"\n fn.write_text(save_best)\n make_executable(fn)\n\n def write_ostrich_runs_raven(self):\n fn = self.exec_path / \"ostrich-runs-raven.sh\"\n fn.write_text(ostrich_runs_raven.format(name=self.name))\n make_executable(fn)\n\n def setup(self, overwrite=False):\n \"\"\"Create directory structure to store model input files, executable, and output results.\n\n Model configuration files and time series inputs are stored directly in the working directory.\n At each Ostrich loop, configuration files (original and created from templates are copied into model).\n \"\"\"\n Raven.setup(self, overwrite)\n\n os.makedirs(str(self.final_path), exist_ok=True)\n\n self.write_ostrich_runs_raven()\n self.write_save_best()\n\n # Create symbolic link to executable\n os.symlink(self.ostrich_exec, str(self.cmd))\n\n def parse_results(self):\n \"\"\"Store output files in the self.outputs dictionary.\"\"\"\n # Output files default names. 
The actual output file names will be composed of the run_name and the default\n # name.\n Raven.parse_results(self, path=self.final_path)\n\n patterns = {\n \"params_seq\": \"OstModel?.txt\",\n \"calibration\": \"OstOutput?.txt\",\n }\n\n # Store output file names in dict\n for key, pattern in patterns.items():\n fns = self._get_output(pattern, path=self.exec_path)\n if len(fns) == 1:\n fns = fns[0]\n self.outputs[key] = fns\n\n try:\n self.outputs[\"calibparams\"] = \", \".join(map(str, self.calibrated_params))\n except AttributeError:\n err = self.parse_errors()\n raise UserWarning(err)\n\n def parse_errors(self):\n try:\n raven_err = self._get_output(\"OstExeOut.txt\", path=self.exec_path)[\n 0\n ].read_text()\n except UserWarning: # Read in processor_0 directory instead.\n try:\n raven_err = self._get_output(\"OstExeOut.txt\", path=self.proc_path)[\n 0\n ].read_text()\n except UserWarning:\n raven_err = \"\"\n\n try:\n ost_err = self._get_output(\"OstErrors?.txt\", path=self.exec_path)[\n 0\n ].read_text()\n except UserWarning: # Read in processor_0 directory instead.\n ost_err = self._get_output(\"OstErrors?.txt\", path=self.proc_path)[\n 0\n ].read_text()\n\n return f\"{ost_err}\\n{raven_err}\"\n\n def parse_optimal_parameter_set(self):\n \"\"\"Return dictionary of optimal parameter set.\"\"\"\n import re\n\n txt = open(self.outputs[\"calibration\"]).read()\n ops = re.search(r\".*Optimal Parameter Set(.*?)\\n{2}\", txt, re.DOTALL).groups()[\n 0\n ]\n\n p = re.findall(r\"(\\w+)\\s*:\\s*([\\S]+)\", ops)\n return OrderedDict((k, float(v)) for k, v in p)\n\n def ost2raven(self, ops):\n \"\"\"Return model parameters.\n\n Notes\n -----\n This method should be subclassed by emulators for which Ostrich has different parameters than the original\n Raven model.\n \"\"\"\n if hasattr(self, \"params\"):\n n = len(self.params._fields)\n pattern = \"par_x{}\" if n < 8 else \"par_x{:02}\"\n names = [pattern.format(i + 1) for i in range(n)]\n return self.params(*(ops[n] for n in names))\n else:\n return ops.values()\n\n @property\n def calibrated_params(self):\n \"\"\"The dictionary of optimal parameters estimated by Ostrich.\"\"\"\n ops = self.parse_optimal_parameter_set()\n return self.ost2raven(ops)\n\n @property\n def obj_func(self):\n return np.loadtxt(self.outputs[\"params_seq\"], skiprows=1)[-1, 1]\n\n @property\n def optimized_parameters(self):\n \"\"\"These are the raw parameters returned by Ostrich.\"\"\"\n return np.loadtxt(self.outputs[\"params_seq\"], skiprows=1)[-1, 2:]\n\n\ndef get_diff_level(files):\n \"\"\"Return the lowest hierarchical file parts level at which there are differences among file paths.\"\"\"\n\n for i, parts in enumerate(zip(*[f.parts for f in files])):\n if len(set(parts)) > 1:\n return i\n\n\ndef make_executable(fn):\n \"\"\"Make file executable.\"\"\"\n st = os.stat(fn)\n os.chmod(fn, st.st_mode | stat.S_IEXEC)\n\n\n# TODO: Configure this according to the model_path and output_path.\nsave_best = \"\"\"#!/bin/bash\n\nset -e\n\ncp ./model/*.rv? ../../final/\ncp ./model/output/* ../../final/\n\nexit 0\n\"\"\"\n\n# TODO: Configure this according to raven_cmd, name and output_path.\nostrich_runs_raven = \"\"\"\n#!/bin/bash\n\nset -e\n\ncp ./*.rv? model/\n\n./model/raven ./model/{name} -o ./model/output/\n\nexit 0\n\"\"\"\n" ]
[ [ "numpy.atleast_1d", "numpy.atleast_2d", "numpy.loadtxt" ] ]
mdnls/tramp
[ "5b8be12df2003ba1364e8a5a83fe597558c848c3" ]
[ "examples/glm/data/compressed_sensing_ep_vs_se.py" ]
[ "import logging\nimport numpy as np\nfrom tramp.models import glm_generative\nfrom tramp.experiments import save_experiments, BayesOptimalScenario\nfrom tramp.algos import EarlyStopping, EarlyStoppingEP\n\ndef run_cs(N, alpha, ensemble_type, prior_rho):\n model = glm_generative(\n N=N, alpha=alpha, ensemble_type=ensemble_type, \n prior_type=\"gauss_bernoulli\", output_type=\"gaussian\",\n prior_rho=prior_rho, output_var=1e-11\n )\n scenario = BayesOptimalScenario(model, x_ids=[\"x\"])\n early = EarlyStopping()\n records = scenario.run_all(\n metrics=[\"mse\"], max_iter=200, callback=early\n )\n return records\n\nif __name__==\"__main__\":\n csv_file = __file__.replace(\".py\", \".csv\")\n logging.basicConfig(level=logging.INFO)\n save_experiments(\n run_cs, csv_file, \n N=1000, ensemble_type=\"gaussian\",\n prior_rho=[0.25, 0.50, 0.75], alpha=np.linspace(0,1,50)[1:]\n )" ]
[ [ "numpy.linspace" ] ]
AndrewRook/polling_simulator
[ "df390b26bb7f2f6fd019813de1ab188489a68d61" ]
[ "tests/test_core.py" ]
[ "import numpy as np\nimport pandas as pd\n\nfrom polling_simulator import core\n\n\nclass TestVariable:\n\n def test_instantiates_ok(self):\n var = core.Variable(\"woo\", lambda x: np.ones(x))\n assert var.name == \"woo\"\n\n\nclass TestSegmentationVariable:\n def test_general_working(self):\n var1 = core.Variable(\"var1\", lambda x: np.ones(x))\n var2 = core.Variable(\"var2\", lambda x: np.ones(x))\n var3 = core.Variable(\"var3\", lambda x: np.ones(x))\n\n seg = (\n ((var1 > 3) & (var2 == 5)) |\n (\n (var1 == 10) &\n ((var2 < var1) | (var3 > 5))\n )\n )\n seg_variables = seg.variables\n assert len(seg_variables) == 3\n assert seg_variables[0] is var1\n assert seg_variables[1] is var2\n assert seg_variables[2] is var3\n\n\nclass TestSegmentationSegment:\n def test_general_working(self):\n var = core.Variable(\"var\", lambda x: np.ones(x))\n seg = (var >= 3)\n data = pd.DataFrame({\"var\": [1, 2, 3, 4, 5]})\n segment_mask = seg.segment(data)\n pd.testing.assert_series_equal(\n segment_mask,\n pd.Series([False, False, True, True, True], name=\"var\")\n )\n\n def test_multiple_segments(self):\n var1 = core.Variable(\"var1\", lambda x: np.ones(x))\n var2 = core.Variable(\"var2\", lambda x: np.ones(x))\n seg1 = var1 >= 3\n seg2 = var2 < 5\n seg = seg1 & seg2\n data = pd.DataFrame({\n \"var1\": [1, 2, 3, 4, 5],\n \"var2\": [1, 5, 1, 5, 1]\n })\n\n segment_mask = seg.segment(data)\n pd.testing.assert_series_equal(\n segment_mask,\n pd.Series([False, False, True, False, True])\n )\n\n def test_order_of_operation(self):\n data = pd.DataFrame({\n \"var1\": [1, 2, 3, 4, 5],\n \"var2\": [1, 5, 1, 5, 1]\n })\n var1 = core.Variable(\"var1\", lambda x: np.ones(x))\n var2 = core.Variable(\"var2\", lambda x: np.ones(x))\n seg1 = var1 >= 4\n seg2 = var2 < 5\n seg3 = (var1 == 2)\n\n seg_explicit_order = (seg3 | seg1) & seg2\n segment_explicit_order_mask = seg_explicit_order.segment(data)\n pd.testing.assert_series_equal(\n segment_explicit_order_mask,\n pd.Series([False, False, False, False, True])\n )\n\n seg_implicit_order = seg3 | seg1 & seg2\n segment_implicit_order_mask = seg_implicit_order.segment(data)\n pd.testing.assert_series_equal(\n segment_implicit_order_mask,\n pd.Series([False, True, False, False, True])\n )\n\nclass TestSegmentationStr:\n def test_works_complex_case(self):\n var1 = core.Variable(\"var1\", lambda x: np.ones(x))\n var2 = core.Variable(\"var2\", lambda x: np.ones(x))\n\n seg1 = var1 >= 4\n seg2 = var2 != var1\n seg3 = seg2 == False\n final_seg = seg1 & (seg2 | seg3)\n assert str(final_seg) == \"(var1 >= 4) & ((var2 != var1) | ((var2 != var1) == False))\"" ]
[ [ "pandas.Series", "pandas.DataFrame", "numpy.ones" ] ]
bmahlbrand/DeepSDF
[ "a55416a995ae1c918f18e32681d262853b496788" ]
[ "deep_sdf/mesh.py" ]
[ "#!/usr/bin/env python3\n# Copyright 2004-present Facebook. All Rights Reserved.\n\nimport logging\nimport numpy as np\nimport plyfile\nimport skimage.measure\nimport time\nimport torch\n\nimport deep_sdf.utils\n\n\ndef create_mesh(decoder, latent_vec, filename, N=256, max_batch=32 ** 3):\n start = time.time()\n ply_filename = filename\n\n decoder.eval()\n\n # NOTE: the voxel_origin is actually the (bottom, left, down) corner, not the middle\n voxel_origin = [-1, -1, -1]\n voxel_size = 2.0 / (N - 1)\n\n overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor())\n samples = torch.zeros(N ** 3, 4)\n\n # transform first 3 columns\n # to be the x, y, z index\n samples[:, 2] = overall_index % N\n samples[:, 1] = (overall_index.long() / N) % N\n samples[:, 0] = ((overall_index.long() / N) / N) % N\n\n # transform first 3 columns\n # to be the x, y, z coordinate\n samples[:, 0] = (samples[:, 0] * voxel_size) + voxel_origin[2]\n samples[:, 1] = (samples[:, 1] * voxel_size) + voxel_origin[1]\n samples[:, 2] = (samples[:, 2] * voxel_size) + voxel_origin[0]\n\n num_samples = N ** 3\n\n samples.requires_grad = False\n\n head = 0\n\n while head < num_samples:\n sample_subset = samples[head : min(head + max_batch, num_samples), 0:3].cuda()\n\n samples[head : min(head + max_batch, num_samples), 3] = (\n deep_sdf.utils.decode_sdf(decoder, latent_vec, sample_subset).squeeze(1).detach().cpu()\n )\n head += max_batch\n\n sdf_values = samples[:, 3]\n sdf_values = sdf_values.reshape(N, N, N)\n\n end = time.time()\n print(\"sampling takes: %f\" % (end - start))\n\n convert_sdf_samples_to_ply(\n sdf_values.data.cpu(), voxel_origin, voxel_size, ply_filename + \".ply\"\n )\n\n\ndef convert_sdf_samples_to_ply(\n pytorch_3d_sdf_tensor, voxel_grid_origin, voxel_size, ply_filename_out\n):\n \"\"\"\n Convert sdf samples to .ply\n\n :param pytorch_3d_sdf_tensor: a torch.FloatTensor of shape (n,n,n)\n :voxel_grid_origin: a list of three floats: the bottom, left, down origin of the voxel grid\n :voxel_size: float, the size of the voxels\n :ply_filename_out: string, path of the filename to save to\n\n This function adapted from: https://github.com/RobotLocomotion/spartan\n \"\"\"\n start_time = time.time()\n\n numpy_3d_sdf_tensor = pytorch_3d_sdf_tensor.numpy()\n\n verts, faces, normals, values = skimage.measure.marching_cubes_lewiner(\n numpy_3d_sdf_tensor, level=0.0, spacing=[voxel_size] * 3\n )\n\n # transform from voxel coordinates to camera coordinates\n # note x and y are flipped in the output of marching_cubes\n mesh_points = np.zeros_like(verts)\n mesh_points[:, 0] = voxel_grid_origin[0] + verts[:, 0]\n mesh_points[:, 1] = voxel_grid_origin[1] + verts[:, 1]\n mesh_points[:, 2] = voxel_grid_origin[2] + verts[:, 2]\n\n # try writing to the ply file\n\n num_verts = verts.shape[0]\n num_faces = faces.shape[0]\n\n verts_tuple = np.zeros((num_verts,), dtype=[(\"x\", \"f4\"), (\"y\", \"f4\"), (\"z\", \"f4\")])\n\n for i in range(0, num_verts):\n verts_tuple[i] = tuple(mesh_points[i, :])\n\n faces_building = []\n for i in range(0, num_faces):\n faces_building.append(((faces[i, :].tolist(),)))\n faces_tuple = np.array(faces_building, dtype=[(\"vertex_indices\", \"i4\", (3,))])\n\n el_verts = plyfile.PlyElement.describe(verts_tuple, \"vertex\")\n el_faces = plyfile.PlyElement.describe(faces_tuple, \"face\")\n\n ply_data = plyfile.PlyData([el_verts, el_faces])\n logging.debug(\"saving mesh to %s\" % (ply_filename_out))\n ply_data.write(ply_filename_out)\n\n logging.debug(\n \"converting to ply format and writing to 
file took {} s\".format(time.time() - start_time)\n )\n" ]
[ [ "torch.LongTensor", "torch.zeros", "numpy.zeros_like", "numpy.array", "numpy.zeros" ] ]
raoulbia/Python-for-Data-Science
[ "5a25b1c734f48e77c652a1bfd68513fbd2e45891" ]
[ "Tutorials/Exercise Solution.py" ]
[ "# Import numpy\nimport numpy as np\n\n# store the variables in arrays\nprob = np.array([0.25, 0.5, 0.25])\nrate_1 = np.array([0.05, 0.075, 0.10])\nrate_2 = np.array([0.2, 0.15, 0.1])\n\n# expected return of each investment\nexpected_return1 = np.sum(prob * rate_1)\nexpected_return2 = np.sum(prob * rate_2)\n\n# expected return of the equally weighted portfolio\nweights = np.array([0.5, 0.5])\nindividual_returns = np.array([expected_return1, expected_return2])\nportfolio_returns = np.dot(weights, individual_returns)\n\n# covariance matrix given probabilities\ncov_matrix = np.cov(rate_1, rate_2, ddof=0, aweights=prob)\n\n# variance and standard deviation of each investment\nvar1 = cov_matrix[0,0]\nvar2 = cov_matrix[1,1]\nstd1 = np.sqrt(var1)\nstd2 = np.sqrt(var2)\n\n# correlation between Asset 1 & 2's returns\ncov = cov_matrix[0,1]\ncorr = cov / (std1 * std2)\n\n# variance of portfolio\nportfolio_var = np.dot(weights.T, np.dot(cov_matrix, weights))\n\n# standard deviation (volatility of the portfolio)\nportfolio_vols = np.sqrt(portfolio_var)\n\ndef percentage (number):\n return str(round(number, 4) * 100) + '%'\n\nprint('Expected Return of Investment 1 = {}'.format(percentage(expected_return1)))\nprint('Expected Return of Investment 2 = {}'.format(percentage(expected_return2)))\nprint('Expected Return of Portfolio = {}'.format(percentage(portfolio_returns)))\nprint('Standard Deviation of Investment 1 = {}'.format(percentage(std1)))\nprint('Standard Deviation of Investment 1 = {}'.format(percentage(std2)))\nprint('Correlation between Returns of 1 & 2 = {}'.format(round(corr, 4)))\nprint('Risk of Portfilio = {}'.format(percentage(portfolio_vols)))\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.cov", "numpy.array", "numpy.sum" ] ]
zhuyuanxiang/deep-learning-with-python-notebooks
[ "6b6b5670193f5a26321c36de3b547203e30dc8c7" ]
[ "ch03/ch0306_regression.py" ]
[ "# -*- encoding: utf-8 -*- \n\"\"\"\n@Author : zYx.Tom\n@Contact : 526614962@qq.com\n@site : https://zhuyuanxiang.github.io\n---------------------------\n@Software : PyCharm\n@Project : deep-learning-with-python-notebooks\n@File : ch0306_regression.py\n@Version : v0.1\n@Time : 2019-11-18 11:14\n@License : (C)Copyright 2018-2019, zYx.Tom\n@Reference : 《Python 深度学习,Francois Chollet》, Sec0306,P66\n@Desc : 神经网络入门,神经网络解决\n\"\"\"\nimport os\nimport sys\nimport sklearn\nimport winsound\nimport numpy as np # pip install numpy<1.17,小于1.17就不会报错\nimport matplotlib.pyplot as plt\nfrom keras.datasets import boston_housing\nfrom keras import models\nfrom keras import layers\nfrom keras import activations\nfrom keras import optimizers\nfrom keras import losses\nfrom keras import metrics\n\n# 屏蔽警告:Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n# 设置数据显示的精确度为小数点后3位\nnp.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)\n# to make this notebook's output stable across runs\nseed = 42\nnp.random.seed(seed)\n# Python ≥3.5 is required\nassert sys.version_info >= (3, 5)\n# Scikit-Learn ≥0.20 is required\nassert sklearn.__version__ >= \"0.20\"\n# numpy 1.16.4 is required\nassert np.__version__ in [\"1.16.5\", \"1.16.4\"]\n\n# 3.6.1 Boston 房价数据集\nprint(\"* Code 3-24 加载数据集...\")\n(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()\nprint(\"\\t训练数据集(train_data):404 条数据;测试数据集(test_data):102 条数据\")\nprint(\"\\t\\tlen(train_data) =\", len(train_data))\nprint(\"\\t\\tlen(test_data) =\", len(test_data))\nprint(\"\\t数据集中每条数据有13个特征,每个特征值为一个实数\")\nprint(\"\\t\\tlen(train_data[0]) =\", len(train_data[0]))\nprint(\"\\t\\tlen(train_data[1]) =\", len(train_data[1]))\nprint(\"\\t\\ttrain_data[0] =\", train_data[0])\nprint(\"\\t每条标签对应具体的房价\")\nprint(\"\\t\\ttrain_targets[0] =\", train_targets[0])\nprint(\"\\t\\ttrain_targets[1] =\", train_targets[1])\n\n# 3.6.2 准备数据\nprint(\"* Code 3-25:数据标准化(零均值,单位方差)\")\nmean = train_data.mean(axis = 0)\ntrain_data -= mean\nstd = train_data.std(axis = 0)\ntrain_data /= std\n\ntest_data -= mean\ntest_data /= std\n\n\n# 3.6.3 构建网络\n# Code 3-26:模型定义\n# MSE(Mean Squared Error,均方误差):预测值与目标值之差的平方,回归问题常用的损失函数\n# MAE(Mean Absolute Error,平均绝对误差):预测值与目标值之差的绝对值。\ndef build_model():\n model = models.Sequential()\n model.add(layers.Dense(64, activation = activations.relu, input_shape = (train_data.shape[1],)))\n model.add(layers.Dense(64, activation = activations.relu))\n model.add(layers.Dense(1))\n model.compile(optimizer = optimizers.rmsprop(lr = 0.001),\n loss = losses.mse, metrics = [metrics.mae])\n return model\n\n\n# 3.6.4 K 折验证\nprint(\"* Code 3-27:K 折验证(保存每折的验证结果)\")\nk = 4\nnum_val_samples = len(train_data) // k\nnum_epochs = 500\nall_mae_histories = []\nall_scores = []\nfor i in range(k):\n print(\"\\tprocessing fold #\", i)\n\n val_start_index = i * num_val_samples\n val_end_index = (i + 1) * num_val_samples\n val_data = train_data[val_start_index: val_end_index]\n val_targets = train_targets[val_start_index: val_end_index]\n\n partial_train_data = np.concatenate([train_data[:val_start_index], train_data[val_end_index:]])\n partial_train_targets = np.concatenate([train_targets[:val_start_index], train_targets[val_end_index:]])\n\n model = build_model()\n history = model.fit(partial_train_data, partial_train_targets,\n validation_data = (val_data, val_targets),\n epochs = num_epochs, batch_size = 1, verbose = 0, use_multiprocessing = True)\n 
mae_history = history.history['val_mean_absolute_error']\n all_mae_histories.append(mae_history)\n val_mse, val_mae = model.evaluate(val_data, val_targets, verbose = 0)\n all_scores.append(val_mae)\n pass\nprint(\"\\t四轮交叉验证对测试集验证的MAE:\")\nprint(all_scores)\nprint(\"\\t四轮交叉验证对测试集验证的MAE的平均值:{}\".format(np.mean(all_scores)))\n\nprint(\"* Code 3-29:计算所有轮次中的 K 折验证分数平均值\")\naverage_mae_history = [np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]\nprint(\"* Code 3-30:绘制验证分数\")\nplt.figure()\nplt.plot(range(1, len(average_mae_history) + 1), average_mae_history)\nplt.xlabel(\"迭代次数\")\nplt.ylabel(\"验证集的平均MAE\")\n\n\n# 将每个数据点替换为前面数据点的指数移动平均值,以得到光滑的曲线\ndef smooth_curve(points, factor = 0.9):\n smoothed_points = []\n for point in points:\n if smoothed_points:\n previous = smoothed_points[-1]\n smoothed_points.append((previous * factor + point * (1 - factor)))\n else:\n smoothed_points.append(point)\n pass\n pass\n return smoothed_points\n\n\nprint(\"* Code 3-31:绘制验证分数(删除前10个数据点)\")\nsmooth_mae_history = smooth_curve(average_mae_history[10:])\nplt.figure()\nplt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)\nplt.xlabel(\"迭代次数\")\nplt.ylabel(\"验证集的平均MAE\")\n\n# 运行结束的提醒\nwinsound.Beep(600, 500)\nif len(plt.get_fignums()) != 0:\n plt.show()\npass\n" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.figure", "numpy.set_printoptions", "numpy.concatenate", "numpy.mean", "matplotlib.pyplot.get_fignums", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
christofs/romstat
[ "6ef7260290aa93a86e651ab09f8ce262f92e1331" ]
[ "code/viz_alle_umfang.py" ]
[ "import re\nfrom os.path import join\nimport glob\nimport os\nimport pandas as pd\nfrom datetime import date\nimport pygal\nfrom pygal.style import BlueStyle\nfrom pygal.style import DarkGreenBlueStyle\nfrom pygal.style import TurquoiseStyle\nfrom pygal.style import CleanStyle\nfrom collections import Counter\nfrom pygal.style import LightenStyle\n\n\ndef get_data(): \n\twith open(\"../data/romanistik-stellen_datensatz_2014-2021.csv\", \"r\", encoding=\"utf8\") as infile: \n\t\tdata = pd.read_csv(infile, sep=\"\\t\")\n\t\t#print(data.head())\n\t\treturn data\n\n\ndef prepare_data(data): \n\t# Filter down to useable data\n\tdata = data.fillna(0)\n\tdata = data.loc[:,[\"include\", \"umfang_prozent\"]]\n\tdata = data[data[\"include\"] == 1]\n\tdata = data[data[\"umfang_prozent\"] != \"N/A\"]\n\tprint(data.head())\n\tn = data.shape[0]\n\tprint(\"Anzahl der Datenpunkte\", n)\n\tfrom collections import Counter\n\tdata = dict(Counter(list(data.loc[:,\"umfang_prozent\"])))\n\tprint(data)\n\treturn data,n\n\n\ndef viz(data,n): \n\tdark_lighten_style = LightenStyle('#063d1e',\n\t\tstep=10, \n\t\tfont_family=\"FreeSans\",\n\t\tlabel_font_size = 12,\n\t\tmajor_label_font_size = 12,\n\t\tvalue_label_font_size = 12,\n\t\tvalue_font_size = 12,\n\t\ttitle_font_size = 16)\n\tchart = pygal.HorizontalBar(\n\t\tstyle=dark_lighten_style,\n\t\tprint_values = True,\n\t\tshow_legend = False,\n \tlegend_at_bottom = True,\n\t\tlegend_at_bottom_columns = 7,\n\t\tlegend_box_size=40)\n\tchart.title = \"Stellenumfang\"\n\tchart.x_title = \"Anteile des Stellenumfangs in Prozent (n=\"+str(n)+\")\"\n\tchart.y_title = \"Stellenumfang\"\n\tchart.x_labels = [\"100%\",\"90%\",\"80%\",\"75%\",\"65%\",\"50%\",\"25%\"]\n\tchart.add(\"Stellenumfang\", [data[100]/n*100,\n\t\t\t\t\t\t\t data[90]/n*100,\n\t\t\t\t\t\t\t data[80]/n*100,\n\t\t\t\t\t\t\t data[75]/n*100,\n\t\t\t\t\t\t\t data[65]/n*100,\n\t\t\t\t\t\t\t data[50]/n*100,\n\t\t\t\t\t\t\t data[25]/n*100,], formatter=lambda x: '{:.1f}%'.format(x))\n\tchart.render_to_file(\"../img/romanistik_alle-stellenumfang.svg\")\n\t\t\t\n\t\t\t\n\n\ndef main(): \n\tdata = get_data()\n\tdata,n = prepare_data(data)\n\tviz(data,n)\n\t\n\nmain()\t\n" ]
[ [ "pandas.read_csv" ] ]
s3bc40/tutorials
[ "8ecc11a0a7c4278f6d0b0c7f9273e63da3fb7780" ]
[ "script/modeling_tree.py" ]
[ "from sklearn.datasets import load_iris\nfrom sklearn import tree\nimport graphviz \n\n#Preprocessing\niris = load_iris()\nclf = tree.DecisionTreeClassifier()\nclf = clf.fit(iris.data, iris.target)\n\n#Modeling \ndot_data = tree.export_graphviz(clf, out_file=None, \n feature_names=iris.feature_names, \n class_names=iris.target_names, \n filled=True, rounded=True, \n special_characters=True) \ngraph = graphviz.Source(dot_data) \ngraph" ]
[ [ "sklearn.tree.DecisionTreeClassifier", "sklearn.datasets.load_iris", "sklearn.tree.export_graphviz" ] ]
zhengdd0422/VFNet
[ "b21e6635ffa318c0d8d81218ef47a92eac11bb4d" ]
[ "baseline_classify_VFG-564.py" ]
[ "import os\nimport numpy as np\nfrom sklearn import svm\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn import metrics\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import GridSearchCV\nimport argparse\nimport pickle\nfrom utils import *\nimport logging\nimport random as rn\nfrom keras import backend as K\n\nparser = argparse.ArgumentParser(description='ml_features_classifier')\nparser.add_argument('--feature', type=str, default='aac') # aac, dpc, ctd, pseaac1, pseaac2, all\nparser.add_argument('--classify', type=str, default='linearsvc') # LR, DT, RF, svm, linearsvc\nparser.add_argument('--file', type=str, default='VFG-564')\nparser.add_argument('--signal', type=int, default=13) # 13, 23, 33, 43, 53\nparser.add_argument('--split_cutoff', type=float, default=0.4) # 13, 23, 33, 43, 53\nargs = parser.parse_args()\nnp.random.seed(42)\nrn.seed(12345)\n\ndata_dir = os.getcwd() + \"/data/\"\nfeatures_dir = data_dir + args.file + \"_features/\"\nclass_name_dir = data_dir + args.file + \"_class_name\"\nclass_name = load_class_name(class_name_dir)\n\nrecord_dir = data_dir + args.file + \"_record/baseline/\"\nif not os.path.exists(record_dir):\n os.makedirs(record_dir)\n\nif args.feature == 'all':\n all_data = np.load(features_dir + \"aac_ml.npz\", allow_pickle=True)['data']\n all_labels = np.load(features_dir + \"aac_ml.npz\", allow_pickle=True)['labels']\n dpc_data = np.load(features_dir + \"dpc_ml.npz\", allow_pickle=True)['data']\n all_data = np.concatenate((all_data, dpc_data), axis=1)\n ctd_data = np.load(features_dir + \"ctd_ml.npz\", allow_pickle=True)['data']\n all_data = np.concatenate((all_data, ctd_data), axis=1)\n pseaac1_data = np.load(features_dir + \"pseaac1_ml.npz\", allow_pickle=True)['data']\n all_data = np.concatenate((all_data, pseaac1_data), axis=1)\n pseaac2_data = np.load(features_dir + \"pseaac2_ml.npz\", allow_pickle=True)['data']\n all_data = np.concatenate((all_data, pseaac2_data), axis=1)\nelse:\n all_data = np.load(features_dir + args.feature + \"_ml.npz\", allow_pickle=True)['data']\n all_labels = np.load(features_dir + args.feature + \"_ml.npz\", allow_pickle=True)['labels']\n\nmin_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))\nall_data = min_max_scaler.fit_transform(all_data)\nall_labels = map(int, all_labels)\nrandom_state = np.random.RandomState(0)\n\nclass_weights_list = compute_class_weight('balanced', np.unique(all_labels), all_labels) # list\n# convet class_weights from list to dictionary\nclass_weights = {}\nfor i, eachclass in enumerate(class_weights_list):\n class_weights[i] = class_weights_list[i]\n#\n\nX_train, X_test, y_train, y_test, = train_test_split(all_data, all_labels, test_size=args.split_cutoff, stratify=all_labels, random_state=args.signal)\nprint('train number is {}\\ntest number is {}\\n'.format(len(X_train), len(X_test)))\n\nmodel = None\nif args.classify == 'svc':\n model = svm.SVC(random_state=random_state)\nelif args.classify == \"linearsvc\":\n model = LinearSVC(random_state=random_state)\nelif args.classify == 'RF':\n model = RandomForestClassifier(random_state=random_state, n_estimators=100)\nelif args.classify == 'LR':\n model = LogisticRegression(random_state=random_state)\nelif args.classify == 'DT':\n model = 
DecisionTreeClassifier(random_state=random_state)\nmodel.fit(X_train, y_train)\n\nsave_dir = record_dir + args.feature + \"_\" + args.classify + \"_split\" + str(args.split_cutoff) + \"_\"\nf = open(save_dir + str(args.signal) + '.txt', 'a')\nf_save_best_model_dir = save_dir + str(args.signal) + '_bestmodel'\n# pickle.dump(model, open(f_save_best_model_dir, 'ab')) # save model\n\ntrain_acc = model.score(X_train, y_train)\ntest_acc = model.score(X_test, y_test)\n\ny_pred = model.predict(X_test)\ntest_pred_labels = y_pred.tolist()\ntest_cla_report = classification_report(y_test, test_pred_labels, target_names=list(class_name))\nf.write('split: {}\\nTraining acc is {:.4f}\\nTest_acc is: {:.4f}\\nClassfication report:\\n{}\\n'.\n format(args.split_cutoff, train_acc, test_acc, test_cla_report))\nrecall_value = metrics.recall_score(y_test, test_pred_labels, average='micro')\nprecision_value = metrics.precision_score(y_test, test_pred_labels, average='micro')\nf1_score_value = metrics.f1_score(y_test, test_pred_labels, average='micro')\nrecall_value_2 = metrics.recall_score(y_test, test_pred_labels, average='macro')\nprecision_value_2 = metrics.precision_score(y_test, test_pred_labels, average='macro')\nf1_score_value_2 = metrics.f1_score(y_test, test_pred_labels, average='macro')\nf.write('Micro\\nprecision is: {:.4f}\\nRecall is: {:.4f}\\nF1_score is: {:.4f}\\n'.format(precision_value,\n recall_value,\n f1_score_value))\nf.write('Macro\\nprecision is: {:.4f}\\nRecall is: {:.4f}\\nF1_score is: {:.4f}\\n'.format(precision_value_2,\n recall_value_2, f1_score_value_2))\n\n# print(args.split_cutoff)\nprint(f1_score_value_2)\nf.close()\nprint(\"finished\\n\")\n" ]
[ [ "sklearn.linear_model.LogisticRegression", "numpy.random.seed", "numpy.unique", "sklearn.ensemble.RandomForestClassifier", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "numpy.concatenate", "sklearn.tree.DecisionTreeClassifier", "sklearn.svm.SVC", "sklearn.svm.LinearSVC", "numpy.load", "sklearn.metrics.f1_score", "numpy.random.RandomState", "sklearn.metrics.recall_score", "sklearn.preprocessing.MinMaxScaler" ] ]
talhaadnan100/SklearncomPYre
[ "aa1c36c4d555be34eccd803546ec253cfdd0fd2c" ]
[ "SklearncomPYre/comparison_viz.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# coding: utf-8\n\n\n\ndef comparison_viz(comparison, choice = \"accuracy\"):\n '''\n The purpose of the function is to help User visualize the comparison of accuracies or time given in the comparison\n dataframe. It takes in a dataframe with 7 attributes i.e. model name, training & test scores, model variance,\n and the time it takes to fit, predict model and total time taken for both fit and predict.\n\n The function outputs(saves in the root directory) a beautiful <a href=\"https://matplotlib.org\">matplotlib</a>\n bar chart comparison of different models' training and test scores or the time it takes to fit and predict.\n\n Parameters :\n\n Inputs:\n\n comparison : Dataframe with 7 columns\n - regressor or classifier name\n - training accuracy\n - test accuracy\n - model variance\n - time it takes to fit\n - time it takes to predict and\n - total time. Type: `pandas.Dataframe\n - Choice of `accuracy` or `time`. Defaults to `accuracy`. Type: `string`\n\n Outputs:\n - Bar chart of accuracies or time comparison by models. Type: `png`\n\n\tinspiration: https://matplotlib.org/gallery/statistics/barchart_demo.html\n '''\n\n\n ## Tests\n\n # Choice Type\n\n if type(choice) != str:\n raise TypeError(\"Choice must be of type string\")\n\n else:\n # enforce choice lowercase for consistency\n choice = choice.lower()\n\n # Choice value\n if (choice != 'time') and (choice != 'accuracy'):\n raise ValueError(\"Choice must either be 'time' or 'accuracy'\")\n\n\t# Comparison Type\n\n elif type(comparison) != pd.core.frame.DataFrame:\n raise TypeError(\"Comparison must be of type pandas.core.frame.DataFrame\")\n\n\t# Comparison Value Rows\n\n elif comparison.shape[0] < 1:\n raise ValueError(\"Comparison must at least have 1 row\")\n\n\t# Comparison Value Columns\n\n elif comparison.shape[1]!= 7:\n raise ValueError(\"Comparison must contain 7 columns (excluding index)\")\n\n # Comparison Models Column Type\n\n elif all(isinstance(m, str) for m in comparison.iloc[:,0].tolist()) != True:\n raise TypeError(\"Comparison Models column must only contain type string\")\n\n\t## Function\n\n\n else:\n if choice == 'accuracy':\n x = comparison.iloc[:,1]\n y = comparison.iloc[:,2]\n labels = ('Train Accuracy','Test Accuracy','Accuracy','Train and Test Accuracy by Model')\n else:\n x = comparison.iloc[:,4]\n y = comparison.iloc[:,5]\n labels = ('Fit Time','Predict Time','Time (s)','Fit and Predict Time by Model')\n\n \n n_models = comparison.shape[0]\n\n fig, ax = plt.subplots()\n\n index = np.arange(n_models)\n bar_width = 0.35\n opacity = 0.4\n\n rects1 = ax.bar(index, x,bar_width,\n alpha=opacity, color='b',\n label=labels[0])\n\n rects2 = ax.bar(index + bar_width, y, bar_width,\n alpha=opacity, color='r',\n label=labels[1])\n\n ax.set_xticklabels(comparison.iloc[:,0])\n ax.set_xlabel('Models')\n ax.set_ylabel(labels[2])\n ax.set_xticks(index + bar_width / 2)\n ax.set_title(labels[3])\n ax.legend()\n\n fig.tight_layout()\n\n plt.savefig('comparison.png', bbox_inches='tight')\n" ]
[ [ "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig" ] ]
reazulhoque/dpbench
[ "f1000a151814c656baa90d9c4d81b51bb5eb1fa9" ]
[ "native_dpcpp/blackscholes/GPU/base_bs_erf.py" ]
[ "# Copyright (C) 2017-2018 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport os, utils\nimport numpy as np\nfrom dpbench_datagen.blackscholes import gen_data_to_file, gen_rand_data\nfrom dpbench_python.blackscholes.bs_python import black_scholes_python\n\n# make xrange available in python 3\ntry:\n xrange\nexcept NameError:\n xrange = range\n\ndef ip_data_to_file(nopt):\n gen_data_to_file(nopt)\n\ndef gen_data_np(nopt):\n price, strike, t = gen_rand_data(nopt)\n return (price, strike, t,\n np.zeros(nopt, dtype=np.float64),\n -np.ones(nopt, dtype=np.float64))\n\nRISK_FREE = 0.1\nVOLATILITY = 0.2\n \n# create input data, call blackscholes computation function (alg)\ndef run(name, sizes=14, step=2, nopt=2**15):\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--steps', required=False, default=sizes, help=\"Number of steps\")\n parser.add_argument('--step', required=False, default=step, help=\"Factor for each step\")\n parser.add_argument('--size', required=False, default=nopt, help=\"Initial data size\")\n parser.add_argument('--repeat',required=False, default=1, help=\"Iterations inside measured region\")\n parser.add_argument('--usm', required=False, action='store_true', help=\"Use USM Shared or pure numpy\")\n parser.add_argument('--test', required=False, action='store_true', help=\"Check for correctness by comparing output with naieve Python version\")\n\t\n args = parser.parse_args()\n sizes= int(args.steps)\n step = int(args.step)\n nopt = int(args.size)\n repeat=int(args.repeat)\n\n clean_string = ['make', 'clean']\n utils.run_command(clean_string, verbose=True)\n\n if args.usm:\n build_string = ['make' ,'comp']\n utils.run_command(build_string, verbose=True)\n exec_name = \"./black_scholes_comp\"\n else:\n build_string = ['make']\n utils.run_command(build_string, verbose=True)\n exec_name = \"./black_scholes\"\n \n if args.test:\n #run sequential python\n price, strike, t, p_call, p_put = gen_data_np(nopt)\n black_scholes_python(nopt, price, strike, t, RISK_FREE, VOLATILITY, p_call, p_put)\n\n #run dpcpp\n ip_data_to_file(nopt)\n run_cmd = [exec_name, str(nopt), str(1), \"-t\"]\n utils.run_command(run_cmd, verbose=True)\n\n #read output of dpcpp into n_call, n_put\n n_call = np.fromfile(\"call.bin\", np.float64)\n\n #read output of dpcpp into n_call, n_put\n n_put = np.fromfile(\"put.bin\", np.float64)\n \n #compare outputs\n if np.allclose(n_call, p_call) and np.allclose(n_put, p_put):\n print(\"Test succeeded\\n\")\n else:\n print(\"Test failed\\n\") \n return\n\n if os.path.isfile('runtimes.csv'):\n os.remove('runtimes.csv')\n \n for i in xrange(sizes):\n # generate input data\n ip_data_to_file(nopt)\n\n # run the C program\n run_cmd = [exec_name, str(nopt), str(repeat)]\n utils.run_command(run_cmd, verbose=True)\n nopt *= step\n repeat -= step\n if repeat < 1:\n repeat = 1\n\nif __name__ == '__main__':\n run('Blackscholes dpcpp')\n" ]
[ [ "numpy.fromfile", "numpy.zeros", "numpy.allclose", "numpy.ones" ] ]
aleju/LossAccPlotter
[ "3c66a1be11117297d75275c3b099b2f108860ecb" ]
[ "laplotter.py" ]
[ "\"\"\"A class to generate plots for the results of applied loss functions and/or\naccuracy of models trained with machine learning methods.\n\nExample:\n plotter = LossAccPlotter()\n for epoch in range(100):\n loss_train, acc_train = your_model.train()\n loss_val, acc_val = your_model.validate()\n plotter.add_values(epoch,\n loss_train=loss_train, acc_train=acc_train,\n loss_val=loss_val, acc_val=acc_val)\n plotter.block()\n\nExample, no accuracy chart:\n plotter = LossAccPlotter(show_acc_plot=False)\n for epoch in range(100):\n loss_train = your_model.train()\n loss_val = your_model.validate()\n plotter.add_values(epoch, loss_train=loss_train, loss_val=loss_val)\n plotter.block()\n\nExample, update the validation line only every 10th epoch:\n plotter = LossAccPlotter(show_acc_plot=False)\n for epoch in range(100):\n loss_train = your_model.train()\n if epoch % 10 == 0:\n loss_val = your_model.validate()\n else:\n loss_val = None\n plotter.add_values(epoch, loss_train=loss_train, loss_val=loss_val)\n plotter.block()\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport warnings\nimport math\nfrom collections import OrderedDict\n\ndef ignore_nan_and_inf(value, label, x_index):\n \"\"\"Helper function that creates warnings on NaN/INF and converts them to None.\n Args:\n value: The value to check for NaN/INF.\n label: For which line the value was used (usually \"loss train\", \"loss val\", ...)\n This is used in the warning message.\n x_index: At which x-index the value was used (e.g. 1 as in Epoch 1).\n This is used in the warning message.\n Returns:\n value, but None if value is NaN or INF.\n \"\"\"\n if value is None:\n return None\n elif math.isnan(value):\n warnings.warn(\"Got NaN for value '%s' at x-index %d\" % (label, x_index))\n return None\n elif math.isinf(value):\n warnings.warn(\"Got INF for value '%s' at x-index %d\" % (label, x_index))\n return None\n else:\n return value\n\nclass LossAccPlotter(object):\n \"\"\"Class to plot loss and accuracy charts (for training and validation data).\"\"\"\n def __init__(self,\n title=None,\n save_to_filepath=None,\n show_regressions=True,\n show_averages=True,\n show_loss_plot=True,\n show_acc_plot=True,\n show_plot_window=True,\n x_label=\"Epoch\"):\n \"\"\"Constructs the plotter.\n\n Args:\n title: An optional title which will be shown at the top of the\n plot. E.g. the name of the experiment or some info about it.\n If set to None, no title will be shown. (Default is None.)\n save_to_filepath: The path to a file in which the plot will be saved,\n e.g. \"/tmp/last_plot.png\". If set to None, the chart will not be\n saved to a file. (Default is None.)\n show_regressions: Whether or not to show a regression, indicating\n where each line might end up in the future.\n show_averages: Whether to plot moving averages in the charts for\n each line (so for loss train, loss val, ...). This value may\n only be True or False. To change the interval (default is 20\n epochs), change the instance variable \"averages_period\" to the new\n integer value. (Default is True.)\n show_loss_plot: Whether to show the chart for the loss values. If\n set to False, only the accuracy chart will be shown. (Default\n is True.)\n show_acc_plot: Whether to show the chart for the accuracy value. If\n set to False, only the loss chart will be shown. (Default is True.)\n show_plot_window: Whether to show the plot in a window (True)\n or hide it (False). 
Hiding it makes only sense if you\n set save_to_filepath. (Default is True.)\n x_label: Label on the x-axes of the charts. Reasonable choices\n would be: \"Epoch\", \"Batch\" or \"Example\". (Default is \"Epoch\".)\n \"\"\"\n assert show_loss_plot or show_acc_plot\n assert save_to_filepath is not None or show_plot_window\n\n self.title = title\n self.title_fontsize = 14\n self.show_regressions = show_regressions\n self.show_averages = show_averages\n self.show_loss_plot = show_loss_plot\n self.show_acc_plot = show_acc_plot\n self.show_plot_window = show_plot_window\n self.save_to_filepath = save_to_filepath\n self.x_label = x_label\n\n # alpha values\n # 0.8 = quite visible line\n # 0.5 = moderately visible line\n # thick is used for averages and regression (also for the main values,\n # if there are no averages),\n # thin is used for the main values\n self.alpha_thick = 0.8\n self.alpha_thin = 0.5\n\n # the interval for the moving averages, e.g. 20 = average over 20 epochs\n self.averages_period = 20\n\n # these values deal with the regression\n self.poly_forward_perc = 0.1\n self.poly_backward_perc = 0.2\n self.poly_n_forward_min = 5\n self.poly_n_backward_min = 10\n self.poly_n_forward_max = 100\n self.poly_n_backward_max = 100\n self.poly_degree = 1\n\n # whether to show grids in both charts\n self.grid = True\n\n # the styling of the lines\n # sma = simple moving average\n self.linestyles = {\n \"loss_train\": \"r-\",\n \"loss_train_sma\": \"r-\",\n \"loss_train_regression\": \"r:\",\n \"loss_val\": \"b-\",\n \"loss_val_sma\": \"b-\",\n \"loss_val_regression\": \"b:\",\n \"acc_train\": \"r-\",\n \"acc_train_sma\": \"r-\",\n \"acc_train_regression\": \"r:\",\n \"acc_val\": \"b-\",\n \"acc_val_sma\": \"b-\",\n \"acc_val_regression\": \"b:\"\n }\n # different linestyles for the first epoch (if only one value is available),\n # because no line can then be drawn (needs 2+ points) and only symbols will\n # be shown.\n # No regression here, because regression always has at least at least\n # two xy-points (last real value and one (or more) predicted values).\n # No averages here, because the average over one value would be identical\n # to the value anyways.\n self.linestyles_one_value = {\n \"loss_train\": \"rs-\",\n \"loss_val\": \"b^-\",\n \"acc_train\": \"rs-\",\n \"acc_val\": \"b^-\"\n }\n\n # these values will be set in _initialize_plot() upon the first call\n # of redraw()\n # fig: the figure of the whole plot\n # ax_loss: loss chart (left)\n # ax_acc: accuracy chart (right)\n self.fig = None\n self.ax_loss = None\n self.ax_acc = None\n\n # dictionaries with x, y values for each line\n self.values_loss_train = OrderedDict()\n self.values_loss_val = OrderedDict()\n self.values_acc_train = OrderedDict()\n self.values_acc_val = OrderedDict()\n\n def add_values(self, x_index, loss_train=None, loss_val=None, acc_train=None,\n acc_val=None, redraw=True):\n \"\"\"Function to add new values for each line for a specific x-value (e.g.\n a specific epoch).\n\n Meaning of the values / lines:\n - loss_train: y-value of the loss function applied to the training set.\n - loss_val: y-value of the loss function applied to the validation set.\n - acc_train: y-value of the accuracy (e.g. 0.0 to 1.0) when measured on\n the training set.\n - acc_val: y-value of the accuracy (e.g. 
0.0 to 1.0) when measured on\n the validation set.\n\n Values that are None will be ignored.\n Values that are INF or NaN will be ignored, but create a warning.\n\n It is currently assumed that added values follow logically after\n each other (progressive order), so the first x_index might be 1 (first entry),\n then 2 (second entry), then 3 (third entry), ...\n Not allowed would be e.g.: 10, 11, 5, 7, ...\n If that is not the case, you will get a broken line graph.\n\n Args:\n x_index: The x-coordinate, e.g. x_index=5 might represent Epoch 5.\n loss_train: The y-value of the loss train line at the given x_index.\n If None, no value for the loss train line will be added at\n the given x_index. (Default is None.)\n loss_val: Same as loss_train for the loss validation line.\n (Default is None.)\n acc_train: Same as loss_train for the accuracy train line.\n (Default is None.)\n acc_val: Same as loss_train for the accuracy validation line.\n (Default is None.)\n redraw: Whether to redraw the plot immediatly after receiving the\n new values. This is reasonable if you add values once at the end\n of every epoch. If you add many values in a row, set this to\n False and call redraw() at the end (significantly faster).\n (Default is True.)\n \"\"\"\n assert isinstance(x_index, (int, long))\n\n loss_train = ignore_nan_and_inf(loss_train, \"loss train\", x_index)\n loss_val = ignore_nan_and_inf(loss_val, \"loss val\", x_index)\n acc_train = ignore_nan_and_inf(acc_train, \"acc train\", x_index)\n acc_val = ignore_nan_and_inf(acc_val, \"acc val\", x_index)\n\n if loss_train is not None:\n self.values_loss_train[x_index] = loss_train\n if loss_val is not None:\n self.values_loss_val[x_index] = loss_val\n if acc_train is not None:\n self.values_acc_train[x_index] = acc_train\n if acc_val is not None:\n self.values_acc_val[x_index] = acc_val\n\n if redraw:\n self.redraw()\n\n def block(self):\n \"\"\"Function to show the plot in a blocking way.\n\n This should be called at the end of your program. Otherwise the\n chart will be closed automatically (at the end).\n By default, the plot is shown in a non-blocking way, so that the\n program continues execution, which causes it to close automatically\n when the program finishes.\n\n This function will silently do nothing if show_plot_window was set\n to False in the constructor.\n \"\"\"\n if self.show_plot_window:\n plt.figure(self.fig.number)\n plt.show()\n\n def save_plot(self, filepath):\n \"\"\"Saves the current plot to a file.\n\n Args:\n filepath: The path to the file, e.g. 
\"/tmp/last_plot.png\".\n \"\"\"\n self.fig.savefig(filepath, bbox_inches=\"tight\")\n\n def _initialize_plot(self):\n \"\"\"Creates empty figure and axes of the plot and shows it in a new window.\n \"\"\"\n if self.show_loss_plot and self.show_acc_plot:\n fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(24, 8))\n self.fig = fig\n self.ax_loss = ax1\n self.ax_acc = ax2\n else:\n fig, ax = plt.subplots(ncols=1, figsize=(12, 8))\n self.fig = fig\n self.ax_loss = ax if self.show_loss_plot else None\n self.ax_acc = ax if self.show_acc_plot else None\n\n # set_position is neccessary here in order to make space at the bottom\n # for the legend\n for ax in [self.ax_loss, self.ax_acc]:\n if ax is not None:\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.1,\n box.width, box.height * 0.9])\n\n # draw the title\n # it seems to be necessary to set the title here instead of in redraw(),\n # otherwise the title is apparently added again and again with every\n # epoch, making it ugly and bold\n if self.title is not None:\n self.fig.suptitle(self.title, fontsize=self.title_fontsize)\n\n if self.show_plot_window:\n plt.show(block=False)\n\n def redraw(self):\n \"\"\"Redraws the plot with the current values.\n\n This is a full redraw and includes recalculating averages and regressions.\n It should not be called many times per second as that would be slow.\n Calling it every couple seconds should create no noticeable slowdown though.\n\n Args:\n epoch: The index of the current epoch, starting at 0.\n train_loss: All of the training loss values of each\n epoch (list of floats).\n train_acc: All of the training accuracy values of each\n epoch (list of floats).\n val_loss: All of the validation loss values of each\n epoch (list of floats).\n val_acc: All of the validation accuracy values of each\n epoch (list of floats).\n \"\"\"\n # initialize the plot if it's the first redraw\n if self.fig is None:\n self._initialize_plot()\n\n # activate the plot, in case another plot was opened since the last call\n plt.figure(self.fig.number)\n\n # shorter local variables\n ax1 = self.ax_loss\n ax2 = self.ax_acc\n\n # set chart titles, x-/y-labels and grid\n for ax, label in zip([ax1, ax2], [\"Loss\", \"Accuracy\"]):\n if ax:\n ax.clear()\n ax.set_title(label)\n ax.set_ylabel(label)\n ax.set_xlabel(self.x_label)\n ax.grid(self.grid)\n\n # Plot main lines, their averages and the regressions (predictions)\n self._redraw_main_lines()\n self._redraw_averages()\n self._redraw_regressions()\n\n # Add legends (below both chart)\n ncol = 1\n labels = [\"$CHART train\", \"$CHART val.\"]\n if self.show_averages:\n labels.extend([\"$CHART train (avg %d)\" % (self.averages_period,),\n \"$CHART val. (avg %d)\" % (self.averages_period,)])\n ncol += 1\n if self.show_regressions:\n labels.extend([\"$CHART train (regression)\",\n \"$CHART val. (regression)\"])\n ncol += 1\n\n if ax1:\n ax1.legend([label.replace(\"$CHART\", \"loss\") for label in labels],\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.08),\n ncol=ncol)\n if ax2:\n ax2.legend([label.replace(\"$CHART\", \"acc.\") for label in labels],\n loc=\"upper center\",\n bbox_to_anchor=(0.5, -0.08),\n ncol=ncol)\n\n plt.draw()\n\n # save the redrawn plot to a file upon every redraw.\n if self.save_to_filepath is not None:\n self.save_plot(self.save_to_filepath)\n\n def _redraw_main_lines(self):\n \"\"\"Draw the main lines of values (i.e. 
loss train, loss val, acc train, acc val).\n\n Returns:\n List of handles (one per line).\n \"\"\"\n handles = []\n ax1 = self.ax_loss\n ax2 = self.ax_acc\n\n # Set the styles of the lines used in the charts\n # Different line style for epochs after the first one, because\n # the very first epoch has only one data point and therefore no line\n # and would be invisible without the changed style.\n ls_loss_train = self.linestyles[\"loss_train\"]\n ls_loss_val = self.linestyles[\"loss_val\"]\n ls_acc_train = self.linestyles[\"acc_train\"]\n ls_acc_val = self.linestyles[\"acc_val\"]\n if len(self.values_loss_train) == 1:\n ls_loss_train = self.linestyles_one_value[\"loss_train\"]\n if len(self.values_loss_val) == 1:\n ls_loss_val = self.linestyles_one_value[\"loss_val\"]\n if len(self.values_acc_train) == 1:\n ls_acc_train = self.linestyles_one_value[\"acc_train\"]\n if len(self.values_acc_val) == 1:\n ls_acc_val = self.linestyles_one_value[\"acc_val\"]\n\n # Plot the lines\n alpha_main = self.alpha_thin if self.show_averages else self.alpha_thick\n if ax1:\n h_lt, = ax1.plot(self.values_loss_train.keys(), self.values_loss_train.values(),\n ls_loss_train, label=\"loss train\", alpha=alpha_main)\n h_lv, = ax1.plot(self.values_loss_val.keys(), self.values_loss_val.values(),\n ls_loss_val, label=\"loss val.\", alpha=alpha_main)\n handles.extend([h_lt, h_lv])\n if ax2:\n h_at, = ax2.plot(self.values_acc_train.keys(), self.values_acc_train.values(),\n ls_acc_train, label=\"acc. train\", alpha=alpha_main)\n h_av, = ax2.plot(self.values_acc_val.keys(), self.values_acc_val.values(),\n ls_acc_val, label=\"acc. val.\", alpha=alpha_main)\n handles.extend([h_at, h_av])\n\n return handles\n\n def _redraw_averages(self):\n \"\"\"Draw the moving averages of each line.\n\n If moving averages has been deactived in the constructor, this function\n will do nothing.\n\n Returns:\n List of handles (one per line).\n \"\"\"\n # abort if moving averages have been deactivated\n if not self.show_averages:\n return []\n\n handles = []\n ax1 = self.ax_loss\n ax2 = self.ax_acc\n\n # calculate the xy-values\n if ax1:\n # for loss chart\n (lt_sma_x, lt_sma_y) = self._calc_sma(self.values_loss_train.keys(),\n self.values_loss_train.values())\n (lv_sma_x, lv_sma_y) = self._calc_sma(self.values_loss_val.keys(),\n self.values_loss_val.values())\n if ax2:\n # for accuracy chart\n (at_sma_x, at_sma_y) = self._calc_sma(self.values_acc_train.keys(),\n self.values_acc_train.values())\n (av_sma_x, av_sma_y) = self._calc_sma(self.values_acc_val.keys(),\n self.values_acc_val.values())\n\n # plot the xy-values\n alpha_sma = self.alpha_thick\n if ax1:\n # for loss chart\n h_lt, = ax1.plot(lt_sma_x, lt_sma_y, self.linestyles[\"loss_train_sma\"],\n label=\"train loss (avg %d)\" % (self.averages_period,),\n alpha=alpha_sma)\n h_lv, = ax1.plot(lv_sma_x, lv_sma_y, self.linestyles[\"loss_val_sma\"],\n label=\"val loss (avg %d)\" % (self.averages_period,),\n alpha=alpha_sma)\n handles.extend([h_lt, h_lv])\n if ax2:\n # for accuracy chart\n h_at, = ax2.plot(at_sma_x, at_sma_y, self.linestyles[\"acc_train_sma\"],\n label=\"train acc (avg %d)\" % (self.averages_period,),\n alpha=alpha_sma)\n h_av, = ax2.plot(av_sma_x, av_sma_y, self.linestyles[\"acc_val_sma\"],\n label=\"acc. val. (avg %d)\" % (self.averages_period,),\n alpha=alpha_sma)\n handles.extend([h_at, h_av])\n\n return handles\n\n def _redraw_regressions(self):\n \"\"\"Draw the moving regressions of each line, i.e. 
the predictions of\n future values.\n\n If regressions have been deactived in the constructor, this function\n will do nothing.\n\n Returns:\n List of handles (one per line).\n \"\"\"\n if not self.show_regressions:\n return []\n\n handles = []\n ax1 = self.ax_loss\n ax2 = self.ax_acc\n\n # calculate future values for loss train (lt), loss val (lv),\n # acc train (at) and acc val (av)\n if ax1:\n # for loss chart\n lt_regression = self._calc_regression(self.values_loss_train.keys(),\n self.values_loss_train.values())\n lv_regression = self._calc_regression(self.values_loss_val.keys(),\n self.values_loss_val.values())\n # predicting accuracy values isnt necessary if theres no acc chart\n if ax2:\n # for accuracy chart\n at_regression = self._calc_regression(self.values_acc_train.keys(),\n self.values_acc_train.values())\n av_regression = self._calc_regression(self.values_acc_val.keys(),\n self.values_acc_val.values())\n\n # plot the predicted values\n alpha_regression = self.alpha_thick\n if ax1:\n # for loss chart\n h_lt, = ax1.plot(lt_regression[0], lt_regression[1],\n self.linestyles[\"loss_train_regression\"],\n label=\"loss train regression\",\n alpha=alpha_regression)\n h_lv, = ax1.plot(lv_regression[0], lv_regression[1],\n self.linestyles[\"loss_val_regression\"],\n label=\"loss val. regression\",\n alpha=alpha_regression)\n handles.extend([h_lt, h_lv])\n if ax2:\n # for accuracy chart\n h_at, = ax2.plot(at_regression[0], at_regression[1],\n self.linestyles[\"acc_train_regression\"],\n label=\"acc train regression\",\n alpha=alpha_regression)\n h_av, = ax2.plot(av_regression[0], av_regression[1],\n self.linestyles[\"acc_val_regression\"],\n label=\"acc val. regression\",\n alpha=alpha_regression)\n handles.extend([h_at, h_av])\n\n return handles\n\n def _calc_sma(self, x_values, y_values):\n \"\"\"Calculate the moving average for one line (given as two lists, one\n for its x-values and one for its y-values).\n\n Args:\n x_values: x-coordinate of each value.\n y_values: y-coordinate of each value.\n\n Returns:\n Tuple (x_values, y_values), where x_values are the x-values of\n the line and y_values are the y-values of the line.\n \"\"\"\n result_y, last_ys = [], []\n running_sum = 0\n period = self.averages_period\n # use a running sum here instead of avg(), should be slightly faster\n for y_val in y_values:\n last_ys.append(y_val)\n running_sum += y_val\n if len(last_ys) > period:\n poped_y = last_ys.pop(0)\n running_sum -= poped_y\n result_y.append(float(running_sum) / float(len(last_ys)))\n return (x_values, result_y)\n\n def _calc_regression(self, x_values, y_values):\n \"\"\"Calculate the regression for one line (given as two lists, one\n for its x-values and one for its y-values).\n\n Args:\n x_values: x-coordinate of each value.\n y_values: y-coordinate of each value.\n\n Returns:\n Tuple (x_values, y_values), where x_values are the predicted x-values\n of the line and y_values are the predicted y-values of the line.\n \"\"\"\n if not x_values or len(x_values) < 2:\n return ([], [])\n\n # This currently assumes that the last added x-value for the line\n # was indeed that highest x-value.\n # This could be avoided by tracking the max value for each line.\n last_x = x_values[-1]\n nb_values = len(x_values)\n\n # Compute regression lines based on n_backwards epochs\n # in the past, e.g. based on the last 10 values.\n # n_backwards is calculated relative to the current epoch\n # (e.g. at epoch 100 compute based on the last 10 values,\n # at 200 based on the last 20 values...). 
It has a minimum (e.g. never\n # use less than 5 epochs (unless there are only less than 5 epochs))\n # and a maximum (e.g. never use more than 1000 epochs).\n # The minimum prevents bad predictions.\n # The maximum\n # a) is better for performance\n # b) lets the regression react faster in case you change something\n # in the hyperparameters after a long time of training.\n n_backward = int(nb_values * self.poly_backward_perc)\n n_backward = max(n_backward, self.poly_n_backward_min)\n n_backward = min(n_backward, self.poly_n_backward_max)\n\n # Compute the regression lines for the n_forward future epochs.\n # n_forward also has a reletive factor, as well as minimum and maximum\n # values (see n_backward).\n n_forward = int(nb_values * self.poly_forward_perc)\n n_forward = max(n_forward, self.poly_n_forward_min)\n n_forward = min(n_forward, self.poly_n_forward_max)\n\n # return nothing of the values turn out too low\n if n_backward <= 1 or n_forward <= 0:\n return ([], [])\n\n # create/train the regression model\n fit = np.polyfit(x_values[-n_backward:], y_values[-n_backward:],\n self.poly_degree)\n poly = np.poly1d(fit)\n\n # calculate future x- and y-values\n # we use last_x to last_x+n_forward here instead of\n # last_x+1 to last_x+1+n_forward\n # so that the regression line is better connected to the current line\n # (no visible gap)\n future_x = [i for i in range(last_x, last_x + n_forward)]\n future_y = [poly(x_idx) for x_idx in future_x]\n\n return (future_x, future_y)\n" ]
[ [ "numpy.polyfit", "numpy.poly1d", "matplotlib.pyplot.subplots", "matplotlib.pyplot.draw", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
OneToolsCollection/4paradigm-AutoX
[ "f8e838021354de17f5bb9bc44e9d68d12dda6427" ]
[ "autox/autox_recommend/recall_and_rank/recalls/binary_recall.py" ]
[ "import gc\nimport math\nimport warnings\nimport pandas as pd\nfrom tqdm import tqdm\n\nwarnings.filterwarnings('ignore')\nimport datetime\n\n\ndef BinaryNet_Recommend(sim_item, user_item_dict, user_time_dict, user_id, top_k, item_num, time_max,\n rt_dict=False):\n rank = {}\n interacted_items = user_item_dict[user_id]\n interacted_times = user_time_dict[user_id]\n for loc, i in enumerate(interacted_items):\n time = interacted_times[loc]\n items = sorted(sim_item[i].items(), reverse=True)[0:top_k]\n for j, wij in items:\n rank.setdefault(j, 0)\n rank[j] += wij * 0.8 ** time\n\n if rt_dict:\n return rank\n\n return sorted(rank.items(), key=lambda d: d[1], reverse=True)[:item_num]\n\n\ndef get_sim_item_binary(df,\n user_col, item_col, time_col,\n time_max):\n user_item_ = df.groupby(user_col)[item_col].agg(list).reset_index()\n user_item_dict = dict(zip(user_item_[user_col], user_item_[item_col]))\n\n item_user_ = df.groupby(item_col)[user_col].agg(list).reset_index()\n item_user_dict = dict(zip(item_user_[item_col], item_user_[user_col]))\n\n df['date'] = (time_max - df[time_col]).dt.days\n user_time_ = df.groupby(user_col)['date'].agg(list).reset_index() # 引入时间因素\n user_time_dict = dict(zip(user_time_[user_col], user_time_['date']))\n\n del df['date']\n gc.collect()\n\n sim_item = {}\n for item, users in tqdm(item_user_dict.items()):\n sim_item.setdefault(item, {})\n for u in users:\n tmp_len = len(user_item_dict[u])\n for relate_item in user_item_dict[u]:\n sim_item[item].setdefault(relate_item, 0)\n sim_item[item][relate_item] += 1 / (math.log(len(users) + 1) * math.log(tmp_len + 1))\n\n return sim_item, user_item_dict, user_time_dict\n\n\ndef get_binaryNet_recall(custs, target_df, df,\n uid, iid, time_col,\n time_max, topk=200, rec_num=100):\n time_max = datetime.datetime.strptime(time_max, '%Y-%m-%d %H:%M:%S')\n sim_item, user_item_dict, user_time_dict, = get_sim_item_binary(df,\n uid, iid, time_col,\n time_max)\n\n samples = []\n target_df = target_df[target_df[uid].isin(custs)]\n for cust in tqdm(custs):\n if cust not in user_item_dict:\n continue\n rec = BinaryNet_Recommend(sim_item, user_item_dict, user_time_dict, cust, topk, rec_num,\n time_max)\n for k, v in rec:\n samples.append([cust, k, v])\n samples = pd.DataFrame(samples, columns=[uid, iid, 'binary_score'])\n print(samples.shape)\n target_df['label'] = 1\n samples = samples.merge(target_df[[uid, iid, 'label']], on=[uid, iid], how='left')\n samples['label'] = samples['label'].fillna(0)\n print('BinaryNet recall: ', samples.shape)\n print(samples.label.mean())\n\n return samples\n\n\ndef binary_recall(uids, data, date,\n uid, iid, time_col,\n last_days=7, recall_num=100, dtype='train', topk=1000):\n assert dtype in ['train', 'test']\n\n if dtype == 'train':\n\n begin_date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S') - datetime.timedelta(days=last_days)\n begin_date = str(begin_date)\n\n target_df = data[(data[time_col] <= date) & (data[time_col] > begin_date)]\n target = target_df.groupby(uid)[iid].agg(list).reset_index()\n target.columns = [uid, 'label']\n data_hist = data[data[time_col] <= begin_date]\n\n # BinaryNet进行召回\n binary_samples = get_binaryNet_recall(target[uid].unique(), target_df, data_hist,\n uid, iid, time_col,\n begin_date, topk=topk,\n rec_num=recall_num)\n\n return binary_samples\n\n elif dtype == 'test':\n\n time_max = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\n\n sim_item, user_item_dict, user_time_dict = get_sim_item_binary(\n data,\n uid, iid, time_col,\n time_max)\n\n samples = 
[]\n for cust in tqdm(uids):\n if cust not in user_item_dict:\n continue\n\n rec = BinaryNet_Recommend(sim_item, user_item_dict, user_time_dict, cust, topk,\n recall_num,\n time_max)\n for k, v in rec:\n samples.append([cust, k, v])\n\n samples = pd.DataFrame(samples, columns=[uid, iid, 'binary_score'])\n return samples\n" ]
[ [ "pandas.DataFrame" ] ]
derwind/dmfont
[ "17a91a9cc1917d2485eaa8e92b68245578920c76" ]
[ "datasets/data_utils.py" ]
[ "\"\"\"\nDMFont\nCopyright (c) 2020-present NAVER Corp.\nMIT license\n\"\"\"\nimport numpy as np\n\n\ndef cyclize(loader):\n \"\"\" Cyclize loader \"\"\"\n while True:\n for x in loader:\n yield x\n\n\ndef rev_dict(l):\n \"\"\" Reverse dict or list \"\"\"\n return {k: i for i, k in enumerate(l)}\n\n\ndef uniform_indices(end, n_sample, st=None):\n \"\"\" Sample from [0, end) with (almost) equidistant interval \"\"\"\n if end <= 0:\n return np.empty(0, dtype=np.int)\n\n # NOTE with endpoint=False, np.linspace does not sample the `end` value\n indices = np.linspace(0, end, num=n_sample, dtype=np.int, endpoint=False)\n if st is None and end:\n st = (end-1 - indices[-1]) // 2\n return indices + st\n\n\ndef sample(population, n_sample, exception=None):\n \"\"\" sampling without replacement N elements from set with exception\n\n Params:\n population: [1d] list or set or np.ndarray\n Return: np.ndarray\n \"\"\"\n if exception is not None:\n population = set(population) - set(exception)\n if not isinstance(population, np.ndarray):\n population = np.asarray(list(population))\n ids = np.random.choice(len(population), size=n_sample, replace=False)\n return population[ids]\n\n\ndef uniform_sample(population, n_sample, st=None):\n assert not isinstance(population, set), \"population should have order\"\n\n N = len(population)\n indices = uniform_indices(N, n_sample, st)\n\n if isinstance(population, np.ndarray):\n return population[indices]\n elif isinstance(population, list):\n return [population[idx] for idx in indices]\n elif isinstance(population, str):\n return ''.join([population[idx] for idx in indices])\n else:\n raise TypeError(type(population))\n\n\ndef get_fonts(avails):\n return list(avails.keys())\n\n\ndef get_union_chars(avails):\n return sorted(set.union(*map(set, avails.values())))\n\n\ndef get_fonts_unionchars(avails):\n return get_fonts(avails), get_union_chars(avails)\n\n\ndef get_intersection_chars(avails):\n return sorted(set.intersection(*map(set, avails.values())))\n" ]
[ [ "numpy.empty", "numpy.linspace" ] ]
hannahmfan/covid-prediction
[ "96482cfb6fff8959772ccd48c10ce0588b191f18" ]
[ "processing/raw_data_processing/get_JH_daily.py" ]
[ "#%%\nimport pandas as pd\nimport numpy as np\nimport requests\nfrom datetime import datetime as dt\nfrom io import StringIO\nimport os\nimport us\nimport git\nfrom functools import reduce\nfrom datetime import datetime, timedelta, date\n\n#%%\ndef clean_df(df, date):\n \"\"\"Cleans up dataframe to get only US counties (i.e. things with FIPS)\"\"\"\n df.dropna(subset=['FIPS', 'Admin2'], inplace=True)\n pd.options.mode.chained_assignment = None\n df = df[[df.columns[0]] + list(df.columns[-5:-1])]\n df.loc[:, 'Date'] = date\n return df\n\n# list all dates between two dates\ndef daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)):\n yield (start_date + timedelta(n)).strftime('%m-%d-%Y')\n\n# urls for data in Johns Hopkins github repository\nurls = {}\n\n# Note that JHU only started reporting county information from this date\n# Which is why we start our query from 3/23/2020\nstart_date = date(2020, 3, 23)\nend_date = date.today()\nfor d in daterange(start_date, end_date):\n urls[d] = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/\" + d + \".csv\"\n\n# Initialize dictionary to save output dataframes\noutput_dfs = {}\n\n# Loop urls\nfor condition, url in urls.items():\n # Obtain data\n request = requests.get(url)\n # Convert into string\n txt = StringIO(request.text)\n # Convert into dataframe\n df = pd.read_csv(txt)\n # Add to dictionary\n output_dfs[condition] = clean_df(df, condition)\n\n\n# Find home directory for repo\nrepo = git.Repo(\"./\", search_parent_directories=True)\nhomedir = repo.working_dir\ndatadir = f\"{homedir}/data/us/covid/\"\n\ndfs = list(output_dfs.values())\ndfs = pd.concat(dfs)\ndfs.to_csv(\n f\"{datadir}JHU_daily_US.csv\",\n index=False\n)\n" ]
[ [ "pandas.concat", "pandas.read_csv" ] ]
Crispy13/crispy
[ "835f487e099d26243153454fdc81086716114a85" ]
[ "core/ehk.py" ]
[ "import cv2\nimport json\nimport os\nimport csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image, ImageDraw\nfrom pathlib import Path\nfrom PIL import Image\nfrom matplotlib import cm, colors\nimport crispy as csp\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tifffile\nimport pandas as pd\nimport shutil\nfrom multiprocessing import Pool, cpu_count\nfrom functools import partial\nimport tensorflow_addons as tfa\n\nfrom tensorflow.train import BytesList, FloatList, Int64List\nfrom tensorflow.train import Feature, Features, Example\n\nImage.MAX_IMAGE_PIXELS = None\n\n\n###\ndef load_tiff(path):\n image = tifffile.imread(path)\n \n if image.shape[-1] != 3:\n print(\"The shape of loaded image will be reshaped, current shape = {}\".format(image.shape))\n image = np.squeeze(image)\n image = np.transpose(image, axes = (1, 2, 0))\n \n print(\"Sample id: {}, image shape = {}\".format(Path(path).stem, image.shape))\n \n return image\n\n\n###\ndef make_mask(json_path, image):\n json_data = json.load(open(json_path, \"r\"))\n \n ## Make polygons\n polys = []\n for index in range(json_data.__len__()):\n geom = np.array(json_data[index]['geometry']['coordinates'])\n polys.append(geom)\n \n mask = np.zeros(image.shape[:-1])\n mask = np.expand_dims(mask, axis = -1)\n \n for i in range(len(polys)):\n cv2.fillPoly(\n mask, polys[i], \n 1\n )\n \n mask = mask.astype(bool)\n \n print(\"Mask shape: {}\".format(mask.shape))\n \n return mask\n\n\n###\ndef get_tile(baseimage, tile_size, tile_row_pos, tile_col_pos, stride):\n start_col = tile_col_pos * stride\n end_col = start_col + tile_size\n start_row = tile_row_pos * stride\n end_row = start_row + tile_size\n tile_image = baseimage[start_row:end_row, start_col:end_col, :]\n \n \n ## For truncated tiles, pad zeros to the tiles in order to get the same shape as normal tiles.\n if tile_image.shape != (tile_size, tile_size, baseimage.shape[-1]):\n zero_array = np.zeros((tile_size, tile_size, baseimage.shape[-1]))\n row, col, ch = tile_image.shape\n zero_array[:row, :col, :ch] = tile_image\n \n tile_image = zero_array\n orig_tile_shape = (row, col, ch)\n \n else:\n orig_tile_shape = \"no\"\n \n tile_image = tile_image.astype(np.uint8)\n \n return tile_image, orig_tile_shape\n\n\n###\ndef show_tile_and_mask(tile_image, tile_mask):\n fig, ax = plt.subplots(1,2,figsize=(20,3))\n ax[0].imshow(tile_image)\n ax[1].imshow(tile_mask)\n \n \n# Utilities serialize data into a TFRecord\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _float_feature(value):\n \"\"\"Returns a float_list from a float / double.\"\"\"\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\ndef _int64_feature(value):\n \"\"\"Returns an int64_list from a bool / enum / int / uint.\"\"\"\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\n###\ndef image_example(image_index, image, mask, tile_id, tile_col_pos, tile_row_pos):\n image_shape = image.shape\n \n img_bytes = image.tobytes()\n\n mask_bytes = mask.tobytes()\n \n feature = {\n 'img_index': _int64_feature(image_index),\n 'height': _int64_feature(image_shape[0]),\n 'width': _int64_feature(image_shape[1]),\n 'num_channels': _int64_feature(image_shape[2]),\n 'img_bytes': _bytes_feature(img_bytes),\n 'img_dtype': 
_bytes_feature(str(image.dtype).encode()),\n 'mask' : _bytes_feature(mask_bytes),\n 'mask_dtype' : _bytes_feature(str(mask.dtype).encode()),\n 'tile_id': _int64_feature(tile_id),\n 'tile_col_pos': _int64_feature(tile_col_pos),\n 'tile_row_pos': _int64_feature(tile_row_pos),\n \n }\n return tf.train.Example(features=tf.train.Features(feature=feature))\n\n\n###\ndef create_tfrecord(image_index, image, mask, tile_id, tile_col_pos, tile_row_pos, output_path):\n opts = tf.io.TFRecordOptions(compression_type=\"GZIP\")\n with tf.io.TFRecordWriter(str(output_path), opts) as writer:\n tf_example = image_example(image_index, image, mask, tile_id, tile_col_pos, tile_row_pos)\n writer.write(tf_example.SerializeToString())\n writer.close()\n \n \n \n###\ndef write_tfrecord_tiles(image_index, image_id, image, mask, tile_size, stride, output_path):\n output_dir = Path(output_path) / image_id\n# if output_dir.exists():\n# shutil.rmtree(output_dir)\n output_dir.mkdir(exist_ok = True)\n \n image_rows = image.shape[0]\n image_cols = image.shape[1]\n tile_rows = (image_rows-1) // stride + 1\n tile_cols = (image_cols-1) // stride + 1\n tileID = 0\n \n pb = csp.Progressbar(total = tile_rows * tile_cols)\n \n # create a pandas dataframe to store metadata for each tile\n tile_df = pd.DataFrame(columns = ['img_index', 'img_id', 'tile_id', 'tile_rel_path', 'tile_row_num', 'tile_col_num', 'lowband_density', 'mask_density', \"zero_padded\"])\n \n # create one directory for each row of images\n for row_number in range(tile_rows):\n# print('row_offset{} '.format(row_number),end='')\n# dir_path = output_dir / 'row{}'.format(row_number)\n# # create directory\n# if dir_path.exists():\n# shutil.rmtree(dir_path)\n# dir_path.mkdir()\n for col_number in range(tile_cols):\n #print(\"row{}\".format(col_number),end='')\n# dataset_file_path = dir_path+'/col{}_row{}.tfrecords'.format(row_number,col_number)\n \n dataset_file_path = dir_path / 'row{}_col{}.tfrecords'.format(row_number, col_number)\n relative_path = image_id + \"/row{}_col{}.tfrecords\".format(row_number, col_number)\n# if dataset_file_path.exists():\n# shutil.rmtree(dataset_file_path)\n# dataset_file_path.mkdir(parents = True, exist_ok = True)\n \n lower_col_range = col_number * stride\n image_tile, orig_image_shape = get_tile(image, tile_size, row_number, col_number, stride)\n tile_mask, _ = get_tile(image, tile_size, row_number, col_number, stride)\n num_records = create_tfrecord(image_index, image_tile, tile_mask, tileID, row_number, col_number, dataset_file_path)\n \n # populate the metadata for this tile\n img_hist = np.histogram(image_tile)\n lowband_density = np.sum(img_hist[0][0:4])\n mask_density = np.count_nonzero(tile_mask)\n tile_df = tile_df.append({'img_index':image_index, 'img_id':image_id, 'tile_id': tileID, 'tile_rel_path':relative_path, \n 'tile_col_num':col_number, 'tile_row_num':row_number,'lowband_density':lowband_density, 'mask_density':mask_density, \"zero_padded\":orig_image_shape},ignore_index=True)\n \n pb.show(tileID, details = \"Current sample id = {}, row_offset = {}, col_offset = {}\".format(image_id, row_number, col_number))\n tileID += 1\n \n return tile_df\n\n\n###\ndef write_tfrecord_tiles_mp(image_index, image_id, image, mask, tile_size, stride, output_path):\n output_dir = Path(output_path) / image_id\n# if output_dir.exists():\n# shutil.rmtree(output_dir)\n output_dir.mkdir(exist_ok = True)\n \n image_rows = image.shape[0]\n image_cols = image.shape[1]\n tile_rows = (image_rows-1) // stride + 1\n tile_cols = (image_cols-1) // 
stride + 1\n \n write_tfrecord_tiles_mp_main_partial = partial(write_tfrecord_tiles_mp_main, \n image_index = image_index, image_id = image_id, output_dir = output_dir, \n tile_size = tile_size, stride = stride, tile_cols = tile_cols\n )\n \n v.map(os.chdir, [os.getcwd()] * len(v))\n r = v.map_async(write_tfrecord_tiles_mp_main_partial, list(range(tile_rows)))\n print(dir(r))\n# csp.track_job(r, tile_rows)\n r.wait()\n ipc_result = r.get()\n \n print(\"Multiprocessing job done.\")\n\n\n# with Pool(cpu_count()) as p:\n# r = p.map_async(write_tfrecord_tiles_mp_main_partial, list(range(tile_rows)))\n# csp.track_job(r, tile_rows)\n \n# r.wait()\n \n# pool_result = r.get()\n \n# print(\"Multiprocessing job done.\")\n\n\n###\ndef write_tfrecord_tiles_mp_main(row_number, image, mask, image_index, image_id, output_dir, tile_cols, tile_size, stride):\n# print(\"write_tfrecord_tiles_mp_main: starting...\")\n tile_df = pd.DataFrame(columns = ['img_index', 'img_id', 'tile_id', 'tile_rel_path', 'tile_row_num', 'tile_col_num', 'lowband_density', 'mask_density', \"zero_padded\"])\n tileID = 0\n dir_path = Path(output_dir) / \"row{}\".format(row_number)\n dir_path.mkdir(exist_ok = True)\n \n# print(\"write_tfrecord_tiles_mp_main: Entering for loop ...\")\n for col_number in range(tile_cols):\n print(\"Starting to write tfrecords... {}-row{}-col{}\".format(image_id, row_number, col_number))\n #print(\"row{}\".format(col_number),end='')\n dataset_file_path = dir_path / 'row{}_col{}.tfrecord'.format(row_number, col_number)\n relative_path = image_id + \"/row{0}/row{0}_col{1}.tfrecord\".format(row_number, col_number)\n\n lower_col_range = col_number * stride\n image_tile, orig_image_shape = get_tile(image, tile_size, row_number, col_number, stride)\n tile_mask, _ = get_tile(mask, tile_size, row_number, col_number, stride)\n num_records = create_tfrecord(image_index, image_tile, tile_mask, tileID, row_number, col_number, dataset_file_path)\n\n # populate the metadata for this tile\n img_hist = np.histogram(image_tile)\n lowband_density = np.sum(img_hist[0][0:4])\n mask_density = np.count_nonzero(tile_mask)\n tile_df.loc[tileID, :] = {'img_index':image_index, 'img_id':image_id, 'tile_id': tileID, 'tile_rel_path':relative_path, \n 'tile_col_num':col_number, 'tile_row_num':row_number,'lowband_density':lowband_density, 'mask_density':mask_density, \"zero_padded\":orig_image_shape}\n\n tileID += 1\n \n del image_tile, tile_mask, orig_image_shape\n \n print(\"Complete writing tfrecords... {}-row{}-col{}\".format(image_id, row_number, col_number))\n \n tile_df.to_csv(dir_path / \"{}_row{}_tile-df.csv\".format(image_id, row_number))\n print(\"Complete writing tfrecords... 
{}\".format(image_id))\n \n \n# read back a record to make sure it the decoding works\n# Create a dictionary describing the features.\nimage_feature_description = {\n 'img_index': tf.io.FixedLenFeature([], tf.int64),\n 'height': tf.io.FixedLenFeature([], tf.int64),\n 'width': tf.io.FixedLenFeature([], tf.int64),\n 'num_channels': tf.io.FixedLenFeature([], tf.int64),\n 'img_bytes': tf.io.FixedLenFeature([], tf.string),\n 'img_dtype': tf.io.FixedLenFeature([], tf.string),\n 'mask': tf.io.FixedLenFeature([], tf.string),\n 'mask_dtype': tf.io.FixedLenFeature([], tf.string),\n 'tile_id': tf.io.FixedLenFeature([], tf.int64),\n 'tile_col_pos': tf.io.FixedLenFeature([], tf.int64),\n 'tile_row_pos': tf.io.FixedLenFeature([], tf.int64),\n}\n\ndef _parse_image_function(example_proto):\n # Parse the input tf.Example proto using the dictionary above.\n single_example = tf.io.parse_single_example(example_proto, image_feature_description)\n img_index = single_example['img_index']\n img_height = single_example['height']\n img_width = single_example['width']\n num_channels = single_example['num_channels']\n \n img_dtype = tf.io.decode_raw(single_example['img_dtype'], out_type = tf.string)\n img_bytes = tf.io.decode_raw(single_example['img_bytes'], out_type = img_dtype)\n \n img_array = tf.reshape( img_bytes, (img_height, img_width, num_channels))\n \n mask_dtype = tf.io.decode_raw(single_example['mask_dtype'], out_type = tf.string)\n mask_bytes = tf.io.decode_raw(single_example['mask'], out_type = mask_dtype)\n \n mask = tf.reshape(mask_bytes, (img_height,img_width, 1))\n\n mtd = dict()\n mtd['img_index'] = single_example['img_index']\n mtd['width'] = single_example['width']\n mtd['height'] = single_example['height']\n mtd['tile_id'] = single_example['tile_id']\n mtd['tile_col_pos'] = single_example['tile_col_pos']\n mtd['tile_row_pos'] = single_example['tile_row_pos']\n struct = {\n 'img_array': img_array,\n 'mask': mask,\n 'mtd': mtd\n } \n return struct\n\ndef read_tf_dataset(storage_file_path):\n encoded_image_dataset = tf.data.TFRecordDataset(storage_file_path, compression_type=\"GZIP\")\n parsed_image_dataset = encoded_image_dataset.map(_parse_image_function)\n return parsed_image_dataset\n\n\n\n###\ndef write_tfrecord_tiles_mp_main_test(row_number, trainset_dir, image_index, image_id, output_dir, tile_cols, tile_size, stride):\n image = load_tiff(trainset_dir / \"{}.tiff\".format(image_id))\n mask = make_mask(trainset_dir / \"{}.json\".format(image_id), image)\n \n# print(\"write_tfrecord_tiles_mp_main: starting...\")\n tile_df_path = dir_path / \"{}_row{}_tile-df.csv\".format(image_id, row_number)\n if tile_df_path.exists():\n tile_df = pd.read_csv(tile_df_path, index_col = 0)\n else:\n tile_df = pd.DataFrame(columns = ['img_index', 'img_id', 'tile_id', 'tile_rel_path', 'tile_row_num', 'tile_col_num', 'lowband_density', 'mask_density', \"zero_padded\"])\n \n tileID = 0\n dir_path = Path(output_dir) / \"row{}\".format(row_number)\n dir_path.mkdir(exist_ok = True)\n \n# print(\"write_tfrecord_tiles_mp_main: Entering for loop ...\")\n for col_number in range(tile_cols):\n print(\"Starting to write tfrecords... 
{}-row{}-col{}\".format(image_id, row_number, col_number))\n #print(\"row{}\".format(col_number),end='')\n dataset_file_path = dir_path / 'row{}_col{}.tfrecord'.format(row_number, col_number)\n relative_path = image_id + \"/row{0}/row{0}_col{1}.tfrecord\".format(row_number, col_number)\n\n lower_col_range = col_number * stride\n image_tile, orig_image_shape = get_tile(image, tile_size, row_number, col_number, stride)\n tile_mask, _ = get_tile(mask, tile_size, row_number, col_number, stride)\n num_records = create_tfrecord(image_index, image_tile, tile_mask, tileID, row_number, col_number, dataset_file_path)\n\n # populate the metadata for this tile\n img_hist = np.histogram(image_tile)\n lowband_density = np.sum(img_hist[0][0:4])\n mask_density = np.count_nonzero(tile_mask)\n tile_df.loc[tileID, :] = {'img_index':image_index, 'img_id':image_id, 'tile_id': tileID, 'tile_rel_path':relative_path, \n 'tile_col_num':col_number, 'tile_row_num':row_number,'lowband_density':lowband_density, 'mask_density':mask_density, \"zero_padded\":orig_image_shape}\n\n tileID += 1\n \n del image_tile, tile_mask, orig_image_shape\n \n print(\"Complete writing tfrecords... {}-row{}-col{}\".format(image_id, row_number, col_number))\n \n \n tile_df_path.exist\n tile_df.to_csv()\n print(\"Complete writing tfrecords... {}\".format(image_id))\n \n \n \n###\ndef write_tfrecord_tiles_mp_main_test2(row_number, image_index, image_id, output_dir, tile_cols, tile_size, stride):\n# print(\"write_tfrecord_tiles_mp_main: starting...\")\n\n dir_path = Path(output_dir) / \"row{}\".format(row_number)\n dir_path.mkdir(exist_ok = True)\n print(id(image), id(mask))\n \n ## set tile_df\n tile_df_path = dir_path / \"{}_row{}_tile-df.csv\".format(image_id, row_number)\n \n if tile_df_path.is_file():\n tile_df = pd.read_csv(tile_df_path, index_col = 0)\n else:\n tile_df = pd.DataFrame(columns = ['img_index', 'img_id', 'tile_id', 'tile_rel_path', 'tile_row_num', 'tile_col_num', 'lowband_density', 'mask_density', \"zero_padded\"])\n \n tileID = 0\n \n# print(\"write_tfrecord_tiles_mp_main: Entering for loop ...\")\n for col_number in range(tile_cols):\n# print(\"Starting to write tfrecords... 
{}-row{}-col{}\".format(image_id, row_number, col_number))\n #print(\"row{}\".format(col_number),end='')\n dataset_file_path = dir_path / 'row{}_col{}.tfrecord'.format(row_number, col_number)\n relative_path = image_id + \"/row{0}/row{0}_col{1}.tfrecord\".format(row_number, col_number)\n\n lower_col_range = col_number * stride\n image_tile, orig_image_shape = get_tile(image, tile_size, row_number, col_number, stride)\n tile_mask, _ = get_tile(mask, tile_size, row_number, col_number, stride)\n \n print(\"image_tile.shape = {}\\nimage_id = {}, row_number = {}, col_number = {}\".format(image_tile.shape, image_id, row_number, col_number))\n if image_tile.shape != (tile_size, tile_size, 3):\n raise ValueError(\"tile size is not {} but {}\\nimage_id = {}, row_number = {}, col_number = {}\".format((tile_size, tile_size, 3), image_tile.shape, image_id, row_number, col_number))\n \n num_records = create_tfrecord(image_index, image_tile, tile_mask, tileID, row_number, col_number, dataset_file_path)\n\n # populate the metadata for this tile\n img_hist = np.histogram(image_tile)\n lowband_density = np.sum(img_hist[0][0:4])\n mask_density = np.count_nonzero(tile_mask)\n tile_df.loc[tileID, :] = {'img_index':image_index, 'img_id':image_id, 'tile_id': tileID, 'tile_rel_path':relative_path, \n 'tile_col_num':col_number, 'tile_row_num':row_number,'lowband_density':lowband_density, 'mask_density':mask_density, \"zero_padded\":orig_image_shape}\n\n tileID += 1\n \n del image_tile, tile_mask, orig_image_shape\n \n# print(\"Complete writing tfrecords... {}-row{}-col{}\".format(image_id, row_number, col_number))\n \n tile_df.to_csv(tile_df_path)\n# print(\"Complete writing tfrecords... {}\".format(image_id))\n \n \n### \ndef pool_init2(image_base, mask_base, image_shape, mask_shape):\n global image, mask\n image = np.ctypeslib.as_array(image_base.get_obj())\n image = image.reshape(*image_shape)\n \n mask = np.ctypeslib.as_array(mask_base.get_obj())\n mask = mask.reshape(*mask_shape)\n \n \n# ###\n# def pool_init(image_base, mask_base, shared_image, shared_mask, image, mask):\n# shared_image = np.ctypeslib.as_array(image_base.get_obj())\n# shared_image = shared_image.reshape(*image.shape)\n# shared_image[:] = image[:]\n \n# shared_mask = np.ctypeslib.as_array(mask_base.get_obj())\n# shared_mask = shared_mask.reshape(*mask.shape)\n# shared_mask[:] = mask[:]\n\n\n\n \n \n \n###\ndef compare_data(*tf_datasets):\n \"\"\"\n Shows two dataset's image and label.\n \n Parameters\n ----------\n tf_datasets : 2 tf.datasets\n \n \"\"\"\n \n tfd1, tfd2 = tf_datasets\n \n for e1, e2 in zip(tfd1, tfd2):\n fig, axes = plt.subplots(2, 2, figsize=(10, 10))\n \n for row_axes, ee in zip(axes, (e1, e2)):\n row_axes[0].imshow(ee[0].numpy().astype(np.float32) / 255.)\n row_axes[1].imshow(ee[1])\n \n \n# for i, ee1, ee2 in zip(range(len(axes[0])), e1, e2):\n# axes[:, i][0].imshow(ee1[0].numpy().astype(np.float32) / 255.)\n# axes[:, i][1].imshow(ee[1])\n\n\n###\ndef extract_image_label_only(tfdata, image_feature_description = image_feature_description):\n parsed_example = tf.io.parse_single_example(tfdata, image_feature_description)\n \n h, w, ch = parsed_example['height'], parsed_example['width'], parsed_example['num_channels']\n \n img_dtype = parsed_example['img_dtype']\n image_decoded = tf.io.decode_raw(parsed_example['img_bytes'], out_type = 'uint8')\n image = tf.reshape(image_decoded, (h, w, ch))\n \n# image_float32 = tf.cast(image, dtype = tf.float32)\n \n mask_dtype = parsed_example['mask_dtype']\n mask_decoded = 
tf.io.decode_raw(parsed_example['mask'], out_type = 'bool')\n mask = tf.reshape(mask_decoded, (h, w, 1))\n \n# mask_uint8 = tf.cast(mask, dtype = tf.uint8)\n \n return image, mask\n\n\n###\ndef data_aug(image, label):\n label = tf.cast(label, tf.uint8)\n \n image_shape = tf.cast(tf.shape(image), tf.float32)\n h, w, ch = image_shape[0], image_shape[1], image_shape[2]\n\n ### random shift\n shift_vector = (\n h * tf.random.uniform([], -0.05, 0.05), \n w * tf.random.uniform([], -0.05, 0.05)\n )\n image_1 = tfa.image.translate(image, shift_vector)\n label_1 = tfa.image.translate(label, shift_vector)\n\n\n ### random flip\n vertial_cond = tf.cast(tf.random.categorical(tf.math.log([[0.5, 0.5]]), 1)[0][0], tf.bool)\n image_2 = tf.cond(\n vertial_cond,\n lambda : tf.image.flip_left_right(image_1),\n lambda : image_1\n )\n label_2 = tf.cond(\n vertial_cond,\n lambda : tf.image.flip_left_right(label_1),\n lambda : label_1\n )\n\n horizontal_cond = tf.cast(tf.random.categorical(tf.math.log([[0.5, 0.5]]), 1)[0][0], tf.bool)\n image_3 = tf.cond(\n horizontal_cond,\n lambda : tf.image.flip_up_down(image_2),\n lambda : image_2\n )\n label_3 = tf.cond(\n horizontal_cond,\n lambda : tf.image.flip_up_down(label_2),\n lambda : label_2\n )\n\n ### random rotation\n rotate_angle = tf.random.uniform([], -45, 45)\n\n image_4 = tfa.image.rotate(image_3, rotate_angle)\n label_4 = tfa.image.rotate(label_3, rotate_angle)\n\n\n ### random shear\n shear_alpha = tf.random.uniform([], -0.27, 0.27) # about [-15 degree ~ +15 degree ]\n \n image_5 = tfa.image.shear_x(image_4, shear_alpha, 255)\n label_5 = tf.image.rgb_to_grayscale(\n tfa.image.shear_x(tf.image.grayscale_to_rgb(label_4), shear_alpha, 0)\n )\n \n verify_label_values(label_5)\n \n# image_5 = tf.keras.preprocessing.image.random_shear(image_4.numpy(), intensity = shear_angle, row_axis=0, col_axis=1, channel_axis=2)\n# label_5 = tf.keras.preprocessing.image.random_shear(label_4.numpy(), intensity = shear_angle, row_axis=0, col_axis=1, channel_axis=2)\n \n# image_5 = image_4\n# label_5 = label_4\n \n ### random resizing\n resize_factor = tf.random.uniform([], 0.6, 2.0)\n resize_h = tf.cast(tf.math.round(h * resize_factor), tf.int32)\n resize_w = tf.cast(tf.math.round(w * resize_factor), tf.int32)\n\n image_6 = tf.image.resize(image_5, (resize_h, resize_w))\n label_6 = tf.image.resize(label_5, (resize_h, resize_w), method = \"nearest\")\n# tf.print(tf.shape(image_6), tf.shape(label_6))\n \n ### center crop\n image_7, label_7 = center_crop(image, label)\n \n return tf.cast(image_7, dtype = tf.float32), label_7\n\n\n###\n@tf.function\ndef center_crop(image, label):\n ch = tf.cast(tf.shape(image), tf.float32)[2]\n \n ### center crop\n target_size = tf.constant(102)\n# central_fraction = tf.divide(target_size, tf.shape(image_6)[0])\n# image_7 = tf.image.central_crop(image_6, central_fraction)\n# label_7 = tf.image.central_crop(label_6, central_fraction)\n offset_height = tf.subtract(\n tf.round(tf.divide(tf.shape(image)[0], 2)), tf.round(tf.divide(target_size, 2))\n )\n offset_width = tf.subtract(\n tf.round(tf.divide(tf.shape(image)[1], 2)), tf.round(tf.divide(target_size, 2))\n )\n \n offset_height_c = tf.cast(offset_height, tf.int32)\n offset_width_c = tf.cast(offset_width, tf.int32)\n \n image_cropped = tf.image.crop_to_bounding_box(image, offset_height_c, offset_width_c, target_size, target_size)\n label_cropped = tf.image.crop_to_bounding_box(label, offset_height_c, offset_width_c, target_size, target_size)\n \n tf.assert_equal(tf.shape(image_cropped)[0], 
target_size, \"Augmented data's shape is not [{0}, {0}, {1}]\".format(target_size, ch))\n \n return image_cropped, label_cropped\n\n\n###\ndef _resize_label_only(image, label):\n ### resizing label to (54, 54)\n label_resized = tf.image.resize(tf.cast(label, dtype = tf.uint8), (54, 54), method = \"nearest\")\n# label_one_hotted = tf.one_hot(\n# tf.squeeze(label_resized, axis = -1), depth = 2\n# )\n \n return image, label_resized\n\n \n### \ndef standardize_and_resize_label_only(image, label):\n image_standardized = tf.image.per_image_standardization(tf.cast(image, dtype=tf.float32))\n _, label_resized = _resize_label_only(image, label)\n \n return image_standardized, label_resized\n \n###\ndef val_set_process(image, label):\n ### center crop\n target_size = tf.constant(102)\n# central_fraction = tf.divide(target_size, tf.shape(image_6)[0])\n# image_7 = tf.image.central_crop(image_6, central_fraction)\n# label_7 = tf.image.central_crop(label_6, central_fraction)\n offset_height = tf.subtract(\n tf.round(tf.divide(tf.shape(image)[0], 2)), tf.round(tf.divide(target_size, 2))\n )\n offset_width = tf.subtract(\n tf.round(tf.divide(tf.shape(image)[1], 2)), tf.round(tf.divide(target_size, 2))\n )\n \n offset_height_c = tf.cast(offset_height, tf.int32)\n offset_width_c = tf.cast(offset_width, tf.int32)\n \n image_1 = tf.image.crop_to_bounding_box(image, offset_height_c, offset_width_c, target_size, target_size)\n label_1 = tf.image.crop_to_bounding_box(label, offset_height_c, offset_width_c, target_size, target_size) \n\n return image_1, label_1\n\n\n###\ndef construct_tiles_meta(*args, **kwargs):\n return merge_row_meta(*args, **kwargs)\n\n###\ndef merge_row_meta(csv_root, filter_by_lowband_density = True):\n \"\"\"\n csv_root : csv root path.\n \n \"\"\"\n csv_root = Path(csv_root)\n l1 = list(csv_root.glob(\"*tiles_meta.csv\"))\n \n if l1:\n print(\"tiles_meta.csv was found in csv_root path. {}\\nLoading the file instead of making new one...\".format(l1[0]))\n return pd.read_csv(l1[0]).iloc[:, 2:]\n \n else:\n tiles_meta = pd.DataFrame(columns = pd.read_csv(next(csv_root.glob(\"**/*.csv\")), index_col = 0).columns)\n\n pb = csp.Progressbar()\n el = []\n for i, e in enumerate(csv_root.glob(\"**/*.csv\")):\n try:\n tiles_meta = tiles_meta.append(pd.read_csv(e, index_col = 0))\n if filter_by_lowband_density:\n tiles_meta = tiles_meta.loc[tiles_meta['lowband_density'] > 1000, ] # Exclude non-tissue tiles\n pb.show(i, \"Merge all tile meta dataframes... 
\")\n \n except Exception as exc:\n print(\"An error occured during processing {}\".format(e))\n el.append((e, exc.__class__.__name__, exc))\n \n tiles_meta.to_csv(csv_root / \"tiles_meta.csv\")\n \n return tiles_meta.reset_index()\n\n\n###\ndef make_meta_ready(tiles_meta, random_seed):\n glom_tile_indices = tiles_meta.index[tiles_meta['mask_density'] > 0]\n \n nonglom_indices = tiles_meta.index[tiles_meta['mask_density'] == 0]\n \n np.random.seed(random_seed)\n nonglom_selected_indices = np.random.choice(nonglom_indices, size = len(glom_tile_indices))\n \n# nonglom_indices_r = set(nonglom_indices).difference(nonglom_selected_indices)\n \n tiles_meta = tiles_meta.loc[np.concatenate((glom_tile_indices, nonglom_selected_indices), axis = 0), ].sort_index(axis = 0).reset_index()\n \n for k,v in locals().items():\n globals()[k] = v\n \n return tiles_meta\n\n\n###\ndef find_shear_alpha(angle):\n \"\"\"\n Find alpha value of tfa.image.shear_x in accordance with the shear angle which you want\n \n angle : a radian angle\n \"\"\"\n return tf.sqrt(\n 1 / tf.square(tf.cos(angle)) - 1\n )\n\n\n###\n@tf.function\ndef verify_label_values(label):\n \"\"\"\n Check label values if the unique values are in [0, 1].\n \n \"\"\"\n y0 = tf.constant(0, dtype = tf.uint8)\n y1 = tf.constant(1, dtype = tf.uint8)\n \n tf.Assert(\n tf.reduce_all(\n tf.logical_or(tf.equal(label, y0), tf.equal(label, y1))\n ),\n [\"Label array has other values than [0, 1] after sheared.\"])\n \n \n \n###\ndef crop_and_cast(image, label):\n \n return center_crop(tf.cast(image, dtype = tf.float32), tf.cast(label, dtype = tf.uint8))\n\n\n###\ndef train_preprocess(tfrecord):\n image, label = extract_image_label_only(tfrecord)\n image_aug, label_aug = data_aug(image, label)\n image_std, label_std = standardize_and_resize_label_only(image_aug, label_aug)\n \n return image_std, label_std\n\n\n###\ndef val_preprocess(tfrecord):\n image, label = extract_image_label_only(tfrecord)\n image_1, label_1 = val_set_process(image, label)\n image_std, label_std = standardize_and_resize_label_only(image_1, label_1)\n \n return image_std, label_std\n\n\n###\ndef test_preprocess(tfrecord):\n image, label = extract_image_label_only_for_test(tfrecord)\n image_std, label_std = standardize_and_resize_label_only(image, label)\n \n return image_std, label_std\n\n\n###\ndef make_dataset_(shard_index, num_shards = None, num_repeat = None, filepaths = None, preprocess_func = None, batch_size = None):\n filepaths = filepaths.shard(num_shards, shard_index)\n tfrecords_dataset = tf.data.TFRecordDataset(filepaths, compression_type = \"GZIP\")\n dataset = tfrecords_dataset.repeat(num_repeat).map(preprocess_func).shuffle(100)\n \n return dataset.batch(batch_size)\n \n\n###\ndef make_dataset_val(shard_index, num_shards = None, num_repeat = None, filepaths = None, preprocess_func = None, batch_size = None):\n filepaths = filepaths.shard(num_shards, shard_index)\n tfrecords_dataset = tf.data.TFRecordDataset(filepaths, compression_type = \"GZIP\")\n dataset = tfrecords_dataset.map(preprocess_func)\n \n return dataset.batch(batch_size)\n \n \n###\ndef make_dataset_2(shard_index, num_shards = None, num_repeat = None, filepaths = None, preprocess_func = None, batch_size = None):\n filepaths = filepaths.shard(num_shards, shard_index)\n tfrecords_dataset = tf.data.TFRecordDataset(filepaths, compression_type = \"GZIP\")\n dataset = tfrecords_dataset.repeat(num_repeat).map(preprocess_func).shuffle(100)\n \n return dataset.batch(batch_size)\n\n\n###\n###\ndef 
write_tfrecord_tiles_mp_main_test3(row_number, image_index, image_id, output_dir, tile_cols, tile_size, stride):\n\n dir_path = Path(output_dir) / \"row{}\".format(row_number)\n dir_path.mkdir(exist_ok = True)\n print(id(image), id(mask))\n \n image_shape = image.shape\n \n ## set tile_df\n tile_df_path = dir_path / \"{}_row{}_tile-df.csv\".format(image_id, row_number)\n \n if tile_df_path.is_file():\n tile_df = pd.read_csv(tile_df_path, index_col = 0)\n else:\n tile_df = pd.DataFrame(columns = ['img_index', 'img_id', \"image_shape\", 'tile_id', 'tile_rel_path', 'tile_row_num', 'tile_col_num', \n 'lowband_density', 'mask_density', \"zero_padded\"])\n \n tileID = 0\n \n for col_number in range(tile_cols):\n dataset_file_path = dir_path / 'row{}_col{}.tfrecord'.format(row_number, col_number)\n relative_path = image_id + \"/row{0}/row{0}_col{1}.tfrecord\".format(row_number, col_number)\n\n lower_col_range = col_number * stride\n image_tile, orig_image_shape = get_tile(image, tile_size, row_number, col_number, stride)\n tile_mask, _ = get_tile(mask, tile_size, row_number, col_number, stride)\n \n print(\"image_tile.shape = {}\\nimage_id = {}, row_number = {}, col_number = {}\".format(image_tile.shape, image_id, row_number, col_number))\n if image_tile.shape != (tile_size, tile_size, 3):\n raise ValueError(\"tile size is not {} but {}\\nimage_id = {}, row_number = {}, col_number = {}\".format((tile_size, tile_size, 3), image_tile.shape, image_id, row_number, col_number))\n \n num_records = create_tfrecord(image_index, image_tile, tile_mask, tileID, row_number, col_number, dataset_file_path)\n\n # populate the metadata for this tile\n img_hist = np.histogram(image_tile)\n lowband_density = np.sum(img_hist[0][0:4])\n mask_density = np.count_nonzero(tile_mask)\n tile_df.loc[tileID, :] = {'img_index':image_index, 'img_id':image_id, \"image_shape\": image_shape, 'tile_id': tileID,\n 'tile_rel_path':relative_path, 'tile_col_num':col_number, 'tile_row_num':row_number,'lowband_density':lowband_density, \n 'mask_density':mask_density, \"zero_padded\":orig_image_shape}\n\n tileID += 1\n \n del image_tile, tile_mask, orig_image_shape\n \n \n tile_df.to_csv(tile_df_path)\n\n\n###\ndef extract_image_label_only_for_test(tfdata, image_feature_description = image_feature_description):\n parsed_example = tf.io.parse_single_example(tfdata, image_feature_description)\n \n h, w, ch = parsed_example['height'], parsed_example['width'], parsed_example['num_channels']\n \n img_dtype = parsed_example['img_dtype']\n image_decoded = tf.io.decode_raw(parsed_example['img_bytes'], out_type = 'uint8')\n image = tf.reshape(image_decoded, (h, w, ch))\n \n# image_float32 = tf.cast(image, dtype = tf.float32)\n \n mask_dtype = parsed_example['mask_dtype']\n mask_decoded = tf.io.decode_raw(parsed_example['mask'], out_type = 'bool')\n mask = tf.reshape(mask_decoded, (h, w, 3))\n \n# mask_uint8 = tf.cast(mask, dtype = tf.uint8)\n \n return image, mask\n\n\n# New version\ndef rle_encode_less_memory(img):\n '''\n img: numpy array, 1 - mask, 0 - background\n Returns run length as string formated\n This simplified method requires first and last pixel to be zero\n '''\n pixels = img.T.flatten()\n \n # This simplified method requires first and last pixel to be zero\n pixels[0] = 0\n pixels[-1] = 0\n runs = np.where(pixels[1:] != pixels[:-1])[0] + 2\n runs[1::2] -= runs[::2]\n \n return ' '.join(str(x) for x in runs)\n\n\n###\ndef fill_backplate(tile, meta_data, backplate):\n \"\"\"\n tile : 3 dimensional array\n \n \"\"\"\n \n row_pos = 
meta_data['tile_row_num']\n    col_pos = meta_data['tile_col_num']\n    \n    # NOTE: 'strides' and 'orig_tile_size' are not defined anywhere in this module;\n    # they must be provided in the caller's global scope for this assignment to run.\n    backplate[row_pos*strides : row_pos*strides + orig_tile_size, col_pos*strides : col_pos*strides + orig_tile_size] = tile" ]
[ [ "tensorflow.io.TFRecordOptions", "numpy.expand_dims", "numpy.squeeze", "tensorflow.cast", "tensorflow.equal", "pandas.DataFrame", "numpy.concatenate", "numpy.histogram", "numpy.where", "tensorflow.train.Int64List", "pandas.read_csv", "tensorflow.data.TFRecordDataset", "tensorflow.divide", "numpy.count_nonzero", "numpy.zeros", "tensorflow.image.grayscale_to_rgb", "tensorflow.shape", "tensorflow.random.uniform", "tensorflow.train.BytesList", "numpy.transpose", "tensorflow.train.FloatList", "tensorflow.train.Features", "tensorflow.math.round", "numpy.array", "numpy.sum", "tensorflow.constant", "tensorflow.cos", "tensorflow.image.crop_to_bounding_box", "numpy.random.seed", "tensorflow.io.parse_single_example", "tensorflow.io.decode_raw", "matplotlib.pyplot.subplots", "tensorflow.reshape", "tensorflow.image.flip_left_right", "tensorflow.io.FixedLenFeature", "tensorflow.image.flip_up_down", "tensorflow.math.log", "tensorflow.image.resize" ] ]
ankitshah009/R2Plus1D
[ "45b04d6b7cd51baf81c144df56369b7521334cc6" ]
[ "c2/tools/train_net.py" ]
[ "# Copyright 2018-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# TODO: sync with latest workflow features (Deepti or Du should have a look)\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport logging\nimport numpy as np\nimport time\n\nfrom caffe2.python import workspace, cnn\nfrom caffe2.python import timeout_guard, experiment_util, data_parallel_model\nimport caffe2.python.predictor.predictor_exporter as pred_exp\nimport models.model_builder as model_builder\nimport utils.model_helper as model_helper\nimport utils.model_loader as model_loader\nfrom utils import reader_utils\nfrom utils import metric\n\n# Logger\nlog = logging.getLogger(\"train_net\")\nlog.setLevel(logging.INFO)\n\n\ndef AddMomentumParameterUpdate(train_model, LR):\n '''\n Add the momentum-SGD update.\n '''\n params = train_model.GetParams()\n assert(len(params) > 0)\n\n for param in params:\n param_grad = train_model.param_to_grad[param]\n param_momentum = train_model.param_init_net.ConstantFill(\n [param], param + '_momentum', value=0.0\n )\n\n # Update param_grad and param_momentum in place\n train_model.net.MomentumSGDUpdate(\n [param_grad, param_momentum, LR, param],\n [param_grad, param_momentum, param],\n momentum=0.9,\n nesterov=1,\n )\n\n\ndef GetCheckpointParams(train_model):\n prefix = \"gpu_{}\".format(train_model._devices[0])\n params = [str(p) for p in train_model.GetParams(prefix)]\n params.extend([str(p) + \"_momentum\" for p in params])\n params.extend([str(p) for p in train_model.GetComputedParams(prefix)])\n\n assert len(params) > 0\n return params\n\n\ndef SaveModel(args, train_model, epoch):\n prefix = \"gpu_{}\".format(train_model._devices[0])\n predictor_export_meta = pred_exp.PredictorExportMeta(\n predict_net=train_model.net.Proto(),\n parameters=GetCheckpointParams(train_model),\n inputs=[prefix + \"/data\"],\n outputs=[prefix + \"/softmax\"],\n shapes={\n prefix + \"/softmax\": (1, args.num_labels),\n prefix + \"/data\": (\n args.num_channels,\n args.clip_length_of if args.input_type else args.clip_length_rgb,\n args.crop_size,\n args.crop_size\n )\n }\n )\n\n # save the train_model for the current epoch\n model_path = \"%s/%s_%d.mdl\" % (\n args.file_store_path,\n args.save_model_name,\n epoch,\n )\n\n # save the model\n pred_exp.save_to_db(\n db_type='minidb',\n db_destination=model_path,\n predictor_export_meta=predictor_export_meta,\n )\n\n\ndef RunEpoch(\n args,\n epoch,\n train_model,\n test_model,\n batch_size,\n num_shards,\n expname,\n explog,\n):\n log.info(\"Starting epoch {}/{}\".format(epoch, args.num_epochs))\n epoch_iters = int(args.epoch_size / batch_size / num_shards)\n if args.multi_label:\n accumulated_prob = np.empty(shape=[0, args.num_labels], dtype=np.float)\n accumulated_label = np.empty(shape=[0, args.num_labels], dtype=np.int32)\n for i in range(epoch_iters):\n # This timeout is required (temporarily) since 
CUDA-NCCL\n # operators might deadlock when synchronizing between GPUs.\n timeout = 6000.0 if i == 0 else 600.0\n with timeout_guard.CompleteInTimeOrDie(timeout):\n t1 = time.time()\n workspace.RunNet(train_model.net.Proto().name)\n t2 = time.time()\n dt = t2 - t1\n if args.multi_label:\n prefix = \"gpu_{}\".format(train_model._devices[0])\n prob = workspace.FetchBlob(prefix + '/prob')\n label = workspace.FetchBlob(prefix + '/label')\n accumulated_prob = np.concatenate((accumulated_prob, prob), axis=0)\n accumulated_label = np.concatenate(\n (accumulated_label, label), axis=0\n )\n\n if i % args.display_iter == 0:\n fmt = \"Finished iteration {}/{} of epoch {} ({:.2f} clips/sec)\"\n log.info(fmt.format(i, epoch_iters, epoch, batch_size / dt))\n prefix = \"gpu_{}\".format(train_model._devices[0])\n loss = workspace.FetchBlob(prefix + '/loss')\n if args.multi_label:\n mean_auc, mean_ap, _, _ = \\\n metric.mean_ap_metric(accumulated_prob, accumulated_label)\n train_msg = \\\n \"Training loss: {}, AUC: {}, mAP: {}\".format(\n np.mean(loss), mean_auc, mean_ap\n )\n if accumulated_label.shape[0] > 4096:\n accumulated_prob = accumulated_prob[-4096:, :]\n accumulated_label = accumulated_label[-4096:, :]\n else:\n accuracy = workspace.FetchBlob(prefix + '/accuracy')\n train_msg = \"Training loss: {}, accuracy: {}\".format(\n loss, accuracy\n )\n\n log.info(train_msg)\n\n num_clips = epoch * epoch_iters * batch_size\n prefix = \"gpu_{}\".format(train_model._devices[0])\n loss = workspace.FetchBlob(prefix + '/loss')\n learning_rate = workspace.FetchBlob(prefix + '/LR')\n if args.multi_label:\n accuracy = -1\n loss = np.mean(loss)\n else:\n mean_ap = -1\n mean_auc = -1\n if (test_model is not None):\n # Run 100 iters of testing\n ntests = 0\n test_accuracy = 0\n test_mean_auc = 0\n test_mean_ap = 0\n all_prob = np.empty(shape=[0, args.num_labels], dtype=np.float)\n all_label = np.empty(shape=[0, args.num_labels], dtype=np.int32)\n for _ in range(0, 100):\n workspace.RunNet(test_model.net.Proto().name)\n for g in test_model._devices:\n prefix = \"gpu_{}\".format(g)\n if args.multi_label:\n prob = workspace.FetchBlob(prefix + '/prob')\n label = workspace.FetchBlob(prefix + '/label')\n all_prob = np.concatenate((all_prob, prob), axis=0)\n all_label = np.concatenate((all_label, label), axis=0)\n else:\n accuracy = workspace.FetchBlob(prefix + '/accuracy')\n test_accuracy += np.asscalar(accuracy)\n ntests += 1\n if args.multi_label:\n test_mean_auc, test_mean_ap, _, _ = \\\n metric.mean_ap_metric(all_prob, all_label)\n log.info(\"Test AUC: {}, mAP: {}\".format(mean_auc, mean_ap))\n else:\n test_accuracy /= ntests\n log.info(\"Test accuracy: {}\".format(test_accuracy))\n else:\n test_accuracy = (-1)\n test_mean_auc = (-1)\n test_mean_ap = (-1)\n\n explog.log(\n input_count=num_clips,\n batch_count=(i + epoch * epoch_iters),\n additional_values={\n 'accuracy': accuracy,\n 'train_AUC': mean_auc,\n 'train_mAP': mean_ap,\n 'loss': loss,\n 'learning_rate': learning_rate,\n 'epoch': epoch,\n 'test_accuracy': test_accuracy,\n 'test_mean_auc': test_mean_auc,\n 'test_mean_ap': test_mean_ap,\n }\n )\n assert loss < 40, \"Exploded gradients :(\"\n\n return epoch + 1\n\n\ndef Train(args):\n if args.gpus is not None:\n gpus = [int(x) for x in args.gpus.split(',')]\n num_gpus = len(gpus)\n else:\n gpus = range(args.num_gpus)\n num_gpus = args.num_gpus\n\n log.info(\"Running on GPUs: {}\".format(gpus))\n\n # Modify to make it consistent with the distributed trainer\n total_batch_size = args.batch_size * num_gpus\n 
batch_per_device = args.batch_size\n\n # Round down epoch size to closest multiple of batch size across machines\n epoch_iters = int(args.epoch_size / total_batch_size)\n args.epoch_size = epoch_iters * total_batch_size\n log.info(\"Using epoch size: {}\".format(args.epoch_size))\n\n # Create CNNModeLhelper object\n train_model = cnn.CNNModelHelper(\n order=\"NCHW\",\n name='{}_train'.format(args.model_name),\n use_cudnn=(True if args.use_cudnn == 1 else False),\n cudnn_exhaustive_search=True,\n ws_nbytes_limit=(args.cudnn_workspace_limit_mb * 1024 * 1024),\n )\n\n # Model building functions\n def create_model_ops(model, loss_scale):\n return model_builder.build_model(\n model=model,\n model_name=args.model_name,\n model_depth=args.model_depth,\n num_labels=args.num_labels,\n batch_size=args.batch_size,\n num_channels=args.num_channels,\n crop_size=args.crop_size,\n clip_length=(\n args.clip_length_of if args.input_type\n else args.clip_length_rgb\n ),\n loss_scale=loss_scale,\n pred_layer_name=args.pred_layer_name,\n multi_label=args.multi_label,\n channel_multiplier=args.channel_multiplier,\n bottleneck_multiplier=args.bottleneck_multiplier,\n use_dropout=args.use_dropout,\n conv1_temporal_stride=args.conv1_temporal_stride,\n conv1_temporal_kernel=args.conv1_temporal_kernel,\n use_pool1=args.use_pool1,\n audio_input_3d=args.audio_input_3d,\n g_blend=args.g_blend,\n audio_weight=args.audio_weight,\n visual_weight=args.visual_weight,\n av_weight=args.av_weight,\n )\n\n # SGD\n def add_parameter_update_ops(model):\n model.AddWeightDecay(args.weight_decay)\n ITER = model.Iter(\"ITER\")\n stepsz = args.step_epoch * args.epoch_size / args.batch_size / num_gpus\n LR = model.net.LearningRate(\n [ITER],\n \"LR\",\n base_lr=args.base_learning_rate * num_gpus,\n policy=\"step\",\n stepsize=int(stepsz),\n gamma=args.gamma,\n )\n AddMomentumParameterUpdate(model, LR)\n\n # Input. 
Note that the reader must be shared with all GPUS.\n train_reader, train_examples = reader_utils.create_data_reader(\n train_model,\n name=\"train_reader\",\n input_data=args.train_data,\n )\n log.info(\"Training set has {} examples\".format(train_examples))\n\n def add_video_input(model):\n model_helper.AddVideoInput(\n model,\n train_reader,\n batch_size=batch_per_device,\n length_rgb=args.clip_length_rgb,\n clip_per_video=1,\n random_mirror=True,\n decode_type=0,\n sampling_rate_rgb=args.sampling_rate_rgb,\n scale_h=args.scale_h,\n scale_w=args.scale_w,\n crop_size=args.crop_size,\n video_res_type=args.video_res_type,\n short_edge=min(args.scale_h, args.scale_w),\n num_decode_threads=args.num_decode_threads,\n do_multi_label=args.multi_label,\n num_of_class=args.num_labels,\n random_crop=True,\n input_type=args.input_type,\n length_of=args.clip_length_of,\n sampling_rate_of=args.sampling_rate_of,\n frame_gap_of=args.frame_gap_of,\n do_flow_aggregation=args.do_flow_aggregation,\n flow_data_type=args.flow_data_type,\n get_rgb=(args.input_type == 0 or args.input_type >= 3),\n get_optical_flow=(args.input_type == 1 or args.input_type >= 4),\n get_logmels=(args.input_type >= 2),\n get_video_id=args.get_video_id,\n jitter_scales=[int(n) for n in args.jitter_scales.split(',')],\n use_local_file=args.use_local_file,\n )\n\n # Create parallelized model\n data_parallel_model.Parallelize_GPU(\n train_model,\n input_builder_fun=add_video_input,\n forward_pass_builder_fun=create_model_ops,\n param_update_builder_fun=add_parameter_update_ops,\n devices=gpus,\n rendezvous=None,\n net_type=('prof_dag' if args.profiling == 1 else 'dag'),\n optimize_gradient_memory=True,\n )\n\n # Add test model, if specified\n test_model = None\n if args.test_data is not None:\n log.info(\"----- Create test net ----\")\n test_model = cnn.CNNModelHelper(\n order=\"NCHW\",\n name='{}_test'.format(args.model_name),\n use_cudnn=(True if args.use_cudnn == 1 else False),\n cudnn_exhaustive_search=True\n )\n\n test_reader, test_examples = reader_utils.create_data_reader(\n test_model,\n name=\"test_reader\",\n input_data=args.test_data,\n )\n\n log.info(\"Testing set has {} examples\".format(test_examples))\n\n def test_input_fn(model):\n model_helper.AddVideoInput(\n model,\n test_reader,\n batch_size=batch_per_device,\n length_rgb=args.clip_length_rgb,\n clip_per_video=1,\n decode_type=0,\n random_mirror=False,\n random_crop=False,\n sampling_rate_rgb=args.sampling_rate_rgb,\n scale_h=args.scale_h,\n scale_w=args.scale_w,\n crop_size=args.crop_size,\n video_res_type=args.video_res_type,\n short_edge=min(args.scale_h, args.scale_w),\n num_decode_threads=args.num_decode_threads,\n do_multi_label=args.multi_label,\n num_of_class=args.num_labels,\n input_type=args.input_type,\n length_of=args.clip_length_of,\n sampling_rate_of=args.sampling_rate_of,\n frame_gap_of=args.frame_gap_of,\n do_flow_aggregation=args.do_flow_aggregation,\n flow_data_type=args.flow_data_type,\n get_rgb=(args.input_type == 0),\n get_optical_flow=(args.input_type == 1),\n get_video_id=args.get_video_id,\n use_local_file=args.use_local_file,\n )\n\n data_parallel_model.Parallelize_GPU(\n test_model,\n input_builder_fun=test_input_fn,\n forward_pass_builder_fun=create_model_ops,\n param_update_builder_fun=None,\n devices=gpus,\n optimize_gradient_memory=True,\n )\n workspace.RunNetOnce(test_model.param_init_net)\n workspace.CreateNet(test_model.net)\n\n workspace.RunNetOnce(train_model.param_init_net)\n workspace.CreateNet(train_model.net)\n\n epoch = 0\n # 
load the pre-trained model and reset epoch\n if args.load_model_path is not None:\n if args.db_type == 'pickle':\n model_loader.LoadModelFromPickleFile(\n train_model,\n args.load_model_path,\n use_gpu=True,\n root_gpu_id=gpus[0]\n )\n else:\n model_helper.LoadModel(\n args.load_model_path, args.db_type\n )\n # Sync the model params\n data_parallel_model.FinalizeAfterCheckpoint(\n train_model,\n GetCheckpointParams(train_model),\n )\n\n if args.is_checkpoint:\n # reset epoch. load_model_path should end with *_X.mdl,\n # where X is the epoch number\n last_str = args.load_model_path.split('_')[-1]\n if last_str.endswith('.mdl'):\n epoch = int(last_str[:-4])\n log.info(\"Reset epoch to {}\".format(epoch))\n else:\n log.warning(\"The format of load_model_path doesn't match!\")\n\n expname = \"%s_gpu%d_b%d_L%d_lr%.2f\" % (\n args.model_name,\n args.num_gpus,\n total_batch_size,\n args.num_labels,\n args.base_learning_rate,\n )\n explog = experiment_util.ModelTrainerLog(expname, args)\n\n # Run the training one epoch a time\n while epoch < args.num_epochs:\n epoch = RunEpoch(\n args,\n epoch,\n train_model,\n test_model,\n total_batch_size,\n 1,\n expname,\n explog\n )\n\n # Save the model for each epoch\n SaveModel(args, train_model, epoch)\n\n\ndef main():\n # TODO: use argv\n parser = argparse.ArgumentParser(\n description=\"Caffe2: simple video training\"\n )\n parser.add_argument(\"--model_name\", type=str, default='r2plus1d',\n help=\"Name of the model\")\n parser.add_argument(\"--model_depth\", type=int, default=18,\n help=\"Depth of the model\")\n parser.add_argument(\"--train_data\", type=str, default=None,\n help=\"Path to train data\",\n required=True)\n parser.add_argument(\"--test_data\", type=str, default=None,\n help=\"Path to test data\")\n parser.add_argument(\"--db_type\", type=str, default=\"minidb\",\n help=\"Database type to save the training model\")\n parser.add_argument(\"--gpus\", type=str,\n help=\"Comma separated list of GPU devices to use\")\n parser.add_argument(\"--num_gpus\", type=int, default=1,\n help=\"Number of GPU devices (instead of --gpus)\")\n parser.add_argument(\"--scale_h\", type=int, default=128,\n help=\"Scale image height to\")\n parser.add_argument(\"--scale_w\", type=int, default=171,\n help=\"Scale image width to\")\n parser.add_argument(\"--crop_size\", type=int, default=112,\n help=\"Input image size (to crop to)\")\n parser.add_argument(\"--num_decode_threads\", type=int, default=4,\n help=\"# of threads/GPU dedicated for video decoding\")\n parser.add_argument(\"--clip_length_rgb\", type=int, default=16,\n help=\"Length of input clips\")\n parser.add_argument(\"--sampling_rate_rgb\", type=int, default=1,\n help=\"Frame sampling rate\")\n parser.add_argument(\"--num_labels\", type=int, default=101,\n help=\"Number of labels\")\n parser.add_argument(\"--num_channels\", type=int, default=3,\n help=\"Number of channels\")\n parser.add_argument(\"--clip_length_of\", type=int, default=8,\n help=\"Frames of optical flow data\")\n parser.add_argument(\"--sampling_rate_of\", type=int, default=2,\n help=\"\")\n parser.add_argument(\"--frame_gap_of\", type=int, default=2,\n help=\"\")\n parser.add_argument(\"--input_type\", type=int, default=0,\n help=\"False=rgb, True=optical flow\")\n parser.add_argument(\"--flow_data_type\", type=int, default=0,\n help=\"0=Flow2C, 1=Flow3C, 2=FlowWithGray, 3=FlowWithRGB\")\n parser.add_argument(\"--do_flow_aggregation\", type=int, default=0,\n help=\"whether to aggregate optical flow across \"\n + \"multiple 
frames\")\n parser.add_argument(\"--get_video_id\", type=int, default=0,\n help=\"Output video id\")\n parser.add_argument(\"--batch_size\", type=int, default=32,\n help=\"Batch size, total over all GPUs\")\n parser.add_argument(\"--epoch_size\", type=int, default=110000,\n help=\"Number of videos/epoch, total over all machines\")\n parser.add_argument(\"--num_epochs\", type=int, default=50,\n help=\"Num epochs.\")\n parser.add_argument(\"--base_learning_rate\", type=float, default=0.003,\n help=\"Initial learning rate.\")\n parser.add_argument(\"--step_epoch\", type=int, default=10,\n help=\"Reducing learning rate every step_epoch.\")\n parser.add_argument(\"--gamma\", type=float, default=0.1,\n help=\"Learning rate decay factor.\")\n parser.add_argument(\"--display_iter\", type=int, default=10,\n help=\"Display information every # of iterations.\")\n parser.add_argument(\"--weight_decay\", type=float, default=0.005,\n help=\"Weight decay (L2 regularization)\")\n parser.add_argument(\"--cudnn_workspace_limit_mb\", type=int, default=64,\n help=\"CuDNN workspace limit in MBs\")\n parser.add_argument(\"--file_store_path\", type=str, default=\"/tmp\",\n help=\"Path to directory to use for saving checkpoints\")\n parser.add_argument(\"--save_model_name\", type=str, default=\"simple_c3d\",\n help=\"Save the trained model to a given name\")\n parser.add_argument(\"--load_model_path\", type=str, default=None,\n help=\"Load previously saved model to continue training\")\n parser.add_argument(\"--use_cudnn\", type=int, default=1,\n help=\"Use CuDNN\")\n parser.add_argument(\"--profiling\", type=int, default=0,\n help=\"Profile training time\")\n parser.add_argument(\"--pred_layer_name\", type=str, default=None,\n help=\"the prediction layer name\")\n parser.add_argument(\"--multi_label\", type=int, default=0,\n help=\"Multiple label training\")\n parser.add_argument(\"--channel_multiplier\", type=float, default=1.0,\n help=\"Channel multiplier\")\n parser.add_argument(\"--bottleneck_multiplier\", type=float, default=1.0,\n help=\"Bottleneck multiplier\")\n parser.add_argument(\"--use_dropout\", type=int, default=0,\n help=\"Use dropout at the prediction layer\")\n parser.add_argument(\"--conv1_temporal_stride\", type=int, default=1,\n help=\"Conv1 temporal striding\")\n parser.add_argument(\"--conv1_temporal_kernel\", type=int, default=3,\n help=\"Conv1 temporal kernel\")\n parser.add_argument(\"--video_res_type\", type=int, default=1,\n help=\"Video frame scaling option, 0: scaled by \"\n + \"height x width; 1: scaled by short edge\")\n parser.add_argument(\"--use_pool1\", type=int, default=0,\n help=\"use pool1 layer\")\n parser.add_argument(\"--jitter_scales\", type=str, default=\"128,160\", required=True,\n help=\"spatial scales jitter, separated by commas\")\n parser.add_argument(\"--use_local_file\", type=int, default=0,\n help=\"use local file\")\n parser.add_argument(\"--is_checkpoint\", type=int, default=1,\n help=\"0: pretrained_model is used as initalization\"\n + \"1: pretrained_model is used as a checkpoint\")\n parser.add_argument(\"--audio_input_3d\", type=int, default=0,\n help=\"is audio input 3d or 2d; 0 for 2d\")\n parser.add_argument(\"--g_blend\", type=int, default=0,\n help=\"use gradient-blending to train model\")\n parser.add_argument(\"--audio_weight\", type=float, default=0.0,\n help=\"g_blend weights for audio head\")\n parser.add_argument(\"--visual_weight\", type=float, default=0.0,\n help=\"g_blend weights for visual head\")\n parser.add_argument(\"--av_weight\", 
type=float, default=1.0,\n help=\"g_blend weights for av head\")\n args = parser.parse_args()\n\n log.info(args)\n\n assert model_builder.model_validation(\n args.model_name,\n args.model_depth,\n args.clip_length_of if args.input_type else args.clip_length_rgb,\n args.crop_size\n )\n\n Train(args)\n\n\nif __name__ == '__main__':\n workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])\n main()\n" ]
[ [ "numpy.concatenate", "numpy.asscalar", "numpy.mean", "numpy.empty" ] ]
hiryou/MLPractice
[ "72de4e54b7775b3e51415deef25dfba13f0df2bd" ]
[ "ludus/neural_net/hello_world/by_numpy.py" ]
[ "import os\nfrom datetime import datetime as dt\n\nimport numpy as np\n\n\"\"\"\nInspired by https://repl.it/repls/OrganicVainDoom#main.py\n\"\"\"\n\n\nclass NeuralNet(object):\n r\"\"\"\n This is the bare NN impl to help remind you of how forward/back propagation work in terms of code data structure\n\n Example: we want to map 2 neuron input -> 1 neuron output (input is a 2-d vector, output is a scalar). Let say we\n have 2 hidden neural layers, having 3 & 4 neurons in that order. Also, assume the number of training datapoint = 99.\n This gives us the following NN:\n\n [2 neurons] -> [3 neurons] -> [4 neurons] -> [1 neuron], or\n\n X W0 H0 W1 H1 W2 Y\n (2) -> (3) -> (4) -> (1)\n where:\n X is a [99x2] matrix: 99 datapoints, each is a 2-d vector\n W0 is a [2x3] matrix, W1 is a [3x4] matrix, etc\n H0 is a [99x3] matrix, H1 is a [99x4] matrix, etc\n Y is a [99x1] matrix: 99 datapoints, each is a 1-d vector\n\n To simplify the later for-loop computation, think of Y as H2 (to be added to the list of hidden layers). The idea is\n that weight matrix Wi goes with hidden layer Hi:\n\n X[99x2] | W0[2x3] H0[99x3] | W1[3x4] H1[99x4] | W2[4x1] H2[99x1] |\n (2) | -> (3) | -> (4) | -> (1) |\n\n Following is the data structure we would have:\n\n * h_layers = [3, 4, 1] # count of neurons in each Hi layer\n * W = [ W0[2x3], W1[3x4], W2[4x1] ] # list of weight matrix toward each Hi layer\n * H = [ H0[99x3], H1[99x4], H2[99x1] ] # 99 datapoints, each produces an activation vector Hi at each Hi layer\n\n Algo: for each training epoch (training iteration):\n * Forward:\n X[99x2] . W0[2x3] ~sigmoid -> H0[99x3]\n H0[99x3] . W1[3x4] ~sigmoid -> H1[99x4]\n H1[99x4] . W2[4x1] ~sigmoid -> H2[99x1]\n * Back propagation:\n * Hi -> delta_Hi\n delta_H = [ delta_H0[99x3], delta_H1[99x4], delta_H2[99x1] ] # track the big Delta at each H layer\n ---------\n (Y[99x1] - H2[99x1]) * sigmoid_prime(H2[99x1]) -> delta_H2[99x1]\n ---------\n delta_H2[99x1] . W2_transpose[1x4] * sigmoid_prime(H1[99x4]) -> delta_H1[99x4]\n delta_H1[99x4] . W1_transpose[4x3] * sigmoid_prime(H0[99x3]) -> delta_H0[99x3]\n * delta_Hi -> smoothing/updating Wi\n H1_transpose[4x99] . delta_H2[99x1] ~> W2[4x1]\n H0_transpose[3x99] . delta_H1[99x4] ~> W1[3x4]\n ---------\n X_transpose[2x99] . 
delta_H0[99x3] ~> W0[2x3]\n \"\"\"\n\n eta = 0.5\n\n def __init__(self, X, Y, epoch):\n self.X, self.Y = self.__scaled(X, Y)\n self.epoch = epoch\n\n # TODO make constructor-only param\n # len of this array = number of hidden layers; each num is # of neurons in each layer\n # to simplify algo, we consider output Y as last element of h_layers also\n Y_size = len(self.Y[0]) # neuron count of Y\n h_layers = [3, Y_size]\n\n self.W = list() # weight matrix for each layer: hidden layers & last output layer\n self.H = list() # matrix [#datapoint x neuron count] for each layer: hidden layers & last output layer\n\n X_size = len(self.X[0]) # neuron count of X\n left_neuron_cnt = X_size\n for neuron_cnt in h_layers:\n ww = np.random.randn(left_neuron_cnt, neuron_cnt)\n hh = np.full((len(self.X), neuron_cnt), -0.0001)\n self.W.append(ww)\n self.H.append(hh)\n left_neuron_cnt = neuron_cnt\n\n @staticmethod\n def sigmoid(s):\n return 1 / (1 + np.exp(-s))\n\n @staticmethod\n def sigmoid_prime(sig):\n return sig * (1 - sig)\n\n def get_train_loss(self):\n Y = self.__scaled_back(self.Y)\n H_last = self.__scaled_back(self.H[-1])\n return np.mean(\n np.square(Y - H_last)\n )\n pass\n\n def do_train(self):\n for i in range(self.epoch):\n self.__forward(self.X)\n self.__backward()\n print(\"--epoch={}, loss = {}\".format(i, self.get_train_loss()))\n\n def __scaled(self, X, Y):\n # normalize\n # max 24h a day\n # max score = 100\n return X/24, Y/100\n\n def __scaled_back(self, Y):\n # max score = 100\n return Y*100\n\n def __forward(self, X):\n left_mt = X\n for idx in range(len(self.H)):\n net_H_idx = np.dot(left_mt, self.W[idx])\n self.H[idx] = self.sigmoid(net_H_idx)\n left_mt = self.H[idx]\n\n return self.H[-1]\n\n def __backward(self):\n delta_H = [None for _ in range(len(self.H))]\n # delta: start initially from last layer H[-1] (output)\n delta_H[-1] = (self.Y - self.H[-1]) * self.sigmoid_prime(self.H[-1])\n # then delta: reversed loop from semi-last element (last hidden layer) -> 1st hidden layer\n for idx in range(len(self.H)-2, -1, -1):\n delta_H[idx] = delta_H[idx+1].dot(self.W[idx+1].T) * self.sigmoid_prime(self.H[idx])\n\n # update weights: from right most layer to one before 1st hidden layer\n for idx in range(len(self.W)-1, 0, -1):\n #self.W[idx] += (1 / self.train_cnt) * self.eta * self.H[idx-1].T.dot(delta_H[idx])\n self.W[idx] += self.H[idx-1].T.dot(delta_H[idx])\n # update weights: at layer W0 back to input\n #self.W[0] += (1 / self.train_cnt) * self.eta * self.X.T.dot(delta_H[0])\n self.W[0] += self.X.T.dot(delta_H[0])\n\n\nf = open(os.getcwd()+'/hello_world/study-sleep-grade.txt')\nlines = f.readlines()\nf.close()\n# print(lines)\n\nx_all = []\ny_all = []\nfor line in lines:\n p = line.strip().split(\", \")\n y = p[0].strip().split(' ')\n x = p[1].strip().split(' ')\n x_all.append(x)\n y_all.append(y)\n\nINP = np.array((x_all), dtype=float)\nY = np.array((y_all), dtype=float)\nnn = NeuralNet(INP, Y, epoch=100)\n\nprint(\"-------------------------\")\nprint(\"training ...\")\ntic = dt.now().microsecond\nnn.do_train()\ntoc = dt.now().microsecond\nprint(\"-------------------------\")\nprint(\"train loss = {:.2f}/100 (max score = 100)\".format(nn.get_train_loss()))\nprint(\"Train taken {} micro-secs\".format('{:,}'.format(toc - tic)))\n\n" ]
[ [ "numpy.square", "numpy.dot", "numpy.random.randn", "numpy.exp", "numpy.array" ] ]
SemiUnsupervisedLearning/DGMs_for_semi-unsupervised_learning
[ "a89c7be92403c3582f3bce534f982382f9733055" ]
[ "src/models/m2.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom models.model import model\n\nimport numpy as np\nnp.random.seed(1)\nimport utils.dgm as dgm\n\nimport tensorflow as tf\ntf.set_random_seed(1)\n\nfrom keras.layers import Dense, Activation\nfrom keras.models import Sequential\nfrom keras import initializers\n\n\n\"\"\" \nImplementation of semi-supervised DGMs from Kingma et al.(2014):\np(x,y,z) = p(z) * p(y) * p(x|y,z) \nInference network: q(z,y|x) = q(y|x) * q(z|y,x) \n\nHere we use keras layers to implement MLPs\n\"\"\"\n\n\nclass m2(model):\n\n def __init__(self, n_x, n_y, n_h, n_z=2, x_dist='Gaussian', mc_samples=1,\n alpha=0.1, l2_reg=0.3, ckpt=None,\n learning_paradigm='supervised', name='m2', prior=None,\n analytic_kl=False, output_dir=None, loss_balance='average',\n model_name='m2'):\n\n self.reg_term = tf.placeholder(tf.float32, shape=[], name='reg_term')\n if prior is None:\n self.prior = tf.constant(np.array([1.0 / n_y] * n_y),\n dtype=tf.float32, shape=[1, n_y],\n name='prior_p_y')\n else:\n self.prior = tf.constant(prior, dtype=tf.float32, shape=[1, n_y],\n name='prior_p_y')\n\n super(m2, self).__init__(n_x, n_y, n_h, n_z, x_dist, mc_samples, l2_reg,\n alpha, ckpt, learning_paradigm, name,\n analytic_kl, output_dir, loss_balance, model_name)\n \"\"\" TODO: add any general terms we want to have here \"\"\"\n\n\n def build_model(self):\n \"\"\" Define model components and variables \"\"\"\n self.create_placeholders()\n\n glorot_initializer = initializers.glorot_normal()\n normal_initializer = initializers.random_normal(stddev=0.001)\n\n ## Inference Networks:\n # Make q(y|x) network\n self.q_y_x_model = Sequential()\n self.q_y_x_model.add(Dense(self.intermediate_dim, name='hidden_1',\n kernel_initializer=glorot_initializer,\n bias_initializer=normal_initializer,\n input_dim=self.n_x))\n\n self.q_y_x_model.add(Activation('relu'))\n\n self.q_y_x_model.add(Dense(self.intermediate_dim, name='hidden_2',\n kernel_initializer=glorot_initializer,\n bias_initializer=normal_initializer))\n\n self.q_y_x_model.add(Activation('relu'))\n\n self.q_y_x_model.add(Dense(self.n_y, name='q_y_x_logit',\n kernel_initializer=normal_initializer,\n bias_initializer=normal_initializer))\n\n # Make q(z|x,y) network\n self.q_z_xy = Sequential()\n self.q_z_xy.add(Dense(self.intermediate_dim,\n kernel_initializer=glorot_initializer,\n bias_initializer=normal_initializer,\n input_dim=self.n_x + self.n_y))\n\n self.q_z_xy.add(Activation('relu'))\n self.q_z_xy.add(Dense(self.intermediate_dim,\n kernel_initializer=glorot_initializer,\n bias_initializer=normal_initializer))\n\n self.q_z_xy.add(Activation('relu'))\n\n # which results in two networks, one for mean and one for log variance\n self.q_z_xy_mean = Sequential()\n self.q_z_xy_mean.add(self.q_z_xy)\n self.q_z_xy_mean.add(Dense(self.n_z,\n kernel_initializer=normal_initializer,\n bias_initializer=normal_initializer))\n\n self.q_z_xy_log_var = Sequential()\n self.q_z_xy_log_var.add(self.q_z_xy)\n self.q_z_xy_log_var.add(Dense(self.n_z,\n kernel_initializer=normal_initializer,\n bias_initializer=normal_initializer))\n # Make p(x|y,z) network - if Gaussain: two, one for mean one for log var\n # - if Bernoulii: just one for prob for each entry\n\n if self.x_dist == 'Gaussian':\n self.p_x_yz_model = Sequential()\n self.p_x_yz_model.add(Dense(self.intermediate_dim,\n kernel_initializer=glorot_initializer,\n bias_initializer=normal_initializer,\n input_dim=self.n_z + self.n_y))\n 
self.p_x_yz_model.add(Activation('relu'))\n self.p_x_yz_model.add(Dense(self.intermediate_dim,\n kernel_initializer=glorot_initializer,\n bias_initializer=normal_initializer))\n self.p_x_yz_model.add(Activation('relu'))\n\n self.p_x_yz_mean = Sequential()\n self.p_x_yz_mean.add(self.p_x_yz_model)\n self.p_x_yz_mean.add(Dense(self.n_x,\n kernel_initializer=normal_initializer,\n bias_initializer=normal_initializer))\n self.p_x_yz_log_var = Sequential()\n self.p_x_yz_log_var.add(self.p_x_yz_model)\n self.p_x_yz_log_var.add(Dense(self.n_x,\n kernel_initializer=normal_initializer,\n bias_initializer=normal_initializer))\n #\n elif self.x_dist == 'Bernoulli':\n self.p_x_z_mean=Sequential()\n self.p_x_z_mean.add(Dense(self.intermediate_dim,\n kernel_initializer=glorot_initializer,\n bias_initializer=normal_initializer,\n input_dim=self.n_z + self.n_y))\n self.p_x_z_mean.add(Activation('relu'))\n self.p_x_z_mean.add(Dense(self.intermediate_dim,\n kernel_initializer=glorot_initializer,\n bias_initializer=normal_initializer))\n self.p_x_z_mean.add(Activation('relu'))\n self.p_x_z_mean.add(Dense(self.n_x,\n kernel_initializer=normal_initializer,\n bias_initializer=normal_initializer))\n\n def compute_loss(self):\n \"\"\" manipulate computed components and compute loss \"\"\"\n self.elbo_l = tf.reduce_mean(self.labelled_loss(self.x_l, self.y_l))\n self.qy_ll = tf.reduce_mean(self.qy_loss(self.x_l, self.y_l))\n self.elbo_u = tf.reduce_mean(self.unlabelled_loss(self.x_u))\n weight_priors = self.l2_reg * self.weight_prior() / self.reg_term\n if self.loss_balance == 'average':\n return -(self.elbo_l + self.elbo_u + self.alpha * self.qy_ll + weight_priors)\n elif self.loss_balance == 'weighted':\n return -((float(self.n_l)/float(self.n_train)) * self.elbo_l + (float(self.n_u)/float(self.n_train)) * self.elbo_u + self.alpha * self.qy_ll + weight_priors)\n\n def compute_unsupervised_loss(self):\n \"\"\" manipulate computed components and compute unsup loss \"\"\"\n self.elbo_u = tf.reduce_mean(self.unlabelled_loss(self.x_u))\n weight_priors = self.l2_reg * self.weight_prior() / self.reg_term\n return -(self.elbo_u + weight_priors)\n\n def compute_supervised_loss(self):\n \"\"\" manipulate computed components and compute loss \"\"\"\n self.elbo_l = tf.reduce_mean(self.labelled_loss(self.x_l, self.y_l))\n self.qy_ll = tf.reduce_mean(self.qy_loss(self.x_l, self.y_l))\n weight_priors = self.l2_reg * self.weight_prior() / self.reg_term\n return -(self.elbo_l + self.alpha * self.qy_ll + weight_priors)\n\n def labelled_loss(self, x, y):\n z_m, z_lv, z = self.sample_z(x, y)\n x_ = tf.tile(tf.expand_dims(x, 0), [self.mc_samples, 1, 1])\n y_ = tf.tile(tf.expand_dims(y, 0), [self.mc_samples, 1, 1])\n return self.lowerBound(x_, y_, z, z_m, z_lv)\n\n def unlabelled_loss(self, x):\n qy_l = self.predict(x)\n x_r = tf.tile(x, [self.n_y, 1])\n y_u = tf.reshape(tf.tile(tf.eye(self.n_y), [1, tf.shape(x)[0]]),\n [-1, self.n_y])\n n_u = tf.shape(x)[0]\n lb_u = tf.transpose(tf.reshape(self.labelled_loss(x_r, y_u),\n [self.n_y, n_u]))\n lb_u = tf.reduce_sum(qy_l * lb_u, axis=-1)\n qy_entropy = -tf.reduce_sum(qy_l * tf.log(qy_l + 1e-10), axis=-1)\n return lb_u + qy_entropy\n\n def lowerBound(self, x, y, z, z_m, z_lv):\n \"\"\" Compute densities and lower bound given all inputs \n of shape: (mc_samps X n_obs X n_dim)\n \"\"\"\n l_px = self.compute_logpx(x, y, z)\n l_py = dgm.multinoulliLogDensity(y, self.prior, on_priors=True)\n l_pz = dgm.standardNormalLogDensity(z)\n l_qz = dgm.gaussianLogDensity(z, z_m, z_lv)\n return 
tf.reduce_mean(l_px + l_py + l_pz - l_qz, axis=0)\n\n def qy_loss(self, x, y=None):\n y_ = self.q_y_x_model(x)\n if y is None:\n return dgm.multinoulliUniformLogDensity(y_)\n else:\n return dgm.multinoulliLogDensity(y, y_)\n\n def sample_z(self, x, y, n_samples=None):\n if n_samples == None:\n n_samples = self.mc_samples\n l_qz_in = tf.concat([x, y], axis=-1)\n z_mean = dgm.forwardPass(self.q_z_xy_mean, l_qz_in)\n z_log_var = dgm.forwardPass(self.q_z_xy_log_var, l_qz_in)\n return z_mean, z_log_var, dgm.sampleNormal(z_mean, z_log_var,\n n_samples)\n\n def compute_logpx(self, x, y, z):\n px_in = tf.reshape(tf.concat([y, z], axis=-1), [-1, self.n_y + self.n_z])\n if self.x_dist == 'Gaussian':\n mean, log_var = self.p_x_yz_mean(px_in), self.p_x_yz_log_var(px_in)\n mean = tf.reshape(mean, [self.mc_samples, -1, self.n_x])\n log_var = tf.reshape(log_var, [self.mc_samples, -1, self.n_x])\n return dgm.gaussianLogDensity(x, mean, log_var)\n elif self.x_dist == 'Bernoulli':\n logits = self.p_x_z_mean(px_in)\n logits = tf.reshape(logits, [self.mc_samples, -1, self.n_x])\n return dgm.bernoulliLogDensity(x, logits)\n\n def predict(self, x):\n \"\"\" predict y for given x with q(y|x) \"\"\"\n return tf.nn.softmax(self.q_y_x_model(x))\n\n def encode(self, x, y=None, n_iters=1):\n \"\"\" encode a new example into z-space (labelled or unlabelled) \"\"\"\n if y is None:\n y = tf.one_hot(tf.argmax(self.predict(x)))\n z_mean, z_log_var, z = self.sample_z(x, y, n_iters)\n return z_mean, z_log_var, z\n\n #def training_fd(self, x_l, y_l, x_u):\n # return {self.x_l: x_l, self.y_l: y_l, self.x_u: x_u, self.x: x_l, self.y: y_l, self.reg_term:self.n_train}\n\n def _printing_feed_dict(self, Data, x_l, x_u, y, eval_samps, binarize):\n fd = super(m2, self)._printing_feed_dict(Data, x_l, x_u, y,\n eval_samps, binarize)\n fd[self.reg_term] = self.n_train\n return fd\n\n def print_verbose1(self, epoch, fd, sess):\n total, elbo_l, elbo_u, qy_ll, weight_priors = \\\n sess.run([self.compute_loss(), self.elbo_l, self.elbo_u,\n self.qy_ll, weight_priors], fd)\n train_acc, test_acc = sess.run([self.train_acc, self.test_acc], fd) \n print(\"Epoch: {}: Total: {:5.3f}, labelled: {:5.3f}, Unlabelled: {:5.3f}, q_y_ll: {:5.3f}, weight_priors: {:5.3f}, Training: {:5.3f}, Testing: {:5.3f}\".format(epoch, total, elbo_l, elbo_u, qy_ll, weight_priors, train_acc, test_acc)) \n\n def print_verbose2(self, epoch, fd, sess):\n total, elbo_l, elbo_u = sess.run([self.compute_loss(), self.elbo_l, self.elbo_u] ,fd)\n train_acc, test_acc = sess.run([self.train_acc, self.test_acc], fd) \n print(\"Epoch: {}: Total: {:5.3f}, labelled: {:5.3f}, Unlabelled: {:5.3f}, Training: {:5.3f}, Testing: {:5.3f}\".format(epoch, total, elbo_l, elbo_u, train_acc, test_acc)) \n\n def print_verbose3(self, epoch):\n print(\"Epoch: {}: Total: {:5.3f}, Unlabelled: {:5.3f}, KL_y: {:5.3f}, TrainingAc: {:5.3f}, TestingAc: {:5.3f}, TrainingK: {:5.3f}, TestingK: {:5.3f}\".format(epoch, sum(self.curve_array[epoch][1:3]), self.curve_array[epoch][2], self.curve_array[epoch][3], self.curve_array[epoch][0], self.curve_array[epoch][6], self.curve_array[epoch][12], self.curve_array[epoch][13])) \n" ]
[ [ "tensorflow.concat", "tensorflow.constant", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.shape", "tensorflow.reduce_sum", "tensorflow.reshape", "tensorflow.placeholder", "tensorflow.expand_dims", "tensorflow.eye", "tensorflow.log", "tensorflow.set_random_seed", "numpy.array", "tensorflow.tile" ] ]
Leinadh/PeruvianImageGenerator
[ "9bf11125f4ea3090e217cf15866ec19ce944f9c6" ]
[ "evaluation/tsne_analysis_baseline/tsne_evaluation_utils/grid.py" ]
[ "import logging\nimport os\nfrom glob import glob\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom MulticoreTSNE import MulticoreTSNE as TSNE\nfrom PIL import Image\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\n\nlogger = logging.getLogger(__name__)\n\n\ndef build(paths, frow=60, fcol=60, perplexity=30, n_iter=1000, jitter_win=0, pca_components=50,\n output_dir=\"./output\", save_data=True, save_scatter=True, use_features=False):\n os.makedirs(output_dir, exist_ok=True)\n df, image_shape, tsne_input = load_data(paths, use_features)\n tsne_results = apply_tsne(df, tsne_input, perplexity, n_iter, pca_components=pca_components)\n logger.info(\"tsne finished: %s\", tsne_results.shape)\n df['tsne_x_raw'], df['tsne_y_raw'] = tsne_results[:, 0], tsne_results[:, 1]\n norm = StandardScaler().fit_transform(df[[\"tsne_x_raw\", \"tsne_y_raw\"]])\n df['tsne_x'], df['tsne_y'] = norm[:, 0], norm[:, 1]\n if save_scatter:\n generate_scatter(df, output_dir)\n df = generate_images(fcol, frow, image_shape, df, output_dir=output_dir, jitter_win=jitter_win)\n if save_data:\n logger.info(\"saving data.csv\")\n df.to_csv(os.path.join(output_dir, \"data.csv\"), index=False)\n logger.info(\"finished\")\n return df, image_shape\n\n\ndef generate_images(fcol, frow, image_shape, df, output_dir=None, jitter_win=None):\n df[\"tsne_x_int\"] = ((fcol - 1) * (df[\"tsne_x\"] - np.min(df[\"tsne_x\"])) / np.ptp(df[\"tsne_x\"])).astype(int)\n df[\"tsne_y_int\"] = ((frow - 1) * (df[\"tsne_y\"] - np.min(df[\"tsne_y\"])) / np.ptp(df[\"tsne_y\"])).astype(int)\n all_possibilities = []\n if jitter_win:\n yy, xx = np.mgrid[-jitter_win:jitter_win + 1, -jitter_win:jitter_win + 1]\n all_possibilities = np.vstack([xx.reshape(-1), yy.reshape(-1)]).T.tolist()\n all_possibilities.sort(key=lambda x: (max(abs(x[0]), abs(x[1])), abs(x[0]) + abs(x[1])))\n all_possibilities.pop(0)\n\n for model_name, group in df.groupby(by=\"name\"):\n ordered_images = np.zeros((frow, fcol, *image_shape))\n overlap, show = 0, 0\n for i, row in group.iterrows():\n x, y = row[\"tsne_x_int\"], row[\"tsne_y_int\"]\n possibilities = list(all_possibilities)\n while len(possibilities) and np.sum(ordered_images[x, y]) != 0:\n dx, dy = possibilities.pop(0)\n x, y = np.clip(x + dx, 0, fcol - 1), np.clip(y + dy, 0, frow - 1)\n if np.sum(ordered_images[x, y]) == 0:\n show += 1\n ordered_images[x, y] = row[get_features(image_shape)].values.reshape((-1, *image_shape))\n else:\n overlap += 1\n logger.info(\"overlap for %s: %d, show: %d\", model_name, overlap, show)\n ordered_images = np.flipud(np.transpose(ordered_images, (1, 0, 2, 3, 4))).reshape(frow * fcol, *image_shape)\n\n grid = (ordered_images.reshape(frow, fcol, *image_shape).swapaxes(1, 2)\n .reshape(image_shape[0] * frow, image_shape[1] * fcol, image_shape[2]))\n logger.info(\"tsne grid shape: %s\", grid.shape)\n plt.figure(figsize=(20, 20))\n plt.imsave(os.path.join(output_dir, f\"tsne_{model_name}.png\"), grid)\n return df\n\n\ndef apply_tsne(df, data, perplexity, n_iter, learning_rate=200, pca_components=None, tsne_jobs=4):\n if pca_components:\n logger.info(\"shape before pca: %s\", data.shape)\n pca = PCA(n_components=pca_components, svd_solver='randomized')\n data = pca.fit_transform(data)\n pca_cols = [f\"pca_{c}\" for c in range(pca.n_components)]\n df[pca_cols] = pd.DataFrame(data, index=df.index)\n logger.info(\"shape after pca: %s\", data.shape)\n tsne = TSNE(n_components=2, verbose=1, 
perplexity=perplexity, n_iter=n_iter, learning_rate=learning_rate,\n n_jobs=tsne_jobs)\n return tsne.fit_transform(data)\n\n\ndef load_features(image_path, extensions=(\"npz\", \"npy\")):\n base_path = os.path.splitext(image_path)[0]\n for ext in extensions:\n f = f\"{base_path}.{ext}\"\n if os.path.exists(f):\n data = np.load(f)\n if ext == \"npz\":\n data = data[\"arr_0\"]\n return data\n return None\n\n\ndef load_data(paths, use_features):\n df = pd.DataFrame()\n image_shape = None\n all_features = []\n for path in paths:\n logger.info(\"loading images from %s\", path)\n name = os.path.basename(path)\n for f in glob(os.path.join(path, \"*.png\")):\n if use_features:\n features = load_features(f)\n if features is None:\n logger.warning(\"features not found for %s\", f)\n continue\n all_features.append(features)\n image = np.array(Image.open(f))/255\n image_shape = image.shape\n df_new = pd.DataFrame(image.reshape((-1, np.prod(image_shape))))\n df_new[\"name\"] = name\n df_new[\"file\"] = f\n df = df.append(df_new)\n# with open(os.path.basename(path).split('/')[-1]+'.npy', 'wb') as f:\n# np.save(f, all_features)\n# all_features = []\n logger.info(\"loaded %d images with shape %s\", len(df), image_shape)\n tsne_input = np.array(all_features) if use_features else get_image_data(df, image_shape)\n \n return df.reset_index(), image_shape, tsne_input\n\n\ndef generate_scatter(df, output_dir):\n plt.figure(figsize=(10, 10))\n sns.scatterplot(x=\"tsne_x\", y=\"tsne_y\", hue=\"name\", data=df, legend=\"full\", alpha=0.2)\n plt.savefig(os.path.join(output_dir, f\"models_scatter.png\"))\n\n\ndef get_features(image_shape):\n return list(range(np.prod(image_shape)))\n\n\ndef get_image_data(df, image_shape):\n return df[get_features(image_shape)].values\n" ]
[ [ "numpy.sum", "numpy.clip", "numpy.min", "numpy.ptp", "pandas.DataFrame", "numpy.prod", "numpy.transpose", "numpy.load", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.zeros", "sklearn.decomposition.PCA", "matplotlib.pyplot.figure" ] ]
JohnHBrock/NMAP-Cluster
[ "ee3eb57ebe9a70b0509a9763d1ae5f9c5461cb19" ]
[ "clusteringnmap/validation.py" ]
[ "from sklearn.metrics import silhouette_samples, silhouette_score\nfrom scipy.spatial.distance import pdist\nimport numpy as np\n\ndef validate_clusters(vectors, labels, ignore_single_point_clusters=False):\n \"\"\"\n\n :param vectors:\n :param labels:\n :return: (adjusted silhouette score average (removing single point clusters) and all label silhouette scores)\n \"\"\"\n overall_sum = 0\n overall_count = 0\n has_single_point_clusters = 0\n per_sample = silhouette_samples(vectors, labels)\n per_cluster = {}\n for cluster_id in set(labels):\n total = 0\n count = 0\n for sample_index in xrange(per_sample.shape[0]):\n if labels[sample_index] == cluster_id:\n total += per_sample[sample_index]\n count += 1\n per_cluster[cluster_id] = float(total) / float(count)\n if count > 1 or not ignore_single_point_clusters:\n overall_sum += float(total) / float(count)\n overall_count += 1\n return float(overall_sum) / float(overall_count), per_cluster\n\n\ndef get_average_distance_per_cluster(vectors, labels):\n overall_sum = 0\n overall_count = 0\n per_cluster = {}\n for cluster_id in set(labels):\n total = 0\n count = 0\n c_vecs = []\n\n for sample_index in xrange(vectors.shape[0]):\n if labels[sample_index] == cluster_id:\n c_vecs.append(vectors[sample_index, :])\n\n c_vecs = np.vstack(c_vecs)\n c_distances = pdist(c_vecs)\n if c_distances.shape[0] > 0:\n mean_distances = c_distances.mean()\n else:\n mean_distances = 0\n overall_sum += c_distances.sum()\n overall_count += c_vecs.shape[0]\n\n per_cluster[cluster_id] = mean_distances\n\n return float(overall_sum) / float(overall_count), per_cluster" ]
[ [ "numpy.vstack", "scipy.spatial.distance.pdist", "sklearn.metrics.silhouette_samples" ] ]
Padfoot-ted/toad
[ "0b6973e910c337b779b6c95087f6d24b89a20eed" ]
[ "toad/preprocessing/partition_test.py" ]
[ "import pytest\nimport numpy as np\nimport pandas as pd\n\n\nfrom .partition import TimePartition, ValuePartition\n\n\nnp.random.seed(1)\n\nab = np.array(list('ABCDEFG'))\n\nhistory = np.full(500, np.datetime64('2020-03-01')) - np.random.randint(30, 400, size = 500)\nopen_time = np.full(500, np.datetime64('2020-03-01')) - np.random.randint(30, size = 500)\nA = ab[np.random.choice(7, 500)]\nB = np.random.randint(10, size = 500).astype(float)\nB[np.random.choice(500, 10)] = np.nan\n\n\ndf = pd.DataFrame({\n 'history': history,\n 'open_time': open_time,\n 'A': A,\n 'B': B,\n})\n\n\ndef test_timepartition():\n tp = TimePartition('open_time', 'history', ['90d', '180d'])\n mask, suffix = next(tp.partition(df))\n assert mask.sum() == 93\n\n\ndef test_timepartition_all():\n tp = TimePartition('open_time', 'history', ['all'])\n mask, suffix = next(tp.partition(df))\n assert mask.sum() == 500\n\ndef test_valuepartition():\n vp = ValuePartition('A')\n mask, suffix = next(vp.partition(df))\n assert mask.sum() == 67\n\ndef test_valuepartition_with_na():\n vp = ValuePartition('B')\n s = 0\n for mask, suffix in vp.partition(df):\n s += mask.sum()\n \n assert s == 500" ]
[ [ "numpy.random.seed", "numpy.random.choice", "pandas.DataFrame", "numpy.datetime64", "numpy.random.randint" ] ]
lazidoca/DeOldify
[ "047799721d190b2555a3e22834a25c88cff6d52f" ]
[ "app_utils.py" ]
[ "import os\nimport random\nimport shutil\nfrom uuid import uuid4\n\nimport requests\n\nimport _thread as thread\nimport numpy as np\nimport skimage\nfrom PIL import Image\nfrom skimage.filters import gaussian\n\n\ndef compress_image(image, path_original):\n size = 1920, 1080\n width = 1920\n height = 1080\n\n name = os.path.basename(path_original).split('.')\n first_name = os.path.join(os.path.dirname(path_original), name[0] + '.jpg')\n\n if image.size[0] > width and image.size[1] > height:\n image.thumbnail(size, Image.ANTIALIAS)\n image.save(first_name, quality=85)\n elif image.size[0] > width:\n wpercent = (width / float(image.size[0]))\n height = int((float(image.size[1]) * float(wpercent)))\n image = image.resize((width, height), PIL.Image.ANTIALIAS)\n image.save(first_name, quality=85)\n elif image.size[1] > height:\n wpercent = (height / float(image.size[1]))\n width = int((float(image.size[0]) * float(wpercent)))\n image = image.resize((width, height), Image.ANTIALIAS)\n image.save(first_name, quality=85)\n else:\n image.save(first_name, quality=85)\n\n\ndef convertToJPG(path_original):\n img = Image.open(path_original)\n name = os.path.basename(path_original).split('.')\n first_name = os.path.join(os.path.dirname(path_original), name[0] + '.jpg')\n\n if img.format == \"JPEG\":\n image = img.convert('RGB')\n compress_image(image, path_original)\n img.close()\n\n elif img.format == \"GIF\":\n i = img.convert(\"RGBA\")\n bg = Image.new(\"RGBA\", i.size)\n image = Image.composite(i, bg, i)\n compress_image(image, path_original)\n img.close()\n\n elif img.format == \"PNG\":\n try:\n image = Image.new(\"RGB\", img.size, (255, 255, 255))\n image.paste(img, img)\n compress_image(image, path_original)\n except ValueError:\n image = img.convert('RGB')\n compress_image(image, path_original)\n\n img.close()\n\n elif img.format == \"BMP\":\n image = img.convert('RGB')\n compress_image(image, path_original)\n img.close()\n\n\ndef download(url, filepath):\n print(f'Download {url}')\n print('It may take a while. Please wait ...')\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(filepath, \"wb\") as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(f'Finish download {url}')\n\n\ndef blur(image, x0, x1, y0, y1, sigma=1, multichannel=True):\n y0, y1 = min(y0, y1), max(y0, y1)\n x0, x1 = min(x0, x1), max(x0, x1)\n im = image.copy()\n sub_im = im[y0:y1, x0:x1].copy()\n blur_sub_im = gaussian(sub_im, sigma=sigma, multichannel=multichannel)\n blur_sub_im = np.round(255 * blur_sub_im)\n im[y0:y1, x0:x1] = blur_sub_im\n return im\n\n\n# def download(url, filename):\n# data = requests.get(url).content\n# with open(filename, 'wb') as handler:\n# handler.write(data)\n\n# return filename\n\n\ndef generate_random_filename(upload_directory, extension):\n filename = str(uuid4())\n filename = os.path.join(upload_directory, filename + \".\" + extension)\n return filename\n\n\ndef clean_me(filename):\n if os.path.exists(filename):\n try:\n os.remove(filename)\n except:\n pass\n\n\ndef clean_all(files):\n for me in files:\n clean_me(me)\n\n\ndef create_directory(path):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n\ndef get_model_bin(url, output_path):\n if not os.path.exists(output_path):\n create_directory(output_path)\n download(url, output_path)\n\n return output_path\n\n\n#model_list = [(url, output_path), (url, output_path)]\ndef get_multi_model_bin(model_list):\n for m in model_list:\n thread.start_new_thread(get_model_bin, m)\n" ]
[ [ "numpy.round" ] ]
jordanhoare/pybot-lostark
[ "88a1f25255fbde9ccf55fd0e84a3dfe908cb4fb8" ]
[ "draft/core/vision.py" ]
[ "import cv2 as cv\nimport numpy as np\n\n\nclass Vision:\n # constants\n TRACKBAR_WINDOW = \"Trackbars\"\n\n # properties\n needle_img = None\n needle_w = 0\n needle_h = 0\n method = None\n\n # constructor\n def __init__(self, needle_img_path, method=cv.TM_CCOEFF_NORMED):\n # load the image we're trying to match\n # https://docs.opencv.org/4.2.0/d4/da8/group__imgcodecs.html\n self.needle_img = cv.imread(needle_img_path)\n\n # Save the dimensions of the needle image\n self.needle_w = self.needle_img.shape[1]\n self.needle_h = self.needle_img.shape[0]\n\n # There are 6 methods to choose from:\n # TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED\n self.method = method\n\n # given a list of [x, y, w, h] rectangles returned by find(), convert those into a list of\n # [x, y] positions in the center of those rectangles where we can click on those found items\n def get_click_points(self, rectangles):\n points = []\n\n # Loop over all the rectangles\n for (x, y, w, h) in rectangles:\n # Determine the center position\n center_x = x + int(w / 2)\n center_y = y + int(h / 2)\n # Save the points\n points.append((center_x, center_y))\n\n return points\n\n def find(self, haystack_img, threshold=0.5, max_results=10):\n ## run the OpenCV algorithm\n try:\n result = cv.matchTemplate(haystack_img, self.needle_img, self.method)\n except:\n print(\"Image not found - matchTemplate error\")\n exit()\n\n # Get the all the positions from the match result that exceed our threshold\n locations = np.where(result >= threshold)\n locations = list(zip(*locations[::-1]))\n # print(locations)\n\n # if we found no results, return now. this reshape of the empty array allows us to\n # concatenate together results without causing an error\n if not locations:\n return np.array([], dtype=np.int32).reshape(0, 4)\n\n # You'll notice a lot of overlapping rectangles get drawn. We can eliminate those redundant\n # locations by using groupRectangles().\n # First we need to create the list of [x, y, w, h] rectangles\n rectangles = []\n for loc in locations:\n rect = [int(loc[0]), int(loc[1]), self.needle_w, self.needle_h]\n # Add every box to the list twice in order to retain single (non-overlapping) boxes\n rectangles.append(rect)\n rectangles.append(rect)\n # Apply group rectangles.\n # The groupThreshold parameter should usually be 1. If you put it at 0 then no grouping is\n # done. If you put it at 2 then an object needs at least 3 overlapping rectangles to appear\n # in the result. 
I've set eps to 0.5, which is:\n # \"Relative difference between sides of the rectangles to merge them into a group.\"\n rectangles, weights = cv.groupRectangles(rectangles, groupThreshold=1, eps=0.5)\n # print(rectangles)\n\n return rectangles\n\n # given a list of [x, y, w, h] rectangles and a canvas image to draw on, return an image with\n # all of those rectangles drawn\n def draw_rectangles(self, haystack_img, rectangles):\n # these colors are actually BGR\n line_color = (0, 255, 0)\n line_type = cv.LINE_4\n\n for (x, y, w, h) in rectangles:\n # determine the box positions\n top_left = (x, y)\n bottom_right = (x + w, y + h)\n # draw the box\n cv.rectangle(\n haystack_img, top_left, bottom_right, line_color, lineType=line_type\n )\n\n return haystack_img\n\n # given a list of [x, y] positions and a canvas image to draw on, return an image with all\n # of those click points drawn on as crosshairs\n def draw_crosshairs(self, haystack_img, points):\n # these colors are actually BGR\n marker_color = (255, 0, 255)\n marker_type = cv.MARKER_CROSS\n\n for (center_x, center_y) in points:\n # draw the center point\n cv.drawMarker(haystack_img, (center_x, center_y), marker_color, marker_type)\n\n return haystack_img\n" ]
[ [ "numpy.array", "numpy.where" ] ]
whatbeg/Data-Analysis
[ "b1f878564448527ca730f6d869dc3cb0d9b678d7" ]
[ "CNN_for_Mnist/SK_0.2Sigmoid.py" ]
[ "from __future__ import print_function\nimport numpy as np\nimport preprocessing as proc\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\n# Training settings\nparser = argparse.ArgumentParser(description='BASE Model')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\nparser.add_argument('--epochs', type=int, default=20, metavar='N',\n help='number of epochs to train (default: 20)')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--seed', type=int, default=0, metavar='S',\n help='random seed (default: 0)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--SKs', type=float, default=0.2, metavar='SK',\n help='use how many channels times every layer (except last layer)')\nparser.add_argument('--datastart', type=int, default=0, metavar='START',\n help='training set start sample index')\nparser.add_argument('--dataend', type=int, default=60000, metavar='END',\n help='training set end sample index')\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1_1 = nn.Conv2d(1, int(20*args.SKs), kernel_size=(3, 3), stride=(1, 1), padding=0)\n self.conv1_2 = nn.Conv2d(int(20*args.SKs), int(20*args.SKs), kernel_size=(3, 3), stride=(1, 1), padding=0)\n self.conv2 = nn.Conv2d(int(20*args.SKs), int(50*args.SKs), kernel_size=(3, 3), stride=(1, 1), padding=0)\n self.fc1 = nn.Linear(int(5*5*50*args.SKs), 500)\n self.fc2 = nn.Linear(500, 10)\n self.bn1_1 = nn.BatchNorm2d(int(20*args.SKs))\n self.bn1_2 = nn.BatchNorm2d(int(20*args.SKs))\n self.bn2 = nn.BatchNorm2d(int(50*args.SKs))\n self.bn3 = nn.BatchNorm1d(500)\n self.drop = nn.Dropout(p=0.5)\n\n def forward(self, x):\n x = F.sigmoid(self.bn1_1(self.conv1_1(x)))\n x = F.sigmoid(self.bn1_2(self.conv1_2(x)))\n x = F.max_pool2d(x, 2)\n x = self.conv2(x)\n x = F.max_pool2d(self.bn2(x), 2)\n x = x.view(-1, int(5*5*50*args.SKs))\n x = self.fc1(x)\n x = F.sigmoid(self.bn3(x))\n x = self.fc2(x)\n return F.log_softmax(x)\n\nmodel = Net()\n# print(model)\nif args.cuda:\n model.cuda()\n\noptimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n\ndef generate_data(data, label, batchSize, data_type='train', shuffle=True):\n assert batchSize > 0\n data_len = data.shape[0]\n total_batch = data_len / batchSize + (1 if data_len % batchSize != 0 else 0)\n if shuffle:\n indices = np.random.permutation(data_len)\n data = data[indices]\n label = label[indices]\n for idx in range(total_batch):\n start = idx * batchSize\n end = min((idx + 1) * batchSize, data_len)\n if data_type == 'train':\n yield proc.Normalize(data[start:end], (proc.TRAIN_MEAN,)*(end-start),\n (proc.TRAIN_STD,)*(end-start)), label[start:end]\n else:\n yield 
proc.Normalize(data[start:end], (proc.TRAIN_MEAN,)*(end-start),\n (proc.TRAIN_STD,)*(end-start)), label[start:end]\n\n\ndef train(epoch, train_data, train_labels, data_start, data_end):\n model.train() # set to training mode\n batch_idx = 1\n for (_data, _target) in generate_data(train_data[data_start:data_end], train_labels[data_start:data_end], batchSize=args.batch_size, shuffle=True):\n data = torch.from_numpy(_data)\n target = torch.from_numpy(_target).long()\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data), Variable(target)\n optimizer.zero_grad()\n output = model.forward(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{:5d}/{} ({:2d}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), data_end-data_start,\n int(100. * batch_idx * len(data) / (data_end-data_start)), loss.data[0]))\n batch_idx += 1\n\n\ndef test(test_data, test_labels):\n model.eval() # set to evaluation mode\n test_loss = 0\n correct = 0\n for (data, target) in generate_data(test_data, test_labels,\n batchSize=args.batch_size, shuffle=True):\n data = torch.from_numpy(data)\n target = torch.from_numpy(target).long()\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n data, target = Variable(data, volatile=True), Variable(target)\n output = model.forward(data)\n test_loss += F.nll_loss(output, target).data[0]\n pred = output.data.max(1)[1] # get the index of the max log-probability\n correct += pred.eq(target.data).cpu().sum()\n\n test_loss = test_loss\n test_loss /= test_data.shape[0] # loss function already averages over batch size\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)\\n'.format(\n test_loss, correct, test_data.shape[0],\n 100. * correct / test_data.shape[0]))\n\n\ndef go():\n train_images, train_labels = proc.get_data(\"train\")\n test_images, test_labels = proc.get_data(\"test\")\n for epoch in range(1, args.epochs + 1):\n train(epoch, train_images, train_labels, args.datastart, args.dataend)\n test(test_images, test_labels)\n\nif __name__ == '__main__':\n go()\n" ]
[ [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.cuda.manual_seed", "torch.nn.functional.log_softmax", "torch.nn.functional.nll_loss", "torch.manual_seed", "torch.from_numpy", "torch.nn.Linear", "numpy.random.permutation", "torch.cuda.is_available", "torch.nn.functional.max_pool2d", "torch.autograd.Variable" ] ]
maziarraissi/BSNNs
[ "f0a14d1b03cc17e89642106cf3ebba76ddf6bb0c" ]
[ "BlackScholesBarenblatt100D.py" ]
[ "\"\"\"\n@author: Maziar Raissi\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom FBSNNs import FBSNN\nimport matplotlib.pyplot as plt\nfrom plotting import newfig, savefig\n\nclass BlackScholesBarenblatt(FBSNN):\n def __init__(self, Xi, T,\n M, N, D,\n layers):\n \n super().__init__(Xi, T,\n M, N, D,\n layers)\n \n def phi_tf(self, t, X, Y, Z): # M x 1, M x D, M x 1, M x D\n return 0.05*(Y - tf.reduce_sum(X*Z, 1, keepdims = True)) # M x 1\n \n def g_tf(self, X): # M x D\n return tf.reduce_sum(X**2, 1, keepdims = True) # M x 1\n\n def mu_tf(self, t, X, Y, Z): # M x 1, M x D, M x 1, M x D\n return super().mu_tf(t, X, Y, Z) # M x D\n \n def sigma_tf(self, t, X, Y): # M x 1, M x D, M x 1\n return 0.4*tf.matrix_diag(X) # M x D x D\n \n ###########################################################################\n\nif __name__ == \"__main__\":\n \n M = 100 # number of trajectories (batch size)\n N = 50 # number of time snapshots\n D = 100 # number of dimensions\n \n layers = [D+1] + 4*[256] + [1]\n\n Xi = np.array([1.0,0.5]*int(D/2))[None,:]\n T = 1.0\n \n # Training\n model = BlackScholesBarenblatt(Xi, T,\n M, N, D,\n layers)\n \n model.train(N_Iter = 2*10**4, learning_rate=1e-3)\n model.train(N_Iter = 3*10**4, learning_rate=1e-4)\n model.train(N_Iter = 3*10**4, learning_rate=1e-5)\n model.train(N_Iter = 2*10**4, learning_rate=1e-6)\n \n ##### PLOT RESULTS\n \n t_test, W_test = model.fetch_minibatch()\n \n X_pred, Y_pred = model.predict(Xi, t_test, W_test)\n \n def u_exact(t, X): # (N+1) x 1, (N+1) x D\n r = 0.05\n sigma_max = 0.4\n return np.exp((r + sigma_max**2)*(T - t))*np.sum(X**2, 1, keepdims = True) # (N+1) x 1\n \n Y_test = np.reshape(u_exact(np.reshape(t_test[0:M,:,:],[-1,1]), np.reshape(X_pred[0:M,:,:],[-1,D])),[M,-1,1])\n \n samples = 5\n \n plt.figure()\n plt.plot(t_test[0:1,:,0].T,Y_pred[0:1,:,0].T,'b',label='Learned $u(t,X_t)$')\n plt.plot(t_test[0:1,:,0].T,Y_test[0:1,:,0].T,'r--',label='Exact $u(t,X_t)$')\n plt.plot(t_test[0:1,-1,0],Y_test[0:1,-1,0],'ko',label='$Y_T = u(T,X_T)$')\n \n plt.plot(t_test[1:samples,:,0].T,Y_pred[1:samples,:,0].T,'b')\n plt.plot(t_test[1:samples,:,0].T,Y_test[1:samples,:,0].T,'r--')\n plt.plot(t_test[1:samples,-1,0],Y_test[1:samples,-1,0],'ko')\n\n plt.plot([0],Y_test[0,0,0],'ks',label='$Y_0 = u(0,X_0)$')\n \n plt.xlabel('$t$')\n plt.ylabel('$Y_t = u(t,X_t)$')\n plt.title('100-dimensional Black-Scholes-Barenblatt')\n plt.legend()\n \n # savefig('./figures/BSB_Apr18_50', crop = False)\n \n \n errors = np.sqrt((Y_test-Y_pred)**2/Y_test**2)\n mean_errors = np.mean(errors,0)\n std_errors = np.std(errors,0)\n \n plt.figure()\n plt.plot(t_test[0,:,0],mean_errors,'b',label='mean')\n plt.plot(t_test[0,:,0],mean_errors+2*std_errors,'r--',label='mean + two standard deviations')\n plt.xlabel('$t$')\n plt.ylabel('relative error')\n plt.title('100-dimensional Black-Scholes-Barenblatt')\n plt.legend()\n \n # savefig('./figures/BSB_Apr18_50_errors', crop = False)" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.reshape", "matplotlib.pyplot.figure", "tensorflow.reduce_sum", "matplotlib.pyplot.plot", "numpy.std", "numpy.mean", "numpy.exp", "matplotlib.pyplot.xlabel", "tensorflow.matrix_diag", "numpy.sum", "matplotlib.pyplot.ylabel" ] ]
devanshjani/codechallenges
[ "d5383916611b82e2490c2f1097fb9e32bdfec0d0" ]
[ "quantum_black_challange3.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Challenge 3: .\"\"\"\n# Created: 2019-04-10 Devansh Jani <devanshjani@gmail.com>\nimport numpy as np\nfrom numpy import genfromtxt\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.multiclass import OneVsRestClassifier\n\n\ndef train_classifier(train_data: np.array, target: np.array, test: np.array):\n \"\"\"\n Train image classifier for given images.\n :param train_data: image data.\n :param target: Multi-class labels.\n :param test: Testing data.\n \"\"\"\n # Doing some parameter tuning, possibly can do cross validation\n # Sampling and grid search given more time than 10 sec to converge.\n rf_clf = RandomForestClassifier(max_depth=50, n_estimators=100)\n\n image_cls = OneVsRestClassifier(rf_clf)\n\n image_cls.fit(train_data, target)\n\n prediction = image_cls.predict(test)\n # Casting int for prediction numpy array.\n prediction = prediction.astype(int)\n # Saving csv for predictions.\n np.savetxt('prediction.csv', prediction, delimiter=',', fmt='%i')\n\n\nif __name__ == '__main__':\n \"\"\"Classifying processed images.\"\"\"\n\n # Storing this in feature vector array.\n\n training_data_with_target = genfromtxt('train.csv', delimiter=',')\n test_data = genfromtxt('test.csv', delimiter=',')\n\n # Just data\n training_data: np.array = training_data_with_target[:, :-6]\n\n # Just targets.\n training_target: np.array = training_data_with_target[:, -6:]\n\n # Training Decision tree classifier.\n train_classifier(training_data, training_target, test_data)\n" ]
[ [ "numpy.savetxt", "sklearn.multiclass.OneVsRestClassifier", "sklearn.ensemble.RandomForestClassifier", "numpy.genfromtxt" ] ]
cbsilver/glaciationBCs
[ "36e26a59681243c90af57f5ff43e56d4ab9ce82d" ]
[ "src/glaciationBCs/pythonBCsOGS.py" ]
[ "# Collection of python boundary condition (BC) classes for OpenGeoSys\n# BCs reflect the external geosphere: cryo-, litho- and atmosphere\n# Physical units: depending on parameter set, see below!\n\nimport OpenGeoSys\nimport glaciationBCs\nfrom glaciationBCs import glacierclass as glc\t#glacial objects\nfrom glaciationBCs import crustclass as crc \t#crustal objects\nfrom glaciationBCs import airclass as air\t\t# aerial objects\n\nimport numpy as np\n\ns_a = 365.25*24*3600 #=31557600 seconds per year\n\n# Choose parametrization\nT_N = 266.15 #K\nT_S = 276.15 #K\nT_C = 8 #K\nSet = \"TH\"\n\nif (Set==\"M\"): # units: kg, m, s, K\n\tL_dom = 120000 #m\n\tL_max = 0.7*L_dom\n\tH_max = 200 #m\n\tx_0 = -0.5*L_dom\n\tt_0 = 0.00 #s\n\tt_1 = 1.0000 #s\n\tt_2 = 2\n\tt_3 = 3\n\tt_4 = 4\n\nif (Set==\"T\"): # units: kg, m, a, K\n\tL_dom = 1150000 #m\n\tL_max = 575000 #m\n\tH_max = 3200 #m\n\tx_0 = 0.0 #m\n\tt_0 = 17500 #a\n\tt_1 = t_0 + 12500 #a\n\tt_2 = t_1 + 5000 #a\n\tt_3 = t_2 + 5000 #a\n\tt_4 = t_3 + 10000 #a\n\nif (Set==\"HM\"): # units: kg, m, s, K\n\tL_dom = 1150000 #m\n\tL_max = 575000 #m\n\tH_max = 3200 #m\n\tx_0 = 0.0 #m\n\tt_0 = 0.0 * 32500 * s_a #s\n\tt_1 = t_0 + 12500 * s_a #s\n\tt_2 = t_1 + 5000 * s_a #s\n\tt_3 = t_2 + 5000 * s_a #s\n\tt_4 = t_3 + 10000 * s_a #s \n\nif (Set==\"TH\"): # units: kg, m, a, K\n\tL_dom = 1150000 #m\n\tL_max = 575000 #m\n\tH_max = 3200 #m\n\tx_0 = 0.0 #m\n\tt_0 = 20000000 #a\n\tt_1 = t_0 + 12500 #a\n\tt_2 = t_1 + 5000 #a\n\tt_3 = t_2 + 5000 #a\n\tt_4 = t_3 + 10000 #a\n\n# Required: Choose vertical scaling factor\ny_sfactor = 20 #TODO!\n\n# Optional: Choose path to external data\npath2data = '~/Forschung/Simulations/OpenGeoSys/SedimentaryBasinBense/HM/dataGIA/'\nplotinput = False\n\n# Nomenclature: BC Process_LocationQuantity_Component\n# \t\t\t\t\t(THM)\t\t\t(XYZ)\n\n# Process\tDirichlet BC\tNeumann BC (normal to boundary)\n# T\t\t\ttemperature\t\theat flux\n# H\t\t\tpressure\t\thydraulic flux\n# M\t\t\tdisplacement\tmomentum flux (stress vector)\n\n\n# Thermal BCs\n# -----------\nclass BCT_SurfaceTemperature(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4):\n\t\tsuper(BCT_SurfaceTemperature, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.air = air.air(L_dom, T_N, T_S, T_C, t_0, t_1, t_2, t_3, t_4)\n\t\tself.glacier = glc.glacier(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\n\tdef getDirichletBCValue(self, t, coords, node_id, primary_vars):\n\t\tx, y, z = coords\n\t\t\n\t\tprint(self.glacier.stagecontrol(t))\n\t\t\n\t\tif x-self.glacier.x_0 > self.glacier.length(t) or self.glacier.length(t)==0.0:\n\t\t\t#linear profile from north to south\n\t\t\tvalue = self.air.temperature_profile(x,t)\n\t\telse:\n\t\t\t# prescribe fixed temperature underneath the glacier body\n\t\t\tvalue = self.glacier.temperature(x,t)\n\t\t\n\t\treturn (True, value)\n\n# Hydraulic BCs\n# -------------\nclass BCH_SurfacePressure(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4):\n\t\tsuper(BCH_SurfacePressure, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.air = air.air(L_dom, T_N, T_S, T_C, t_0, t_1, t_2, t_3, t_4)\n\t\tself.glacier = glc.glacier(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\n\tdef getDirichletBCValue(self, t, coords, node_id, primary_vars):\n\t\tx, y, z = coords\n\n\t\tprint(self.glacier.stagecontrol(t))\n\t\t\n\t\tif x-self.glacier.x_0 <= 
self.glacier.length(t):\n\t\t\t# height dependent pressure from glacier\n\t\t\tvalue = self.glacier.pressure(x,t)\n\t\telse:\n\t\t\t# fixed pressure from ambient air\n\t\t\tvalue = self.air.pressure\n\t\t\n\t\treturn (True, value)\n\nclass BCH_SurfaceHydrohead(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4):\n\t\tsuper(BCH_SurfaceHydrohead, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.air = air.air(L_dom, T_N, T_S, T_C, t_0, t_1, t_2, t_3, t_4)\n\t\tself.glacier = glc.glacier(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\n\tdef getDirichletBCValue(self, t, coords, node_id, primary_vars):\n\t\tx, y, z = coords\n\n\t\tprint(self.glacier.stagecontrol(t))\n\t\t# get vertical displacement\n\t\tu_y = self.glacier.local_deflection_heuristic(x,t)\n\t\t\n\t\t# head from surface topology\n\t\th_top = y/20 + u_y # scaled!\n\t\t\n\t\tif x-self.glacier.x_0 <= self.glacier.length(t):\n\t\t\t# height dependent hydraulic head from glacier\n\t\t\th_ice = self.glacier.hydrohead(x,t)\n\t\t\tvalue = h_ice + h_top\n\t\telse:\n\t\t\t# fixed head from ambient air\n\t\t\th_air = self.air.hydrohead\n\t\t\tvalue = h_air + h_top\n\t\t\n\t\treturn (True, value)\n\nclass BCH_SurfaceInflux(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4):\n\t\tsuper(BCH_SurfaceInflux, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.glacier = glc.glacier(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\t\n\tdef getFlux(self, t, coords, primary_vars): #here Neumann BC: hydraulic flux\n\t\tx, y, z = coords\n\t\t\n\t\tif x-self.glacier.x_0 <= self.glacier.length(t):\n\t\t\t# get hydraulic flux under glacier\n\t\t\tvalue = self.glacier.local_meltwater(x,t)\n\t\t\tderivative = [ 0.0, 0.0 ]\n\t\t\treturn (True, value, derivative)\n\t\t# no BC => free boundary then (no flux)\n\t\treturn (False, 0.0, [ 0.0, 0.0 ])\n\nclass BCH_SourceFromDeflection(OpenGeoSys.SourceTerm):\n\n\tdef __init__(self, L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4):\n\t\tsuper(BCH_SourceFromDeflection, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.glacier = glc.glacier(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\t\tif plotinput: self.glacier.plot_deflection()\n\n\tdef getFlux(self, t, coords, primary_vars):\n\t\tx, y, z = coords\n\t\t\n\t\t# get subsidence velocity acting as a hydraulic head source term\n\t\tvalue = self.glacier.local_deflection_rate_heuristic(x,t)\n\t\tJac = [0.0, 0.0]\n\t\treturn (value, Jac)\n\n\n# Mechanics BCs\n# -------------\nclass BCM_SurfaceTraction_X(OpenGeoSys.BoundaryCondition):\n\t\n\tdef __init__(self, L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4):\n\t\tsuper(BCM_SurfaceTraction_X, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.glacier = glc.glacier(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\t\tif plotinput: self.glacier.print_max_load()\n\t\tif plotinput: self.glacier.plot_evolution()\n\t\t\n\tdef getFlux(self, t, coords, primary_vars): #here Neumann BC: flux of linear momentum\n\t\tx, y, z = coords\n\t\t\n\t\tif x-self.glacier.x_0 <= self.glacier.length(t):\n\t\t\tvalue = self.glacier.tangentialstress(x,t)\n\t\t\tderivative = [ 0.0, 0.0 ]\n\t\t\treturn (True, value, derivative)\n\t\t# no BC => free boundary then (no flux)\n\t\treturn (False, 0.0, [ 0.0, 0.0 ])\n\nclass 
BCM_SurfaceTraction_Y(OpenGeoSys.BoundaryCondition):\n\t\n\tdef __init__(self, L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4):\n\t\tsuper(BCM_SurfaceTraction_Y, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.glacier = glc.glacier(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\n\tdef getFlux(self, t, coords, primary_vars): #here Neumann BC: flux of linear momentum\n\t\tx, y, z = coords\n\t\t\n\t\tif x-self.glacier.x_0 <= self.glacier.length(t):\n\t\t\tvalue = self.glacier.normalstress(x,t)\n\t\t\tderivative = [ 0.0, 0.0, ]\n\t\t\treturn (True, value, derivative)\n\t\t# no BC => free boundary then (no flux)\n\t\treturn (False, 0.0, [ 0.0, 0.0, ])\n\nclass BCM_BottomDeflection(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4):\n\t\tsuper(BCM_BottomDeflection, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.glacier = glc.glacier(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\t\tif plotinput: self.glacier.plot_deflection()\n\n\tdef getDirichletBCValue(self, t, coords, node_id, primary_vars):\n\t\tx, y, z = coords\n\t\t\n\t\t# prescribe displacement u_y\n\t\t# scale here with 20 !TODO!\n\t\tvalue = 20 * self.glacier.local_deflection(x,t)\n\t\t\n\t\treturn (True, value)\n\nclass BCM_DomainDisplacement(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4):\n\t\tsuper(BCM_DomainDisplacement, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.glacier = glc.glacier(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\t\tif plotinput: self.glacier.plot_deflection()\n\n\tdef getDirichletBCValue(self, t, coords, node_id, primary_vars):\n\t\tx, y, z = coords\n\t\t\n\t\t# prescribe displacement u_y\n\t\t# scale here with 20 !TODO!\n\t\tvalue = 20 * self.glacier.local_displacement_heuristic(x,y,t)\n\t\t\n\t\treturn (True, value)\n\nclass BCM_BottomDisplacement_X(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, path2data):\n\t\tsuper(BCM_BottomDisplacement_X, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.crust = crc.crust(path2data)\n\n\tdef getDirichletBCValue(self, t, coords, node_id, primary_vars):\n\t\tx, y, z = coords\n\t\t\n\t\t# prescribe displacement u_x\n\t\tt_in_ka = t/s_a/1000\n\t\t# ?TODO? y_scale = y/20\n\t\tvalue = self.crust.interpolateX_data_uxuy(x,y,t_in_ka)[0]\n\t\t\n\t\treturn (True, value)\n\nclass BCM_BottomDisplacement_Y(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, path2data):\n\t\tsuper(BCM_BottomDisplacement_Y, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.crust = crc.crust(path2data)\n\t\tif plotinput:\n\t\t\tidx = 3\n\t\t\ttRange = np.linspace(t_0/s_a/1000, t_1/s_a/1000, 26)\n\t\t\tself.crust.ylineplot_evolution_uxuy(idx, tRange)\n\n\tdef getDirichletBCValue(self, t, coords, node_id, primary_vars):\n\t\tx, y, z = coords\n\t\t\n\t\t# prescribe displacement u_y\n\t\tt_in_ka = t/s_a/1000\n\t\t# ?TODO? 
y_scale = y/20\n\t\tvalue = self.crust.interpolateX_data_uxuy(x,y,t_in_ka)[1]\n\t\t\n\t\treturn (True, value)\n\nclass BCM_LateralDisplacement_X(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, path2data):\n\t\tsuper(BCM_LateralDisplacement_X, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.crust = crc.crust(path2data)\n\n\tdef getDirichletBCValue(self, t, coords, node_id, primary_vars):\n\t\tx, y, z = coords\n\t\t\n\t\t# prescribe displacement u_x\n\t\tt_in_ka = t/s_a/1000\n\t\ty_scale = y/20\n\t\tvalue = self.crust.interpolateY_data_uxuy(x,y_scale,t_in_ka)[0]\n\t\t\n\t\treturn (True, value)\n\nclass BCM_LateralDisplacement_Y(OpenGeoSys.BoundaryCondition):\n\n\tdef __init__(self, path2data):\n\t\tsuper(BCM_LateralDisplacement_Y, self).__init__()\n\t\t# instantiate member objects of the external geosphere\n\t\tself.crust = crc.crust(path2data)\n\n\tdef getDirichletBCValue(self, t, coords, node_id, primary_vars):\n\t\tx, y, z = coords\n\t\t\n\t\t# prescribe displacement u_y\n\t\tt_in_ka = t/s_a/1000\n\t\ty_scale = y/20\n\t\tvalue = self.crust.interpolateY_data_uxuy(x,y_scale,t_in_ka)[1]\n\t\t\n\t\treturn (True, value)\n\n\n# instantiate the BC objects used by OpenGeoSys\n# ---------------------------------------------\n# Naming convention:\n# bc_Process_(external)origin_boundary_type(_coefficient)\n\n# Cryosphere BCs\nbc_T_glacier_above_Dirichlet = BCT_SurfaceTemperature(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_H_glacier_above_Dirichlet = BCH_SurfacePressure(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_H_glacier_above_Dirichlet_head = BCH_SurfaceHydrohead(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_H_glacier_above_VolSource_head = BCH_SourceFromDeflection(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_H_glacier_above_Neumann = BCH_SurfaceInflux(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_M_glacier_above_Neumann_x = BCM_SurfaceTraction_X(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_M_glacier_above_Neumann_y = BCM_SurfaceTraction_Y(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n#bc_H_glacier_north_Neumann\n\n# Lithosphere BCs\nbc_M_crustal_south_Dirichlet_x = BCM_LateralDisplacement_X(path2data)\nbc_M_crustal_south_Dirichlet_y = BCM_LateralDisplacement_Y(path2data)\nbc_M_crustal_below_Dirichlet_x = BCM_BottomDisplacement_X(path2data)\nbc_M_crustal_below_Dirichlet_y = BCM_BottomDisplacement_Y(path2data)\n#bc_M_crustal_below_Dirichlet_y = BCM_BottomDeflection(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n#bc_M_glacier_above_Dirichlet_y = BCM_BottomDeflection(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_M_glacier_above_Dirichlet_y = BCM_DomainDisplacement(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n#bc_M_crustal_north\n#bc_M_crustal_aside\n\n# just for downward compatibility\nbc_thermally_dirichlet = BCT_SurfaceTemperature(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_hydraulic_dirichlet = BCH_SurfacePressure(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_hydraulic_neumann = BCH_SurfaceInflux(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_mechanics_neumann_x = BCM_SurfaceTraction_X(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_mechanics_neumann_y = BCM_SurfaceTraction_Y(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\nbc_mechanics_dirichlet = BCM_BottomDeflection(L_dom, L_max, H_max, x_0, t_0, t_1, t_2, t_3, t_4)\n\nbc_y = BCM_SurfaceTraction_Y(L_dom, L_max, H_max, x_0, t_0, t_1, 0, 0, 0)\n" ]
[ [ "numpy.linspace" ] ]
sionab/simmate
[ "6dedea7310829aae425bf3393e7923e454a0129f" ]
[ "src/simmate/toolkit/symmetry/webscraper/aflow.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom selenium import webdriver\nimport pandas as pd\nfrom tqdm import tqdm\n\n# launch the webbrowser\ndriver = webdriver.Chrome(\n executable_path=\"/snap/bin/chromium.chromedriver\"\n) # /snap/bin/chromium gives errors\n\n# load the webpage\ndriver.get(\"http://aflowlib.org/CrystalDatabase/prototype_index.html\")\n\ntable = driver.find_element_by_id(\"myTable\")\n\nrows = table.find_elements_by_tag_name(\"tr\")\n\n# grab all the headers from the first row\nheaders = [column.text for column in rows[0].find_elements_by_tag_name(\"th\")]\n# add one extra header for the POSCAR\nheaders.append(\"POSCAR_url\")\n\n# grab all the data from the rest of the rows\ndata = []\nfor row in tqdm(rows[1:]):\n row_data = []\n\n for column in row.find_elements_by_tag_name(\"td\"):\n row_data.append(column.text)\n\n # we need to grab the POSCAR url too\n # which we can get indirectly from the link within the first column\n href = row.find_element_by_tag_name(\"a\").get_attribute(\"href\")\n poscar_url = href.replace(\".html\", \".poscar\").replace(\n \"/CrystalDatabase/\", \"/CrystalDatabase/POSCAR/\"\n )\n # append to the row data and we will go through these later - rather switch back and forth from this main page\n row_data.append(poscar_url)\n\n data.append(row_data)\n\n\n# add a POSCAR header\nheaders.append(\"POSCAR\")\n# add POSCARs to each data row\nfor entry in tqdm(data):\n\n try: # some POSCARs don't have a page... not sure why. I should contact their devs. see A_oC8_64_f.I (#179)\n\n # grab prototype POSCAR url\n poscar_url = entry[9]\n\n # move to the webpage for its POSCAR\n driver.get(poscar_url)\n\n # download the POSCAR string (note '\\n' breaks will be there, but that's needed)\n textElement = driver.find_element_by_tag_name(\"pre\")\n poscar = textElement.text\n\n # append to results\n entry.append(poscar)\n except:\n entry.append(None)\n\n\n# make DataFrame\ndf = pd.DataFrame(data=data, columns=headers)\n\n# close the window\ndriver.close()\n\n# export to csv file\ndf.to_csv(\"aflow_prototypes.csv\")\n\n##############################################################################\n\n# # if you want to go from a str in POSCAR format to a pymatgen structure\n# # Easier method would use Structure.from_file() but we currently have just a string\n# from pymatgen.io.vasp import Poscar\n# input_string = textElement.text\n# Poscar.from_string(input_string, False,\n# read_velocities=False).structure\n\n##############################################################################\n" ]
[ [ "pandas.DataFrame" ] ]
jeremymanning/hypertools
[ "1b39b41aaa634e816d73635e0b9b773f1ed6e709" ]
[ "hypertools/cluster/cluster.py" ]
[ "# noinspection PyPackageRequirements\nimport datawrangler as dw\nimport pandas as pd\n\nfrom ..core import apply_model, get_default_options, eval_dict\n\n\n@dw.decorate.apply_stacked\ndef cluster(data, model='KMeans', **kwargs):\n \"\"\"\n Cluster the data and return a list of cluster labels\n\n Parameters\n ----------\n :param data: any hypertools-compatible dataset\n :param model: a string containing the name of any of the following scikit-learn (or compatible) models (default:\n 'KMeans'):\n - A discrete cluster model: https://scikit-learn.org/stable/modules/classes.html#module-sklearn.cluster\n - A Gaussian mixture model: https://scikit-learn.org/stable/modules/classes.html#module-sklearn.mixture\n Models can also be specified as any other scikit-learn compatible model, or a list of models/strings. To\n customize the behavior of any model, models may also be passed as dictionaries with fields:\n - 'model': the model(s) to apply\n - 'args': a list of unnamed arguments (appended to data)\n - 'kwargs': a list of named keyword arguments\n :param kwargs: keyword arguments are first passed to datawrangler.decorate.funnel, and any remaining arguments\n are passed onto the model initialization function. Keyword arguments override any model-specific parameters.\n\n Returns\n -------\n :return: a DataFrame (or list of DataFrames) containing the cluster labels or mixture proportions\n \"\"\"\n labels = apply_model(data, model, search=['sklearn.cluster', 'sklearn.mixture'],\n **dw.core.update_dict(eval_dict(get_default_options()['cluster']), kwargs))\n labels = pd.DataFrame(labels, index=data.index)\n\n if len(labels) == 1:\n return labels[0]\n else:\n return labels\n\n" ]
[ [ "pandas.DataFrame" ] ]
Atomu2014/Describing_a_Knowledge_Base
[ "4a79d00e0620ebe01facfbb0ccf8f35b7c2c9c0f" ]
[ "structure_generator/DecoderRNN.py" ]
[ "import torch.nn as nn\nimport numpy as np\nimport torch\nimport sys\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .baseRNN import BaseRNN\n# self.word2idx['<UNK>'] = 1\n\n\nclass DecoderRNN(BaseRNN):\n\n def __init__(self, vocab_size, embedding, embed_size, pemsize, sos_id, eos_id,\n unk_id, max_len=100, n_layers=1, rnn_cell='gru',\n bidirectional=True, input_dropout_p=0, dropout_p=0,\n lmbda=1.5, USE_CUDA = torch.cuda.is_available(), beam_size=5):\n hidden_size = embed_size\n\n super(DecoderRNN, self).__init__(vocab_size, hidden_size, input_dropout_p, dropout_p, n_layers, rnn_cell)\n\n self.bidirectional_encoder = bidirectional\n self.rnn = self.rnn_cell(embed_size, hidden_size, n_layers, batch_first=True, dropout=dropout_p)\n self.output_size = vocab_size\n self.hidden_size = hidden_size\n self.max_length = max_len\n self.eos_id = eos_id\n self.sos_id = sos_id\n self.unk_id = unk_id\n self.embedding = embedding\n self.lmbda = lmbda\n self.beam_size = beam_size\n self.device = torch.device(\"cuda:0\" if USE_CUDA and torch.cuda.is_available() else \"cpu\")\n #directions\n self.Wh = nn.Linear(hidden_size * 2, hidden_size)\n #output\n self.V = nn.Linear(hidden_size * 3, self.output_size)\n #params for attention\n self.Wih = nn.Linear(hidden_size, hidden_size) # for obtaining e from encoder input\n self.Wfh = nn.Linear(hidden_size, hidden_size) # for obtaining e from encoder field\n self.Ws = nn.Linear(hidden_size, hidden_size) # for obtaining e from current state\n self.w_c = nn.Linear(1, hidden_size) # for obtaining e from context vector\n self.v = nn.Linear(hidden_size, 1)\n # parameters for p_gen\n self.w_ih = nn.Linear(hidden_size, 1) # for changing context vector into a scalar\n self.w_fh = nn.Linear(hidden_size, 1) # for changing context vector into a scalar\n self.w_s = nn.Linear(hidden_size, 1) # for changing hidden state into a scalar\n self.w_x = nn.Linear(embed_size, 1) # for changing input embedding into a scalar\n # parameters for self attention\n self_size = pemsize * 2 # hidden_size +\n self.wp = nn.Linear(self_size, self_size)\n self.wc = nn.Linear(self_size, self_size)\n self.wa = nn.Linear(self_size, self_size)\n\n def get_matrix(self, encoderp):\n tp = torch.tanh(self.wp(encoderp))\n tc = torch.tanh(self.wc(encoderp))\n f = tp.bmm(self.wa(tc).transpose(1, 2))\n return F.softmax(f, dim=2)\n\n def self_attn(self, f_matrix, encoderi, encoderf):\n c_contexti = torch.bmm(f_matrix, encoderi)\n c_contextf = torch.bmm(f_matrix, encoderf)\n return c_contexti, c_contextf\n\n def decode_step(self, input_ids, coverage, _h, enc_proj, batch_size, max_enc_len,\n enc_mask, c_contexti, c_contextf, embed_input, max_source_oov):\n dec_proj = self.Ws(_h).unsqueeze(1).expand_as(enc_proj)\n cov_proj = self.w_c(coverage.view(-1, 1)).view(batch_size, max_enc_len, -1)\n e_t = self.v(torch.tanh(enc_proj + dec_proj + cov_proj).view(batch_size*max_enc_len, -1))\n\n # mask to -INF before applying softmax\n attn_scores = e_t.view(batch_size, max_enc_len)\n del e_t\n attn_scores.data.masked_fill_(enc_mask.data.bool(), -10000)\n attn_scores = F.softmax(attn_scores, dim=1)\n\n contexti = attn_scores.unsqueeze(1).bmm(c_contexti).squeeze(1)\n contextf = attn_scores.unsqueeze(1).bmm(c_contextf).squeeze(1)\n\n # output proj calculation\n p_vocab = F.softmax(self.V(torch.cat((_h, contexti, contextf), 1)), dim=1)\n # p_gen calculation\n p_gen = torch.sigmoid(self.w_ih(contexti) + self.w_fh(contextf) + self.w_s(_h) + self.w_x(embed_input))\n p_gen = p_gen.view(-1, 1)\n weighted_Pvocab = 
p_vocab * p_gen\n weighted_attn = (1-p_gen) * attn_scores\n\n if max_source_oov > 0:\n # create OOV (but in-article) zero vectors\n ext_vocab = torch.zeros(batch_size, max_source_oov)\n ext_vocab=ext_vocab.to(self.device)\n combined_vocab = torch.cat((weighted_Pvocab, ext_vocab), 1)\n del ext_vocab\n else:\n combined_vocab = weighted_Pvocab\n del weighted_Pvocab # 'Recheck OOV indexes!'\n # scatter article word probs to combined vocab prob.\n combined_vocab = combined_vocab.scatter_add(1, input_ids, weighted_attn)\n return combined_vocab, attn_scores\n\n def forward(self, max_source_oov=0, targets=None, targets_id=None, input_ids=None,\n enc_mask=None, encoder_hidden=None, encoderi=None, encoderf=None,\n encoderp=None, teacher_forcing_ratio=None, w2fs=None, fig=False, beam=False):\n\n targets, batch_size, max_length, max_enc_len = self._validate_args(targets, encoder_hidden, encoderi, teacher_forcing_ratio)\n\n decoder_hidden = self._init_state(encoder_hidden)\n if beam: \n return self.beam_search(targets, max_length, decoder_hidden, max_enc_len, max_source_oov, input_ids, enc_mask, encoderi, encoderf,\n encoderp, w2fs)\n coverage = torch.zeros(batch_size, max_enc_len).to(self.device)\n enci_proj = self.Wih(encoderi.view(batch_size*max_enc_len, -1)).view(batch_size, max_enc_len, -1)\n encf_proj = self.Wfh(encoderf.view(batch_size*max_enc_len, -1)).view(batch_size, max_enc_len, -1)\n f_matrix = self.get_matrix(encoderp)\n enc_proj = enci_proj + encf_proj\n\n # get position attention scores\n c_contexti, c_contextf = self.self_attn(f_matrix, encoderi, encoderf)\n if teacher_forcing_ratio:\n embedded = self.embedding(targets)\n embed_inputs = self.input_dropout(embedded)\n # coverage initially zero\n dec_lens = (targets > 0).float().sum(1)\n lm_loss, cov_loss = [], []\n hidden, _ = self.rnn(embed_inputs, decoder_hidden)\n # step through decoder hidden states\n for _step in range(max_length):\n _h = hidden[:, _step, :]\n target_id = targets_id[:, _step+1].unsqueeze(1)\n embed_input = embed_inputs[:, _step, :]\n\n combined_vocab, attn_scores = self.decode_step(input_ids, coverage, _h, enc_proj, batch_size,\n max_enc_len, enc_mask, c_contexti, c_contextf,\n embed_input, max_source_oov)\n # mask the output to account for PAD\n target_mask_0 = target_id.ne(0).detach()\n output = combined_vocab.gather(1, target_id).add_(sys.float_info.epsilon)\n lm_loss.append(output.log().mul(-1) * target_mask_0.float())\n\n coverage = coverage + attn_scores\n\n # Coverage Loss\n # take minimum across both attn_scores and coverage\n _cov_loss, _ = torch.stack((coverage, attn_scores), 2).min(2)\n cov_loss.append(_cov_loss.sum(1))\n # add individual losses\n total_masked_loss = torch.cat(lm_loss, 1).sum(1).div(dec_lens) + self.lmbda * \\\n torch.stack(cov_loss, 1).sum(1).div(dec_lens)\n return total_masked_loss\n else:\n return self.evaluate(targets, batch_size, max_length, max_source_oov, c_contexti, c_contextf, f_matrix,\n decoder_hidden, enc_mask, input_ids, coverage, enc_proj, max_enc_len, w2fs, fig)\n\n def evaluate(self, targets, batch_size, max_length, max_source_oov, c_contexti, c_contextf, f_matrix,\n decoder_hidden, enc_mask, input_ids, coverage, enc_proj, max_enc_len, w2fs, fig):\n lengths = np.array([max_length] * batch_size)\n decoded_outputs = []\n if fig:\n attn = []\n embed_input = self.embedding(targets)\n # step through decoder hidden states\n for _step in range(max_length):\n _h, decoder_hidden = self.rnn(embed_input, decoder_hidden)\n combined_vocab, attn_scores = self.decode_step(input_ids, 
coverage,\n _h.squeeze(1), enc_proj, batch_size, max_enc_len, enc_mask,\n c_contexti, c_contextf, embed_input.squeeze(1),\n max_source_oov)\n # not allow decoder to output UNK\n combined_vocab[:, self.unk_id] = 0\n symbols = combined_vocab.topk(1)[1]\n\n if fig:\n attn.append(attn_scores)\n decoded_outputs.append(symbols.clone())\n eos_batches = symbols.data.eq(self.eos_id)\n if eos_batches.dim() > 0:\n eos_batches = eos_batches.cpu().view(-1).numpy()\n update_idx = ((lengths > _step) & eos_batches) != 0\n lengths[update_idx] = len(decoded_outputs)\n # change unk to corresponding field\n for i in range(symbols.size(0)):\n w2f = w2fs[i]\n if symbols[i].item() > self.vocab_size-1:\n symbols[i] = w2f[symbols[i].item()]\n # symbols.masked_fill_((symbols > self.vocab_size-1), self.unk_id)\n embed_input = self.embedding(symbols)\n coverage = coverage + attn_scores\n if fig:\n return torch.stack(decoded_outputs, 1).squeeze(2), lengths.tolist(), f_matrix[0], \\\n torch.stack(attn, 1).squeeze(2)[0]\n else:\n return torch.stack(decoded_outputs, 1).squeeze(2), lengths.tolist()\n\n def beam_search(self, targets, max_length, decoder_hidden, max_enc_len, max_source_oov, input_ids, enc_mask, encoderi, encoderf,\n encoderp, w2fs):\n # the code is very similar to the forward function\n w2f = w2fs[0]\n\n ### YOUR CODE HERE (~1 Lines) \n ### Initialize coverage vector \n batch_size = 1\n coverage = torch.zeros(batch_size, max_enc_len).to(self.device)\n ### END YOUR CODE \n \n # results --> list of all ouputs terminated with stop tokens and of minimal length\n results = []\n # all_hyps --> list of current beam hypothesis. start with base initial hypothesis\n all_hyps = [Hypothesis([self.sos_id], decoder_hidden, coverage, 0)]\n # start decoding\n\n ### YOUR CODE HERE (~4 Lines) \n ### Initialize enci_proj, encf_proj, f_matrix, enc_proj vector \n enci_proj = self.Wih(encoderi.view(batch_size * max_enc_len, -1)).view(batch_size, max_enc_len, -1)\n encf_proj = self.Wfh(encoderf.view(batch_size * max_enc_len, -1)).view(batch_size, max_enc_len, -1)\n f_matrix = self.get_matrix(encoderp)\n enc_proj = enci_proj + encf_proj\n ### END YOUR CODE \n\n ### YOUR CODE HERE (~1 Lines) \n # get position attention scores\n c_contexti, c_contextf = self.self_attn(f_matrix, encoderi, encoderf)\n ### END YOUR CODE \n\n embed_input = self.embedding(targets)\n # step through decoder hidden states\n for _step in range(max_length):\n # after first step, input is of batch_size=curr_beam_size\n # curr_beam_size <= self.beam_size due to pruning of beams that have terminated\n # adjust enc_states and init_state accordingly\n\n _h, decoder_hidden = self.rnn(embed_input, decoder_hidden)\n\n ### YOUR CODE HERE (~1 Lines) \n ### get current beam size curr_beam_size\n curr_beam_size = embed_input.size(0)\n ### END YOUR CODE \n \n combined_vocab, attn_scores = self.decode_step(input_ids, coverage,\n _h.squeeze(1), enc_proj, curr_beam_size, max_enc_len, enc_mask,\n c_contexti, c_contextf, embed_input.squeeze(1),\n max_source_oov)\n combined_vocab[:, self.unk_id] = 0\n all_hyps, symbols, results, decoder_hidden, coverage = self.getOverallTopk(combined_vocab,\n attn_scores, coverage, all_hyps, results,\n decoder_hidden.squeeze(0))\n # change unk to corresponding field\n for i in range(symbols.size(0)):\n if symbols[i].item() > self.vocab_size-1:\n symbols[i] = w2f[symbols[i].item()]\n embed_input = self.embedding(symbols)\n curr_beam_size = embed_input.size(0)\n if embed_input.size(0) > encoderi.size(0):\n c_contexti = 
c_contexti.expand(curr_beam_size, max_enc_len, -1).contiguous()\n c_contextf = c_contextf.expand(curr_beam_size, max_enc_len, -1).contiguous()\n enc_proj = enc_proj.expand(curr_beam_size, max_enc_len, -1).contiguous()\n enc_mask = enc_mask.expand(curr_beam_size, -1).contiguous()\n input_ids = input_ids.expand(curr_beam_size, -1).contiguous()\n \n if len(results) > 0:\n candidates = results\n else:\n candidates = all_hyps\n all_outputs = sorted(candidates, key=lambda x:x.survivability, reverse=True)\n return all_outputs[0].full_prediction\n \n\n\n def getOverallTopk(self, vocab_probs, attn_scores, coverage, all_hyps, results, decoder_hidden):\n new_decoder_hidden, new_coverage = [], []\n new_vocab_probs = []\n for i, hypo in enumerate(all_hyps):\n curr_vocab_probs = vocab_probs[i]\n new_vocab_probs.append(curr_vocab_probs.unsqueeze(0))\n vocab_probs = torch.cat(new_vocab_probs, 0)\n coverage += attn_scores\n # return top-k values i.e. top-k over all beams i.e. next step input ids\n # return hidden, cell states corresponding to topk\n probs, inds = vocab_probs.topk(k=self.beam_size, dim=1)\n probs = probs.log()\n candidates = []\n assert len(all_hyps) == probs.size(0), '# Hypothesis and log-prob size dont match'\n ### YOUR CODE HERE (~4 Lines) \n ### cycle through all hypothesis in full beam\n for i, hypo in enumerate(probs.tolist()):\n for j, _ in enumerate(hypo):\n new_cand = all_hyps[i].extend(token_id=inds[i, j].item(),\n hidden_state=decoder_hidden[i].unsqueeze(0),\n coverage=coverage[i].unsqueeze(0),\n log_prob=probs[i,j])\n candidates.append(new_cand)\n ### END YOUR CODE \n # sort in descending order\n candidates = sorted(candidates, key=lambda x:x.survivability, reverse=True)\n # print('len of candidiates: ', len(candidates))\n new_beam, next_inp = [], []\n # prune hypotheses and generate new beam\n for h in candidates:\n if h.full_prediction[-1] == self.eos_id and len(h.full_prediction) > 2:\n # weed out small sentences that likely have no meaning\n if len(h.full_prediction) >= 15:\n results.append(h)\n else:\n new_beam.append(h)\n next_inp.append(h.full_prediction[-1])\n new_decoder_hidden.append(h.hidden_state)\n new_coverage.append(h.coverage)\n if len(new_beam) == self.beam_size:\n break\n # print('len of beam: ', len(new_beam))\n assert len(new_beam) >= 1, 'Non-existent beam'\n # print(next_inp)\n return new_beam, torch.LongTensor(next_inp).to(self.device).view(-1, 1), results, \\\n torch.cat(new_decoder_hidden, 0).unsqueeze(0), torch.cat(new_coverage, 0)\n\n def _init_state(self, encoder_hidden):\n \"\"\" Initialize the encoder hidden state. 
\"\"\"\n if encoder_hidden is None:\n return None\n if isinstance(encoder_hidden, tuple):\n encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])\n else:\n encoder_hidden = self._cat_directions(encoder_hidden)\n return encoder_hidden\n\n def _cat_directions(self, h):\n \"\"\" If the encoder is bidirectional, do the following transformation.\n (#directions * #layers, #batch, hidden_size) -> (#layers, #batch, #directions * hidden_size)\n \"\"\"\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n h = self.Wh(h)\n return h\n\n def _validate_args(self, targets, encoder_hidden, encoder_outputs, teacher_forcing_ratio):\n if encoder_outputs is None:\n raise ValueError(\"Argument encoder_outputs cannot be None when attention is used.\")\n else:\n max_enc_len = encoder_outputs.size(1)\n # inference batch size\n if targets is None and encoder_hidden is None:\n batch_size = 1\n else:\n if targets is not None:\n batch_size = targets.size(0)\n else:\n if self.rnn_cell is nn.LSTM:\n batch_size = encoder_hidden[0].size(1)\n elif self.rnn_cell is nn.GRU:\n batch_size = encoder_hidden.size(1)\n\n # set default targets and max decoding length\n if targets is None:\n if teacher_forcing_ratio > 0:\n raise ValueError(\"Teacher forcing has to be disabled (set 0) when no targets is provided.\")\n # torch.set_grad_enabled(False)\n targets = torch.LongTensor([self.sos_id] * batch_size).view(batch_size, 1)\n targets = targets.to(self.device)\n max_length = self.max_length\n else:\n max_length = targets.size(1) - 1 # minus the start of sequence symbol\n\n return targets, batch_size, max_length, max_enc_len\n \n\nclass Hypothesis(object):\n def __init__(self, token_id, hidden_state, coverage, log_prob):\n self._h = hidden_state\n self.log_prob = log_prob\n self.hidden_state = hidden_state\n self.coverage = coverage.detach()\n self.full_prediction = token_id # list\n self.survivability = self.log_prob/ float(len(self.full_prediction))\n\n def extend(self, token_id, hidden_state, coverage, log_prob):\n return Hypothesis(token_id= self.full_prediction + [token_id],\n hidden_state=hidden_state,\n coverage=coverage.detach(),\n log_prob= self.log_prob + log_prob)" ]
[ [ "torch.nn.functional.softmax", "torch.LongTensor", "torch.cat", "torch.zeros", "torch.tanh", "torch.nn.Linear", "torch.bmm", "torch.cuda.is_available", "torch.stack", "numpy.array" ] ]
mrkem598/moviepy
[ "a90d4ab4825cd368963e05ad01fd526d51edd9a5" ]
[ "moviepy/audio/AudioClip.py" ]
[ "import os\nimport numpy as np\nfrom moviepy.audio.io.ffmpeg_audiowriter import ffmpeg_audiowrite\nfrom moviepy.decorators import requires_duration\nfrom moviepy.tools import (deprecated_version_of,\n extensions_dict)\n\nfrom moviepy.Clip import Clip\nfrom tqdm import tqdm\n\nclass AudioClip(Clip):\n \"\"\" Base class for audio clips.\n \n See ``AudioFileClip`` and ``CompositeSoundClip`` for usable classes.\n \n An AudioClip is a Clip with a ``make_frame`` attribute of\n the form `` t -> [ f_t ]`` for mono sound and\n ``t-> [ f1_t, f2_t ]`` for stereo sound (the arrays are Numpy arrays).\n The `f_t` are floats between -1 and 1. These bounds can be\n trespassed wihtout problems (the program will put the\n sound back into the bounds at conversion time, without much impact). \n \n Parameters\n -----------\n \n make_frame\n A function `t-> frame at time t`. The frame does not mean much\n for a sound, it is just a float. What 'makes' the sound are\n the variations of that float in the time.\n \n nchannels\n Number of channels (one or two for mono or stereo).\n \n Examples\n ---------\n \n >>> # Plays the note A (a sine wave of frequency 404HZ)\n >>> import numpy as np\n >>> make_frame = lambda t : 2*[ np.sin(404 * 2 * np.pi * t) ]\n >>> clip = AudioClip(make_frame, duration=5)\n >>> clip.preview()\n \n \"\"\"\n \n def __init__(self, make_frame = None, duration=None):\n Clip.__init__(self)\n if make_frame is not None:\n self.make_frame = make_frame\n frame0 = self.get_frame(0)\n if hasattr(frame0, '__iter__'):\n self.nchannels = len(list(frame0))\n else:\n self.nchannels = 1\n if duration is not None:\n self.duration = duration\n self.end = duration\n \n @requires_duration\n def iter_chunks(self, chunksize=None, chunk_duration=None, fps=None,\n quantize=False, nbytes=2, progress_bar=False):\n \"\"\" Iterator that returns the whole sound array of the clip by chunks\n \"\"\"\n if fps is None:\n fps=self.fps\n if chunk_duration is not None:\n chunksize = int(chunk_duration*fps)\n \n totalsize = int(fps*self.duration)\n\n nchunks = totalsize // chunksize + 1\n\n pospos = np.linspace(0, totalsize, nchunks + 1, endpoint=True, dtype=int)\n\n def generator():\n for i in range(nchunks):\n size = pospos[i+1] - pospos[i]\n assert(size <= chunksize)\n tt = (1.0/fps)*np.arange(pospos[i],pospos[i+1])\n yield self.to_soundarray(tt, nbytes= nbytes, quantize=quantize, fps=fps,\n buffersize=chunksize)\n\n if progress_bar:\n return tqdm(generator(), total=nchunks)\n else:\n return generator()\n\n @requires_duration\n def to_soundarray(self,tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000):\n \"\"\"\n Transforms the sound into an array that can be played by pygame\n or written in a wav file. 
See ``AudioClip.preview``.\n \n Parameters\n ------------\n \n fps\n Frame rate of the sound for the conversion.\n 44100 for top quality.\n \n nbytes\n Number of bytes to encode the sound: 1 for 8bit sound,\n 2 for 16bit, 4 for 32bit sound.\n \n \"\"\"\n if fps is None:\n fps = self.fps\n \n stacker = np.vstack if self.nchannels==2 else np.hstack \n max_duration = 1.0 * buffersize / fps\n if (tt is None):\n if self.duration>max_duration:\n return stacker(self.iter_chunks(fps=fps, quantize=quantize, nbytes=2,\n chunksize=buffersize))\n else:\n tt = np.arange(0, self.duration, 1.0/fps)\n \"\"\"\n elif len(tt)> 1.5*buffersize:\n nchunks = int(len(tt)/buffersize+1)\n tt_chunks = np.array_split(tt, nchunks)\n return stacker([self.to_soundarray(tt=ttc, buffersize=buffersize, fps=fps,\n quantize=quantize, nbytes=nbytes)\n for ttc in tt_chunks])\n \"\"\"\n #print tt.max() - tt.min(), tt.min(), tt.max()\n \n snd_array = self.get_frame(tt)\n\n if quantize:\n snd_array = np.maximum(-0.99, np.minimum(0.99,snd_array))\n inttype = {1:'int8',2:'int16', 4:'int32'}[nbytes]\n snd_array= (2**(8*nbytes-1)*snd_array).astype(inttype)\n \n return snd_array\n\n def max_volume(self, stereo=False, chunksize=50000, progress_bar=False):\n \n stereo = stereo and (self.nchannels == 2)\n\n maxi = np.array([0,0]) if stereo else 0\n for chunk in self.iter_chunks(chunksize=chunksize, progress_bar=progress_bar):\n maxi = np.maximum(maxi,abs(chunk).max(axis=0)) if stereo else max(maxi,abs(chunk).max())\n return maxi\n\n\n\n \n @requires_duration\n def write_audiofile(self,filename, fps=44100, nbytes=2,\n buffersize=2000, codec=None,\n bitrate=None, ffmpeg_params=None,\n write_logfile=False, verbose=True,\n progress_bar=True):\n \"\"\" Writes an audio file from the AudioClip.\n\n\n Parameters\n -----------\n\n filename\n Name of the output file\n\n fps\n Frames per second\n\n nbyte\n Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound)\n\n codec\n Which audio codec should be used. If None provided, the codec is\n determined based on the extension of the filename. Choose\n 'pcm_s16le' for 16-bit wav and 'pcm_s32le' for 32-bit wav.\n\n bitrate\n Audio bitrate, given as a string like '50k', '500k', '3000k'.\n Will determine the size and quality of the output file.\n Note that it mainly an indicative goal, the bitrate won't\n necessarily be the this in the output file.\n\n ffmpeg_params\n Any additional parameters you would like to pass, as a list\n of terms, like ['-option1', 'value1', '-option2', 'value2']\n\n write_logfile\n If true, produces a detailed logfile named filename + '.log'\n when writing the file\n\n verbose\n Boolean indicating whether to print infomation\n \n progress_bar\n Boolean indicating whether to show the progress bar.\n\n \"\"\"\n\n if codec is None:\n name, ext = os.path.splitext(os.path.basename(filename))\n try:\n codec = extensions_dict[ext[1:]]['codec'][0]\n except KeyError:\n raise ValueError(\"MoviePy couldn't find the codec associated \"\n \"with the filename. 
Provide the 'codec' parameter in \"\n \"write_videofile.\")\n\n return ffmpeg_audiowrite(self, filename, fps, nbytes, buffersize,\n codec=codec, bitrate=bitrate, write_logfile=write_logfile,\n verbose=verbose, ffmpeg_params=ffmpeg_params,\n progress_bar=progress_bar)\n\n\n# The to_audiofile method is replaced by the more explicit write_audiofile.\nAudioClip.to_audiofile = deprecated_version_of(AudioClip.write_audiofile,\n 'to_audiofile')\n###\n\nclass AudioArrayClip(AudioClip):\n \"\"\"\n \n An audio clip made from a sound array.\n \n Parameters\n -----------\n \n array\n A Numpy array representing the sound, of size Nx1 for mono,\n Nx2 for stereo.\n \n fps\n Frames per second : speed at which the sound is supposed to be\n played.\n \n \"\"\"\n \n def __init__(self, array, fps):\n \n Clip.__init__(self)\n self.array = array\n self.fps = fps\n self.duration = 1.0 * len(array) / fps\n \n \n def make_frame(t):\n \"\"\" complicated, but must be able to handle the case where t\n is a list of the form sin(t) \"\"\"\n \n if isinstance(t, np.ndarray):\n array_inds = (self.fps*t).astype(int)\n in_array = (array_inds>0) & (array_inds < len(self.array))\n result = np.zeros((len(t),2))\n result[in_array] = self.array[array_inds[in_array]]\n return result\n else:\n i = int(self.fps * t)\n if i < 0 or i >= len(self.array):\n return 0*self.array[0]\n else:\n return self.array[i]\n\n self.make_frame = make_frame\n self.nchannels = len(list(self.get_frame(0)))\n \n \nclass CompositeAudioClip(AudioClip):\n\n \"\"\" Clip made by composing several AudioClips.\n \n An audio clip made by putting together several audio clips.\n \n Parameters\n ------------\n \n clips\n List of audio clips, which may start playing at different times or\n together. If all have their ``duration`` attribute set, the\n duration of the composite clip is computed automatically.\n \n \"\"\"\n\n def __init__(self, clips):\n\n Clip.__init__(self)\n self.clips = clips\n \n ends = [c.end for c in self.clips]\n self.nchannels = max([c.nchannels for c in self.clips])\n if not any([(e is None) for e in ends]):\n self.duration = max(ends)\n self.end = max(ends)\n\n def make_frame(t):\n \n played_parts = [c.is_playing(t) for c in self.clips]\n \n sounds= [c.get_frame(t - c.start)*np.array([part]).T\n for c,part in zip(self.clips, played_parts)\n if (part is not False) ]\n \n if isinstance(t,np.ndarray):\n zero = np.zeros((len(t),self.nchannels))\n \n else:\n zero = np.zeros(self.nchannels)\n \n return zero + sum(sounds)\n\n self.make_frame = make_frame\n\n\ndef concatenate_audioclips(clips):\n durations = [c.duration for c in clips]\n tt = np.cumsum([0]+durations) # start times, and end time.\n newclips= [c.set_start(t) for c,t in zip(clips, tt)]\n return CompositeAudioClip(newclips).set_duration(tt[-1])\n" ]
[ [ "numpy.minimum", "numpy.linspace", "numpy.arange", "numpy.cumsum", "numpy.array", "numpy.zeros" ] ]
KitchenSong/effective_mass_tensor
[ "de958571f414feda6af7d6802d14badd776ee13e" ]
[ "effective_mass.py" ]
[ "from __future__ import division\nimport numpy as np\nimport re\nfrom scipy import interpolate\nfrom scipy.interpolate import griddata\n\n# Constant\n\nhbar = 6.62607004e-34 # J S\npi = np.pi\nRy2J = 2.1798741e-18 # J/Ry\nBohr2m = 5.29177249e-11 # m/Bohr\nme = 9.10938356e-31 # kg\nlatt = 11.8873 * Bohr2m\n\n\n# The first Brillouin zone of a face centered cubic lattice\n\nb1 = [ -1 , -1, 1]\nb2 = [ 1 , 1 , 1]\nb3 = [-1 , 1 , -1]\n\n# Transforming matrix (Crystal to Cartesian)\n\nbm = np.matrix([b1,b2,b3])\n\n# The band minimum\n\nkm = [0.5, 0.5, 0.5]\n\n# Generating mesh\n\n## Mesh dimensions\n\nNx = 5 # 5 is chosen for convenience of decrete derivative\nNy = 5\nNz = 5\n\npos = np.zeros((Nx*Ny*Nz,3))\n\n## Resolution\n\ndx = 1.0/100.0/(Nx-1)\ndy = 1.0/100.0/(Ny-1)\ndz = 1.0/100.0/(Nz-1)\n\n# K points\n\nfor i in range(Nx):\n for j in range(Ny):\n for k in range(Nz):\n pos[i*Ny*Nz + j*Ny + k, 0] = km[0] + i * dx - dx * (Nx-1)/2\n pos[i*Ny*Nz + j*Ny + k, 1] = km[1] + j * dy - dy * (Ny-1)/2\n pos[i*Ny*Nz + j*Ny + k, 2] = km[2] + k * dz - dz * (Nz-1)/2\n\n# Write the K points\n\nf = open(\"kcdt.dat\", \"wb\")\nf.write(str(Nx * Ny * Nz)+'\\n')\nfor i in range(Nx):\n for j in range(Ny):\n for k in range(Nz):\n f.write(('{0:12.8f} {1:12.8f} {2:12.8f} {3:12.6e}'.format(pos[i*Ny*Nz + j*Ny + k, 0],\n pos[i*Ny*Nz + j*Ny + k, 1],\n pos[i*Ny*Nz + j*Ny + k, 2],\n 1/(Nx * Ny * Nz)))+'\\n')\nf.close()\n\n# Read the band structure\n\n\n\nf = open(\"fit.out\", \"r\") # output file for band calculation\nlines = f.readlines()\nfor i, line in enumerate(lines):\n if re.search(\"End of band structure calculation\", line):\n mark_line_start = i # find the start\n if re.search(\"Writing output data file\", line):\n mark_line_end = i # find the end\n\n# Number of bands\n\nnbnd = 24\nspacing = int(round(nbnd/8.0)+3)\n\n\nkmat = np.zeros((Nx*Ny*Nz,3))\nkmat_dxyz = np.zeros((Nx*Ny*Nz,3))\nEmat = np.zeros((Nx*Ny*Nz,1))\nEmat_dE = np.zeros((Nx*Ny*Nz,1))\nEband = np.zeros((Nx*Ny*Nz,nbnd))\n\n\n# Define the index of band minimum (from low energy to high energy)\n\nNmin = 11 # 10 means No.10 band with the count starting from 1\n\n# Read the k coordinates and energy\n\nfor i in range(Nx*Ny*Nz):\n for nnn, a in enumerate(lines[mark_line_start + spacing * i + 2].split()):\n if \"=\" in a:\n temp = lines[mark_line_start + spacing * i + 2].split()[nnn]\n # lines[mark_line_start + spacing * i + 2].split()[nnn] = temp[1:]\n kmat[i,:] = np.dot(pos[i,:],bm) # float(temp[1:])\n # kmat[i,0] = float(lines[mark_line_start + spacing * i + 2].split()[1])\n # kmat[i,1] = # float(lines[mark_line_start + spacing * i + 2].split()[2])\n # kmat[i,2] = # float(lines[mark_line_start + spacing * i + 2].split()[3])\n for j in range(spacing-3):\n for k in range(8):\n Eband[i,j*8+k] = float(lines[mark_line_start + spacing * i + j +4].split()[k])\n Emat[i,0] = Eband[i,Nmin -1]\nf.close()\n\n# Calculate the deviation\n\nfor i in range(Nx*Ny*Nz):\n kmat_dxyz[i,:] = kmat[i,:]-np.dot(km,bm)\n Emat_dE[i,:] = (Emat[i,:]-min(Emat))\n\n# Interplate\n\ntemp1 = np.dot([dx, dy ,dz],bm)\n\n\nddx = abs(temp1[0, 0])\nddy = abs(temp1[0, 1])\nddz = abs(temp1[0, 2])\n\n# fit fxx\n\nf_2 = griddata(kmat_dxyz, Emat_dE, (-2 * ddx, 0, 0), method='linear')\nf_1 = griddata(kmat_dxyz, Emat_dE, (-1 * ddx, 0, 0), method='linear')\nf0 = griddata(kmat_dxyz, Emat_dE, (0 * ddx, 0, 0), method='linear')\nf1 = griddata(kmat_dxyz, Emat_dE, (1 * ddx, 0, 0), method='linear')\nf2 = griddata(kmat_dxyz, Emat_dE, (2 * ddx, 0, 0), method='linear')\n\nfxx = 
1.0/(12.0*ddx**2)*(-(f_2+f2)+16*(f_1+f1)-30*f0)\n\n# fit fyy\n\nf_2 = griddata(kmat_dxyz, Emat_dE, (0, -2 * ddy, 0), method='linear')\nf_1 = griddata(kmat_dxyz, Emat_dE, (0, -1 * ddy, 0), method='linear')\nf0 = griddata(kmat_dxyz, Emat_dE, (0, 0 * ddy, 0), method='linear')\nf1 = griddata(kmat_dxyz, Emat_dE, (0, 1 * ddy, 0), method='linear')\nf2 = griddata(kmat_dxyz, Emat_dE, (0, 2 * ddy, 0), method='linear')\n\nfyy = 1.0/(12.0*ddy**2)*(-(f_2+f2)+16*(f_1+f1)-30*f0)\n\n\n# fit fzz\n\nf_2 = griddata(kmat_dxyz, Emat_dE, (0, 0, -2 * ddz), method='linear')\nf_1 = griddata(kmat_dxyz, Emat_dE, (0, 0, -1 * ddz), method='linear')\nf0 = griddata(kmat_dxyz, Emat_dE, (0, 0, 0 * ddz), method='linear')\nf1 = griddata(kmat_dxyz, Emat_dE, (0, 0, 1 * ddz), method='linear')\nf2 = griddata(kmat_dxyz, Emat_dE, (0, 0, 2 * ddz), method='linear')\n\nfzz = 1.0/(12.0*ddz**2)*(-(f_2+f2)+16*(f_1+f1)-30*f0)\n\n# fit fxy\n\nf1_2 = griddata(kmat_dxyz, Emat_dE, (1 * ddx, -2 * ddy, 0), method='linear')\nf2_1 = griddata(kmat_dxyz, Emat_dE, (2 * ddx, -1 * ddy, 0), method='linear')\nf_21 = griddata(kmat_dxyz, Emat_dE, (-2 * ddx, 1 * ddy, 0), method='linear')\nf_12 = griddata(kmat_dxyz, Emat_dE, (-1 * ddx, 2 * ddy, 0), method='linear')\n\nf_1_2 = griddata(kmat_dxyz, Emat_dE, (-1 * ddx, -2 * ddy, 0), method='linear')\nf_2_1 = griddata(kmat_dxyz, Emat_dE, (-2 * ddx, -1 * ddy, 0), method='linear')\nf12 = griddata(kmat_dxyz, Emat_dE, (1 * ddx, 2 * ddy, 0), method='linear')\nf21 = griddata(kmat_dxyz, Emat_dE, (2 * ddx, 1 * ddy, 0), method='linear')\n\nf2_2 = griddata(kmat_dxyz, Emat_dE, (2 * ddx, -2 * ddy, 0), method='linear')\nf_22 = griddata(kmat_dxyz, Emat_dE, (-2 * ddx, 2 * ddy, 0), method='linear')\nf_2_2 = griddata(kmat_dxyz, Emat_dE, (-2 * ddx, -2 * ddy, 0), method='linear')\nf22 = griddata(kmat_dxyz, Emat_dE, (2 * ddx, 2 * ddy, 0), method='linear')\n\nf_1_1 = griddata(kmat_dxyz, Emat_dE, (-1 * ddx, -1 * ddy, 0), method='linear')\nf11 = griddata(kmat_dxyz, Emat_dE, (1 * ddx, 1 * ddy, 0), method='linear')\nf1_1 = griddata(kmat_dxyz, Emat_dE, (1 * ddx, -1 * ddy, 0), method='linear')\nf_11 = griddata(kmat_dxyz, Emat_dE, (-1 * ddx, 1 * ddy, 0), method='linear')\n\nfxy = 1.0/(600.0*ddx*ddy)*(-63*(f1_2+f2_1+f_21+f_12)\\\n +63*(f_1_2+f_2_1+f12+f21)\\\n +44*(f2_2+f_22-f_2_2-f22)\\\n +74*(f_1_1+f11-f1_1-f_11))\n\n# fit fxz\n\nf1_2 = griddata(kmat_dxyz, Emat_dE, (1 * ddx, 0, -2 * ddz), method='linear')\nf2_1 = griddata(kmat_dxyz, Emat_dE, (2 * ddx, 0, -1 * ddz), method='linear')\nf_21 = griddata(kmat_dxyz, Emat_dE, (-2 * ddx, 0, 1 * ddz), method='linear')\nf_12 = griddata(kmat_dxyz, Emat_dE, (-1 * ddx, 0, 2 * ddz), method='linear')\n\nf_1_2 = griddata(kmat_dxyz, Emat_dE, (-1 * ddx, 0, -2 * ddz), method='linear')\nf_2_1 = griddata(kmat_dxyz, Emat_dE, (-2 * ddx, 0, -1 * ddz), method='linear')\nf12 = griddata(kmat_dxyz, Emat_dE, (1 * ddx, 0, 2 * ddy), method='linear')\nf21 = griddata(kmat_dxyz, Emat_dE, (2 * ddx, 0, 1 * ddy), method='linear')\n\nf2_2 = griddata(kmat_dxyz, Emat_dE, (2 * ddx, 0, -2 * ddz), method='linear')\nf_22 = griddata(kmat_dxyz, Emat_dE, (-2 * ddx, 0, 2 * ddz), method='linear')\nf_2_2 = griddata(kmat_dxyz, Emat_dE, (-2 * ddx, 0, -2 * ddz), method='linear')\nf22 = griddata(kmat_dxyz, Emat_dE, (2 * ddx, 0, 2 * ddz), method='linear')\n\nf_1_1 = griddata(kmat_dxyz, Emat_dE, (-1 * ddx, 0, -1 * ddz), method='linear')\nf11 = griddata(kmat_dxyz, Emat_dE, (1 * ddx, 0, 1 * ddz), method='linear')\nf1_1 = griddata(kmat_dxyz, Emat_dE, (1 * ddx, 0, -1 * ddz), method='linear')\nf_11 = griddata(kmat_dxyz, Emat_dE, (-1 * ddx, 0, 1 * 
ddz), method='linear')\n\nfxz = 1.0/(600.0*ddx*ddz)*(-63*(f1_2+f2_1+f_21+f_12)\\\n +63*(f_1_2+f_2_1+f12+f21)\\\n +44*(f2_2+f_22-f_2_2-f22)\\\n +74*(f_1_1+f11-f1_1-f_11))\n\n# fit fyz\n\nf1_2 = griddata(kmat_dxyz, Emat_dE, (0, 1 * ddy, -2 * ddz), method='linear')\nf2_1 = griddata(kmat_dxyz, Emat_dE, (0, 2 * ddy, -1 * ddz), method='linear')\nf_21 = griddata(kmat_dxyz, Emat_dE, (0, -2 * ddy, 1 * ddz), method='linear')\nf_12 = griddata(kmat_dxyz, Emat_dE, (0, -1 * ddy, 2 * ddz), method='linear')\n\nf_1_2 = griddata(kmat_dxyz, Emat_dE, (0, -1 * ddy, -2 * ddz), method='linear')\nf_2_1 = griddata(kmat_dxyz, Emat_dE, (0, -2 * ddy, -1 * ddz), method='linear')\nf12 = griddata(kmat_dxyz, Emat_dE, (0, 1 * ddy, 2 * ddy), method='linear')\nf21 = griddata(kmat_dxyz, Emat_dE, (0, 2 * ddy, 1 * ddy), method='linear')\n\nf2_2 = griddata(kmat_dxyz, Emat_dE, (0, 2 * ddy, -2 * ddz), method='linear')\nf_22 = griddata(kmat_dxyz, Emat_dE, (0, -2 * ddy, 2 * ddz), method='linear')\nf_2_2 = griddata(kmat_dxyz, Emat_dE, (0, -2 * ddy, -2 * ddz), method='linear')\nf22 = griddata(kmat_dxyz, Emat_dE, (0, 2 * ddy, 2 * ddz), method='linear')\n\nf_1_1 = griddata(kmat_dxyz, Emat_dE, (0, -1 * ddy, -1 * ddz), method='linear')\nf11 = griddata(kmat_dxyz, Emat_dE, (0, 1 * ddy, 1 * ddz), method='linear')\nf1_1 = griddata(kmat_dxyz, Emat_dE, (0, 1 * ddy, -1 * ddz), method='linear')\nf_11 = griddata(kmat_dxyz, Emat_dE, (0, -1 * ddy, 1 * ddz), method='linear')\n\nfyz = 1.0/(600.0*ddy*ddz)*(-63*(f1_2+f2_1+f_21+f_12)\\\n +63*(f_1_2+f_2_1+f12+f21)\\\n +44*(f2_2+f_22-f_2_2-f22)\\\n +74*(f_1_1+f11-f1_1-f_11))\n\n# Unit conversion\n\ncvt = Ry2J / (2.0 * pi/latt) ** 2 / hbar**2\n\n# Inverse effective mass tensor\n\nm_1 = cvt * \\\nnp.matrix([[fxx[0],fxy[0],fxz[0]],[fxy[0],fyy[0],fyz[0]],[fxz[0],fyz[0],fzz[0]]])\n\n# Effective mass tensor\n\nm = np.linalg.inv(m_1)/me\n\n# Eigenvalue and eigen vector (principle axies) of effective mass tensor\n\nmeig, eivtr = np.linalg.eigh(m, UPLO='L')\n\n# Write the effective tensor\n\nf = open(\"mass_tensor.out\", \"w\") # output file for band calculation\nf.write(\"Effective mass on priciple axies:\\n\\n\")\nf.write('{0:12.8f} {1:12.8f} {2:12.8f}'.format(meig[0],meig[1],meig[2])+'\\n') # Eigenvector\nf.write(\"\\n\")\nf.write(\"Eigenvector:\\n\\n\")\nfor i in range(3):\n f.write('{0:12.8f} {1:12.8f} {2:12.8f}'.format(eivtr[i,0],eivtr[i,1],eivtr[i,2])+'\\n') # Eigenvector\nf.write(\"\\n\")\nf.write(\"Initial matrix:\\n\\n\")\nfor i in range(3):\n f.write('{0:12.8f} {1:12.8f} {2:12.8f}'.format(m[i,0],m[i,1],m[i,2])+'\\n') # Eigenvector\nf.write(\"\\n\")\nf.write(\"Conductivity effective mass (if cubic):\\n\\n\")\nmc = 1.0/3.0*(1/meig[0]+1/meig[1]+1/meig[2])\nmc = 1/mc\nf.write('{0:12.8f}'.format(mc)+'\\n') # Eigenvector\nmd = (abs(meig[0])*abs(meig[1])*abs(meig[2]))**(1.0/3.0)\nf.write(\"\\n\")\nf.write(\"Density of state effective mass:\\n\\n\")\nf.write('{0:12.8f}'.format(md)+'\\n') # Eigenvector\nf.close()\n" ]
[ [ "numpy.matrix", "numpy.dot", "numpy.linalg.inv", "numpy.linalg.eigh", "scipy.interpolate.griddata", "numpy.zeros" ] ]
HiddeLekanne/Robocup-SPL-Simulated2Real
[ "eb905bc256df838ff6963347a8c2dff6277ef1ec", "eb905bc256df838ff6963347a8c2dff6277ef1ec" ]
[ "PyTorch-GAN/implementations/munit/munit_input.py", "PyTorch-GAN/implementations/cyclegan/cyclegan.py" ]
[ "import argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\nimport datetime\nimport time\nimport sys\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nfrom models import *\nfrom datasets_input import *\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dataset_name\", type=str, default=\"simulated2real\", help=\"name of the dataset\")\nparser.add_argument(\"--input_location\", type=str, default=\"input_images/\", help=\"input folder\")\nparser.add_argument(\"--output_location\", type=str, default=\"output_images/\", help=\"output folder\")\nparser.add_argument(\"--model_location\", type=str, default=\"saved_models/\", help=\"model location folder\")\nparser.add_argument(\"--model_number\", type=int, default = 247, help=\"epoch number of the model\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--img_height\", type=int, default=240, help=\"size of image height\")\nparser.add_argument(\"--img_width\", type=int, default=320, help=\"size of image width\")\nparser.add_argument(\"--channels\", type=int, default=3, help=\"number of image channels\")\nparser.add_argument(\"--dim\", type=int, default=64, help=\"number of filters in first encoder layer\")\nparser.add_argument(\"--style_dim\", type=int, default=5, help=\"dimensionality of the style code\")\nparser.add_argument(\"--n_downsample\", type=int, default=2, help=\"number downsampling layers in encoder\")\nparser.add_argument(\"--n_residual\", type=int, default=3, help=\"number of residual blocks in encoder / decoder\")\nopt = parser.parse_args()\nprint(opt)\n\n# Create sample and checkpoint directories\nos.makedirs(\"%s\" % opt.output_location, exist_ok=True)\n\ncuda = torch.cuda.is_available()\n\ninput_shape = (opt.channels, opt.img_height, opt.img_width)\n\ncriterion_recon = torch.nn.L1Loss()\n\n# Initialize encoders, generators and discriminators\nEnc1 = Encoder(dim=opt.dim, n_downsample=opt.n_downsample, n_residual=opt.n_residual, style_dim=opt.style_dim)\nDec2 = Decoder(dim=opt.dim, n_upsample=opt.n_downsample, n_residual=opt.n_residual, style_dim=opt.style_dim)\n\nif cuda:\n Enc1 = Enc1.cuda()\n Dec2 = Dec2.cuda()\n\nEnc1.load_state_dict(torch.load(\"saved_models/%s/Enc1_%d.pth\" % (opt.dataset_name, opt.model_number)))\nDec2.load_state_dict(torch.load(\"saved_models/%s/Dec2_%d.pth\" % (opt.dataset_name, opt.model_number)))\n\nTensor = torch.cuda.FloatTensor if cuda else torch.Tensor\n\n# Configure dataloaders\ntransforms_ = [\n transforms.Resize((opt.img_height, opt.img_width), Image.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n]\n\ndataloader = DataLoader(\n ImageDataset(opt.input_location, transforms_=transforms_),\n batch_size=1,\n shuffle=False,\n num_workers=1,\n)\n\nprint(len(dataloader.dataset))\ndata = iter(dataloader)\nfor i in range(len(dataloader.dataset)):\n \"\"\"Saves a generated sample from the validation set\"\"\"\n img = next(data)\n # print(img.unsqueeze(0).size())\n X1 = img.repeat(opt.style_dim, 1, 1, 1)\n X1 = Variable(X1.type(Tensor))\n # Get random style codes\n s_code = np.random.uniform(-1, 1, (opt.style_dim, opt.style_dim))\n s_code = Variable(Tensor(s_code))\n # Generate samples\n c_code_1, _ = Enc1(X1)\n 
X12 = Dec2(c_code_1, s_code)\n # Concatenate samples horisontally\n name = dataloader.dataset.files[i].split(\"/\")[-1]\n\n for i, sample in enumerate(X12):\n tmp_name = name.split(\".\")[0] + \"_\" + str(i) + \".\" + name.split(\".\")[-1]\n\n sample = F.interpolate(sample.unsqueeze(0), size=(480,640), mode='bicubic')\n # sample = transform.resize(sample.unsqueeze(0), (480, 640))\n save_image(sample, opt.output_location + \"/\" + tmp_name, normalize=True)\n", "import argparse\nimport os\nimport numpy as np\nimport math\nimport itertools\nimport datetime\nimport time\n\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image, make_grid\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.autograd import Variable\n\nfrom models import *\nfrom datasets import *\nfrom utils import *\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--epoch\", type=int, default=0, help=\"epoch to start training from\")\nparser.add_argument(\"--n_epochs\", type=int, default=30, help=\"number of epochs of training\")\nparser.add_argument(\"--dataset_name\", type=str, default=\"simulated2real\", help=\"name of the dataset\")\nparser.add_argument(\"--batch_size\", type=int, default=2, help=\"size of the batches\")\nparser.add_argument(\"--lr\", type=float, default=0.0002, help=\"adam: learning rate\")\nparser.add_argument(\"--b1\", type=float, default=0.5, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--b2\", type=float, default=0.999, help=\"adam: decay of first order momentum of gradient\")\nparser.add_argument(\"--decay_epoch\", type=int, default=15, help=\"epoch from which to start lr decay\")\nparser.add_argument(\"--n_cpu\", type=int, default=8, help=\"number of cpu threads to use during batch generation\")\nparser.add_argument(\"--img_height\", type=int, default=240, help=\"size of image height\")\nparser.add_argument(\"--img_width\", type=int, default=320, help=\"size of image width\")\nparser.add_argument(\"--channels\", type=int, default=3, help=\"number of image channels\")\nparser.add_argument(\"--sample_interval\", type=int, default=1000, help=\"interval between saving generator outputs\")\nparser.add_argument(\"--checkpoint_interval\", type=int, default=1, help=\"interval between saving model checkpoints\")\nparser.add_argument(\"--n_residual_blocks\", type=int, default=9, help=\"number of residual blocks in generator\")\nparser.add_argument(\"--lambda_cyc\", type=float, default=10.0, help=\"cycle loss weight\")\nparser.add_argument(\"--lambda_id\", type=float, default=5.0, help=\"identity loss weight\")\nopt = parser.parse_args()\nprint(opt)\n\n# Create sample and checkpoint directories\nos.makedirs(\"images/%s\" % opt.dataset_name, exist_ok=True)\nos.makedirs(\"saved_models/%s\" % opt.dataset_name, exist_ok=True)\n\n# Losses\ncriterion_GAN = torch.nn.MSELoss()\ncriterion_cycle = torch.nn.L1Loss()\ncriterion_identity = torch.nn.L1Loss()\n\ncuda = torch.cuda.is_available()\n\ninput_shape = (opt.channels, opt.img_height, opt.img_width)\n\n# Initialize generator and discriminator\nG_AB = GeneratorResNet(input_shape, opt.n_residual_blocks)\nG_BA = GeneratorResNet(input_shape, opt.n_residual_blocks)\nD_A = Discriminator(input_shape)\nD_B = Discriminator(input_shape)\n\nif cuda:\n G_AB = G_AB.cuda()\n G_BA = G_BA.cuda()\n D_A = D_A.cuda()\n D_B = D_B.cuda()\n criterion_GAN.cuda()\n criterion_cycle.cuda()\n 
criterion_identity.cuda()\n\nif opt.epoch != 0:\n # Load pretrained models\n G_AB.load_state_dict(torch.load(\"saved_models/%s/G_AB_%d.pth\" % (opt.dataset_name, opt.epoch)))\n G_BA.load_state_dict(torch.load(\"saved_models/%s/G_BA_%d.pth\" % (opt.dataset_name, opt.epoch)))\n D_A.load_state_dict(torch.load(\"saved_models/%s/D_A_%d.pth\" % (opt.dataset_name, opt.epoch)))\n D_B.load_state_dict(torch.load(\"saved_models/%s/D_B_%d.pth\" % (opt.dataset_name, opt.epoch)))\nelse:\n # Initialize weights\n G_AB.apply(weights_init_normal)\n G_BA.apply(weights_init_normal)\n D_A.apply(weights_init_normal)\n D_B.apply(weights_init_normal)\n\n# Optimizers\noptimizer_G = torch.optim.Adam(\n itertools.chain(G_AB.parameters(), G_BA.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2)\n)\noptimizer_D_A = torch.optim.Adam(D_A.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\noptimizer_D_B = torch.optim.Adam(D_B.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))\n\n# Learning rate update schedulers\nlr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(\n optimizer_G, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step\n)\nlr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR(\n optimizer_D_A, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step\n)\nlr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR(\n optimizer_D_B, lr_lambda=LambdaLR(opt.n_epochs, opt.epoch, opt.decay_epoch).step\n)\n\nTensor = torch.cuda.FloatTensor if cuda else torch.Tensor\n\n# Buffers of previously generated samples\nfake_A_buffer = ReplayBuffer()\nfake_B_buffer = ReplayBuffer()\n\n# Image transformations\ntransforms_ = [\n transforms.Resize((int(opt.img_height * 1.12),int(opt.img_width * 1.12)) , Image.BICUBIC),\n transforms.RandomCrop((opt.img_height, opt.img_width)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n]\n\n# Training data loader\ndataloader = DataLoader(\n ImageDataset(\"../../data/%s\" % opt.dataset_name, transforms_=transforms_, unaligned=True),\n batch_size=opt.batch_size,\n shuffle=True,\n num_workers=opt.n_cpu,\n)\n# Test data loader\nval_dataloader = DataLoader(\n ImageDataset(\"../../data/%s\" % opt.dataset_name, transforms_=transforms_, unaligned=True, mode=\"test\"),\n batch_size=5,\n shuffle=True,\n num_workers=1,\n)\n\n\ndef sample_images(batches_done):\n \"\"\"Saves a generated sample from the test set\"\"\"\n imgs = next(iter(val_dataloader))\n G_AB.eval()\n G_BA.eval()\n real_A = Variable(imgs[\"A\"].type(Tensor))\n fake_B = G_AB(real_A)\n real_B = Variable(imgs[\"B\"].type(Tensor))\n fake_A = G_BA(real_B)\n # Arange images along x-axis\n real_A = make_grid(real_A, nrow=5, normalize=True)\n real_B = make_grid(real_B, nrow=5, normalize=True)\n fake_A = make_grid(fake_A, nrow=5, normalize=True)\n fake_B = make_grid(fake_B, nrow=5, normalize=True)\n # Arange images along y-axis\n image_grid = torch.cat((real_A, fake_B, real_B, fake_A), 1)\n save_image(image_grid, \"images/%s/%s.png\" % (opt.dataset_name, batches_done), normalize=False)\n\n\n# ----------\n# Training\n# ----------\n\nprev_time = time.time()\nfor epoch in range(opt.epoch, opt.n_epochs):\n for i, batch in enumerate(dataloader):\n\n # Set model input\n real_A = Variable(batch[\"A\"].type(Tensor))\n real_B = Variable(batch[\"B\"].type(Tensor))\n\n # Adversarial ground truths\n valid = Variable(Tensor(np.ones((real_A.size(0), *D_A.output_shape))), requires_grad=False)\n fake = Variable(Tensor(np.zeros((real_A.size(0), *D_A.output_shape))), 
requires_grad=False)\n\n # ------------------\n # Train Generators\n # ------------------\n\n G_AB.train()\n G_BA.train()\n\n optimizer_G.zero_grad()\n\n # Identity loss\n loss_id_A = criterion_identity(G_BA(real_A), real_A)\n loss_id_B = criterion_identity(G_AB(real_B), real_B)\n\n loss_identity = (loss_id_A + loss_id_B) / 2\n\n # GAN loss\n fake_B = G_AB(real_A)\n loss_GAN_AB = criterion_GAN(D_B(fake_B), valid)\n fake_A = G_BA(real_B)\n loss_GAN_BA = criterion_GAN(D_A(fake_A), valid)\n\n loss_GAN = (loss_GAN_AB + loss_GAN_BA) / 2\n\n\n # Cycle loss\n recov_A = G_BA(fake_B)\n loss_cycle_A = criterion_cycle(recov_A, real_A)\n recov_B = G_AB(fake_A)\n loss_cycle_B = criterion_cycle(recov_B, real_B)\n\n loss_cycle = (loss_cycle_A + loss_cycle_B) / 2\n\n # # Cycle loss\n # recov_A = G_BA(fake_B)\n # loss_cycle_A = criterion_cycle(recov_A, real_A)\n # # recov_B = G_AB(fake_A)\n # loss_cycle_B = 0 # criterion_cycle(recov_B, real_B)\n\n # loss_cycle = (loss_cycle_A + loss_cycle_B) * 5 # / 2\n\n # Total loss\n loss_G = loss_GAN + opt.lambda_cyc * loss_cycle + opt.lambda_id * loss_identity\n\n loss_G.backward()\n optimizer_G.step()\n\n # -----------------------\n # Train Discriminator A\n # -----------------------\n\n optimizer_D_A.zero_grad()\n\n # Real loss\n loss_real = criterion_GAN(D_A(real_A), valid)\n # Fake loss (on batch of previously generated samples)\n fake_A_ = fake_A_buffer.push_and_pop(fake_A)\n loss_fake = criterion_GAN(D_A(fake_A_.detach()), fake)\n # Total loss\n loss_D_A = (loss_real + loss_fake) / 2\n\n loss_D_A.backward()\n optimizer_D_A.step()\n\n # -----------------------\n # Train Discriminator B\n # -----------------------\n\n optimizer_D_B.zero_grad()\n\n # Real loss\n loss_real = criterion_GAN(D_B(real_B), valid)\n # Fake loss (on batch of previously generated samples)\n fake_B_ = fake_B_buffer.push_and_pop(fake_B)\n loss_fake = criterion_GAN(D_B(fake_B_.detach()), fake)\n # Total loss\n loss_D_B = (loss_real + loss_fake) / 2\n\n loss_D_B.backward()\n optimizer_D_B.step()\n\n loss_D = (loss_D_A + loss_D_B) / 2\n\n # --------------\n # Log Progress\n # --------------\n\n # Determine approximate time left\n batches_done = epoch * len(dataloader) + i\n batches_left = opt.n_epochs * len(dataloader) - batches_done\n time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))\n prev_time = time.time()\n\n # Print log\n sys.stdout.write(\n \"\\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, adv: %f, cycle: %f, identity: %f] ETA: %s\"\n % (\n epoch,\n opt.n_epochs,\n i,\n len(dataloader),\n loss_D.item(),\n loss_G.item(),\n loss_GAN.item(),\n loss_cycle.item(),\n loss_identity.item(),\n time_left,\n )\n )\n\n # If at sample interval save image\n if batches_done % opt.sample_interval == 0:\n sample_images(batches_done)\n\n # Update learning rates\n lr_scheduler_G.step()\n lr_scheduler_D_A.step()\n lr_scheduler_D_B.step()\n\n if opt.checkpoint_interval != -1 and epoch % opt.checkpoint_interval == 0:\n # Save model checkpoints\n torch.save(G_AB.state_dict(), \"saved_models/%s/G_AB_%d.pth\" % (opt.dataset_name, epoch))\n torch.save(G_BA.state_dict(), \"saved_models/%s/G_BA_%d.pth\" % (opt.dataset_name, epoch))\n torch.save(D_A.state_dict(), \"saved_models/%s/D_A_%d.pth\" % (opt.dataset_name, epoch))\n torch.save(D_B.state_dict(), \"saved_models/%s/D_B_%d.pth\" % (opt.dataset_name, epoch))\n" ]
[ [ "numpy.random.uniform", "torch.nn.L1Loss", "torch.cuda.is_available", "torch.load" ], [ "torch.load", "torch.cat", "torch.cuda.is_available", "torch.nn.L1Loss", "torch.nn.MSELoss" ] ]
daiki98/finn
[ "af783db8dc2a1d2e95bd569d39464b935520b6d2" ]
[ "src/finn/transformation/streamline/reorder.py" ]
[ "# Copyright (c) 2020, Xilinx\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of FINN nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport numpy as np\nimport warnings\nfrom onnx import helper as oh\nfrom onnx import TensorProto\n\nfrom finn.transformation.base import Transformation\nimport finn.core.data_layout as DataLayout\nfrom finn.transformation.infer_shapes import InferShapes\nfrom finn.transformation.infer_datatypes import InferDataTypes\nfrom finn.transformation.infer_data_layouts import InferDataLayouts\nfrom finn.core.datatype import DataType\nfrom finn.core.onnx_exec import execute_node\nfrom finn.util.basic import get_by_name\nfrom finn.custom_op.registry import getCustomOp\n\n\nclass MoveAddPastMul(Transformation):\n \"\"\"Move add operations past multiply operations on linear segments of the graph.\n The aim is to have them next to each other such that they can be collapsed into\n a single add.\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n for n in graph.node:\n node_ind += 1\n if (\n n.op_type == \"Add\"\n and not model.is_fork_node(n)\n and not model.is_join_node(n)\n ):\n consumer = model.find_consumer(n.output[0])\n if (\n consumer is not None\n and consumer.op_type == \"Mul\"\n and not model.is_join_node(consumer)\n ):\n # have: (x) -> add(,B) -> (x+B) -> mul(,A) -> (xA+BA)\n # want: (x) -> mul(,A) -> (xA) -> add(,BA) -> (xA+BA)\n # assume input 0 is from the previous layer, input 1 is the\n # trained (constant) parameter\n mul_weight_name = consumer.input[1]\n add_weight_name = n.input[1]\n A = model.get_initializer(mul_weight_name)\n B = model.get_initializer(add_weight_name)\n if (A is None) or (B is None):\n warnings.warn(\n \"Mul or add does not have constant params, skipping\"\n )\n continue\n start_name = n.input[0]\n middle_name = n.output[0]\n end_name = consumer.output[0]\n # compute new param value for add\n BA = B * A\n\n # make and insert new nodes\n new_mul = oh.make_node(\n \"Mul\",\n [start_name, mul_weight_name],\n [middle_name],\n name=consumer.name,\n )\n new_add = oh.make_node(\n \"Add\", [middle_name, 
add_weight_name], [end_name], name=n.name\n )\n graph.node.insert(node_ind, new_mul)\n graph.node.insert(node_ind + 1, new_add)\n # replace add value\n model.set_initializer(add_weight_name, BA)\n # remove old nodes\n graph.node.remove(n)\n graph.node.remove(consumer)\n graph_modified = True\n\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveScalarMulPastMatMul(Transformation):\n \"\"\"Move scalar mul operations past matmul operations. We want to have muls\n next to each other such that they can be collapsed into a single mul.\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n for n in graph.node:\n node_ind += 1\n if (\n n.op_type == \"Mul\"\n and not model.is_fork_node(n)\n and not model.is_join_node(n)\n ):\n consumer = model.find_consumer(n.output[0])\n if (\n consumer is not None\n and consumer.op_type == \"MatMul\"\n and not model.is_join_node(consumer)\n ):\n mul_weight_name = n.input[1]\n matmul_weight_name = consumer.input[1]\n A = model.get_initializer(mul_weight_name)\n W = model.get_initializer(matmul_weight_name)\n if (A is None) or (W is None):\n warnings.warn(\"MatMul or Mul params are not constant, skipping\")\n continue\n start_name = n.input[0]\n middle_name = n.output[0]\n end_name = consumer.output[0]\n mm_out_shape = model.get_tensor_shape(end_name)\n if all(x == 1 for x in A.shape):\n # if the mul is scalar, we can simply swap the order of ops\n # make and insert new nodes\n new_matmul = oh.make_node(\n \"MatMul\",\n [start_name, matmul_weight_name],\n [middle_name],\n name=consumer.name,\n )\n new_mul = oh.make_node(\n \"Mul\",\n [middle_name, mul_weight_name],\n [end_name],\n name=n.name,\n )\n graph.node.insert(node_ind, new_matmul)\n graph.node.insert(node_ind + 1, new_mul)\n model.set_tensor_shape(middle_name, mm_out_shape)\n # remove old nodes\n graph.node.remove(n)\n graph.node.remove(consumer)\n graph_modified = True\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveScalarAddPastMatMul(Transformation):\n \"\"\"Move scalar add operations past matmul operations. 
We want to have adds\n next to each other such that they can be collapsed into a single add.\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n for n in graph.node:\n node_ind += 1\n if (\n n.op_type == \"Add\"\n and not model.is_fork_node(n)\n and not model.is_join_node(n)\n ):\n consumer = model.find_consumer(n.output[0])\n if (\n consumer is not None\n and consumer.op_type == \"MatMul\"\n and not model.is_join_node(consumer)\n ):\n add_weight_name = n.input[1]\n matmul_weight_name = consumer.input[1]\n A = model.get_initializer(add_weight_name)\n W = model.get_initializer(matmul_weight_name)\n if (A is None) or (W is None):\n warnings.warn(\"MatMul or Add params are not constant, skipping\")\n continue\n start_name = n.input[0]\n middle_name = n.output[0]\n end_name = consumer.output[0]\n mm_out_shape = model.get_tensor_shape(end_name)\n if all(x == 1 for x in A.shape):\n # if the add is scalar, we can move it past the matmul\n # by taking it past the matmul with a dot product\n Anew = np.dot(A * np.ones(W.shape[0], dtype=np.float32), W)\n # update the add weight\n model.set_initializer(add_weight_name, Anew)\n new_matmul = oh.make_node(\n \"MatMul\",\n [start_name, matmul_weight_name],\n [middle_name],\n name=consumer.name,\n )\n new_add = oh.make_node(\n \"Add\",\n [middle_name, add_weight_name],\n [end_name],\n name=n.name,\n )\n graph.node.insert(node_ind, new_matmul)\n graph.node.insert(node_ind + 1, new_add)\n model.set_tensor_shape(middle_name, mm_out_shape)\n # remove old nodes\n graph.node.remove(n)\n graph.node.remove(consumer)\n graph_modified = True\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveAddPastConv(Transformation):\n \"\"\"Move scalar and channelwise add operations past conv operations. 
We want to have adds\n next to each other such that they can be collapsed into a single add.\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n for n in graph.node:\n node_ind += 1\n if (\n n.op_type == \"Add\"\n and not model.is_fork_node(n)\n and not model.is_join_node(n)\n ):\n consumer = model.find_consumer(n.output[0])\n if (\n consumer is not None\n and consumer.op_type == \"Conv\"\n and not model.is_join_node(consumer)\n ):\n conv_node = consumer\n add_node = n\n add_weight_name = n.input[1]\n conv_in_name = consumer.input[0]\n conv_in_shape = model.get_tensor_shape(conv_in_name)\n # assume datalayout to be NCHW\n channels = conv_in_shape[1]\n A = model.get_initializer(add_weight_name)\n if A is None:\n warnings.warn(\"Add param is not constant, skipping\")\n continue\n start_name = n.input[0]\n end_name = consumer.output[0]\n conv_out_shape = model.get_tensor_shape(end_name)\n\n using_padding = True\n pads = list(get_by_name(consumer.attribute, \"pads\").ints)\n if sum(pads) == 0:\n using_padding = False\n if (\n all(x == 1 for x in A.shape) or A.shape == (1, channels, 1, 1)\n ) and not using_padding:\n # create a tensor filled with the add constant, in\n # the shape expected by the convolution\n conv_in_const = np.zeros(conv_in_shape, dtype=np.float32)\n if A.shape == (1, channels, 1, 1):\n for ch in range(channels):\n conv_in_const[0][ch].fill(A[0][ch].item())\n else:\n conv_in_const.fill(A.item())\n # create an execution context and put in const input\n exec_ctx = model.make_empty_exec_context()\n exec_ctx[conv_in_name] = conv_in_const\n # execute the conv node only\n execute_node(conv_node, exec_ctx, model.graph)\n # retrieve the conv output\n Anew = exec_ctx[end_name]\n\n # strip out repetition if no padding\n Anew = Anew[0, :, 0, 0].reshape(1, -1, 1, 1)\n # update the add weight\n model.set_initializer(add_weight_name, Anew)\n # rewire add input to be conv input\n conv_node.input[0] = start_name\n model.set_tensor_shape(start_name, conv_in_shape)\n # use old conv input tensor as conv output\n conv_node.output[0] = conv_in_name\n model.set_tensor_shape(conv_in_name, conv_out_shape)\n # use new conv output as new add node input\n add_node.input[0] = conv_in_name\n # use old conv output as new add node output\n add_node.output[0] = end_name\n # move add node past conv node\n graph.node.remove(add_node)\n graph.node.insert(node_ind, add_node)\n graph_modified = True\n\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveScalarMulPastConv(Transformation):\n \"\"\"Move scalar mul operations past conv operations. 
We want to have muls\n next to each other such that they can be collapsed into a single mul.\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n for n in graph.node:\n node_ind += 1\n if (\n n.op_type == \"Mul\"\n and not model.is_fork_node(n)\n and not model.is_join_node(n)\n ):\n consumer = model.find_consumer(n.output[0])\n if (\n consumer is not None\n and consumer.op_type == \"Conv\"\n and not model.is_join_node(consumer)\n ):\n mul_weight_name = n.input[1]\n A = model.get_initializer(mul_weight_name)\n if A is None:\n warnings.warn(\"Mul param is not constant, skipping\")\n continue\n conv_node = consumer\n mul_node = n\n start_name = mul_node.input[0]\n conv_in_name = conv_node.input[0]\n conv_in_shape = model.get_tensor_shape(conv_in_name)\n conv_out_name = conv_node.output[0]\n conv_out_shape = model.get_tensor_shape(conv_out_name)\n if all(x == 1 for x in A.shape):\n # if the mul is scalar, we can simply swap the order of ops\n # rewire mul input to be conv input\n conv_node.input[0] = start_name\n model.set_tensor_shape(start_name, conv_in_shape)\n # use old conv input tensor as conv output\n conv_node.output[0] = conv_in_name\n model.set_tensor_shape(conv_in_name, conv_out_shape)\n # use new conv output as new mul node input\n mul_node.input[0] = conv_in_name\n # use old conv output as new mul node output\n mul_node.output[0] = conv_out_name\n # move add node past conv node\n graph.node.remove(mul_node)\n graph.node.insert(node_ind, mul_node)\n graph_modified = True\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveMulPastDWConv(Transformation):\n \"\"\"Move channelwise mul operations past depthwise conv operations. We want to have muls\n next to each other such that they can be collapsed into a single mul.\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n for n in graph.node:\n node_ind += 1\n if (\n n.op_type == \"Mul\"\n and not model.is_fork_node(n)\n and not model.is_join_node(n)\n ):\n consumer = model.find_consumer(n.output[0])\n if (\n consumer is not None\n and consumer.op_type == \"Conv\"\n and not model.is_join_node(consumer)\n ):\n mul_weight_name = n.input[1]\n A = model.get_initializer(mul_weight_name)\n if A is None:\n warnings.warn(\n \"\"\"Mul weight tensor is not set. 
If it is a constant,\n please use set_initializer to set the tensor.\"\"\"\n )\n continue\n conv_node = consumer\n mul_node = n\n start_name = mul_node.input[0]\n conv_in_name = conv_node.input[0]\n conv_in_shape = model.get_tensor_shape(conv_in_name)\n ifm_ch = conv_in_shape[1]\n group_attribute = get_by_name(consumer.attribute, \"group\")\n if group_attribute is None:\n continue\n group_attribute = group_attribute.i\n conv_out_name = conv_node.output[0]\n conv_out_shape = model.get_tensor_shape(conv_out_name)\n if A.shape == (1, ifm_ch, 1, 1) and ifm_ch == group_attribute:\n # if the mul is channelwise and conv is depthwise,\n # we can simply swap the order of ops\n # rewire mul input to be conv input\n conv_node.input[0] = start_name\n model.set_tensor_shape(start_name, conv_in_shape)\n model.set_tensor_datatype(start_name, DataType.FLOAT32)\n # use old conv input tensor as conv output\n conv_node.output[0] = conv_in_name\n model.set_tensor_shape(conv_in_name, conv_out_shape)\n model.set_tensor_datatype(conv_in_name, DataType.FLOAT32)\n # use new conv output as new mul node input\n mul_node.input[0] = conv_in_name\n # use old conv output as new mul node output\n mul_node.output[0] = conv_out_name\n model.set_tensor_datatype(conv_out_name, DataType.FLOAT32)\n # move mul node past conv node\n graph.node.remove(mul_node)\n graph.node.insert(node_ind, mul_node)\n graph_modified = True\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveLinearPastEltwiseAdd(Transformation):\n \"\"\"Move linear operations (mul, add) past elementwise add operations where possible.\n Specifically,matches and transforms the following patterns:\n (x*C) + (y*C) -> (x + y) * C\n (x+A) + (y+B) -> (x + y) + (A + B)\n where x and y are dynamic inputs, A, B, C are constant tensors (in general).\n \"\"\"\n\n def move_node(self, graph, n, prod0, prod1, node_ind):\n # found! 
move one of the muls to output, remove the other one\n lin0_in0 = prod0.input[0]\n lin1_in0 = prod1.input[0]\n in0 = n.input[0]\n out = n.output[0]\n # TODO: check shapes don't change through scalar mul or add\n # connect the eltwise add inputs to mul inputs\n n.input[0] = lin0_in0\n n.input[1] = lin1_in0\n # connect mul0 output to eltwise add output\n prod0.output[0] = out\n # connect the input of mul0 and output of eltwise add together\n n.output[0] = in0\n prod0.input[0] = in0\n # move prod0 node past eltwise add node, and remove prod1\n graph.node.remove(prod1)\n graph.node.remove(prod0)\n graph.node.insert(node_ind - 2, prod0)\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n nodes = [n for n in graph.node]\n for n in nodes:\n node_ind += 1\n if n.op_type == \"Add\":\n # check for tensors on both inputs (eltwise add)\n # scalar add has an initializer on one input\n in0 = n.input[0]\n in1 = n.input[1]\n if in0 is None or in1 is None:\n continue\n A = model.get_initializer(in0)\n B = model.get_initializer(in1)\n if A is not None or B is not None:\n continue\n # check for mul with same initializer on both inputs\n prod0 = model.find_producer(in0)\n prod1 = model.find_producer(in1)\n # Also check case when both branches are empty and come\n # from the same node: (prod0 == prod1)\n # Other transform should handle that\n if prod0 is None or prod1 is None or (prod0 == prod1):\n continue\n init0 = model.get_initializer(prod0.input[1])\n init1 = model.get_initializer(prod1.input[1])\n # if either initializer is None, skip\n if init0 is None or init1 is None:\n continue\n if prod0.op_type == \"Mul\" and prod1.op_type == \"Mul\":\n if np.array_equal(init0, init1):\n self.move_node(graph, n, prod0, prod1, node_ind)\n node_ind -= 1\n graph_modified = True\n elif prod0.op_type == \"Add\" and prod1.op_type == \"Add\":\n init = init0 + init1\n # update initializer of prod0, which we'll move\n model.set_initializer(prod0.input[1], init)\n self.move_node(graph, n, prod0, prod1, node_ind)\n node_ind -= 1\n graph_modified = True\n else:\n continue\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveScalarLinearPastInvariants(Transformation):\n \"\"\"Move scalar linear operations (mul, add) past functions which are invariant\n to them. 
Specifically, matches and transforms the following patterns:\n f(x*C) -> f(x) * C\n f(x+C) -> f(x) + C\n where x is a dynamic input, C is a constant tensor.\n Known f which obey this property are: Reshape, Flatten, Transpose,\n GlobalAveragePool\n \"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n nodes = [n for n in graph.node]\n for n in nodes:\n node_ind += 1\n if (\n n.op_type == \"GlobalAveragePool\"\n or n.op_type == \"Reshape\"\n or n.op_type == \"Transpose\"\n or n.op_type == \"Flatten\"\n ):\n in0 = n.input[0]\n if in0 is None:\n continue\n # find and check producer on our input\n prod0 = model.find_producer(in0)\n if prod0 is None:\n continue\n\n if prod0.op_type in [\"Mul\", \"Add\", \"Div\"]:\n # check if second input of producer is an initializer\n init0 = model.get_initializer(prod0.input[1])\n # if either initializer is None, skip\n if init0 is None:\n continue\n # if initializer is not scalar, skip\n if np.prod(init0.shape) != 1:\n continue\n # move prod0 from input to output,\n old_prod0_in = prod0.input[0]\n old_prod0_out = prod0.output[0]\n scalar_op_odt = model.get_tensor_datatype(old_prod0_out)\n old_n_out = n.output[0]\n in_shape = model.get_tensor_shape(n.input[0])\n out_shape = model.get_tensor_shape(n.output[0])\n n.input[0] = old_prod0_in\n n.output[0] = old_prod0_out\n prod0.input[0] = old_prod0_out\n prod0.output[0] = old_n_out\n model.set_tensor_shape(n.input[0], in_shape)\n model.set_tensor_shape(n.output[0], out_shape)\n model.set_tensor_shape(prod0.output[0], out_shape)\n model.set_tensor_datatype(prod0.output[0], scalar_op_odt)\n model.set_tensor_datatype(n.output[0], DataType.FLOAT32)\n graph.node.remove(prod0)\n graph.node.insert(node_ind - 1, prod0)\n graph_modified = True\n else:\n continue\n if graph_modified:\n model = model.transform(InferShapes())\n model = model.transform(InferDataTypes())\n return (model, graph_modified)\n\n\nclass MakeMaxPoolNHWC(Transformation):\n \"\"\"Convert (MaxPool, NHWCTranpose) into (MaxPoolNHWC).\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n for n in graph.node:\n node_ind += 1\n if n.op_type == \"MaxPool\":\n consumer = model.find_consumer(n.output[0])\n if consumer is not None and consumer.op_type == \"Transpose\":\n perms = list(get_by_name(consumer.attribute, \"perm\").ints)\n if perms == [0, 2, 3, 1]:\n n.op_type = \"MaxPoolNHWC\"\n n.domain = \"finn.custom_op.general\"\n start_name = n.input[0]\n mid_name = consumer.input[0]\n end_name = consumer.output[0]\n (b, c, hi, wi) = model.get_tensor_shape(start_name)\n (b, c, ho, wo) = model.get_tensor_shape(mid_name)\n consumer.input[0] = start_name\n consumer.output[0] = mid_name\n n.input[0] = mid_name\n n.output[0] = end_name\n model.set_tensor_shape(mid_name, (b, hi, wi, c))\n model.set_tensor_shape(end_name, (b, ho, wo, c))\n graph.node.remove(consumer)\n graph.node.insert(node_ind - 1, consumer)\n graph_modified = True\n return (model, graph_modified)\n\n\nclass MoveOpPastFork(Transformation):\n \"\"\"Move node operations past graph forks. 
Used when a node before a fork\n can be merged with nodes in the branches\n \"\"\"\n\n def __init__(self, op_name_list):\n super().__init__()\n self.ops_to_move = op_name_list\n\n def apply(self, model):\n graph = model.graph\n graph_modified = False\n nodes = [n for n in graph.node]\n node_ind = 0\n for n in nodes:\n node_ind += 1\n if (\n n.op_type in self.ops_to_move\n and model.is_fork_node(n)\n and not model.is_join_node(n)\n ):\n\n # Restrict this transform to operations with constant parameters\n # Assuming parameters is in input 1\n op_init_param = model.get_initializer(n.input[1])\n if op_init_param is None:\n continue\n\n # Check case when branches are empty and go\n # to the same node\n consumers = model.find_consumers(n.output[0])\n unique_consumer = True\n for consum_node in consumers[1:]:\n if consumers[0] != consum_node:\n unique_consumer = False\n break\n\n if unique_consumer:\n continue\n\n for consumer_node in consumers[1:]:\n # create new node\n new_param_name = model.make_new_valueinfo_name()\n new_output_tensor_name = model.make_new_valueinfo_name()\n new_node = oh.make_node(\n n.op_type,\n [n.input[0], new_param_name],\n [new_output_tensor_name],\n )\n graph.node.insert(node_ind, new_node)\n node_ind += 1\n model.set_initializer(new_param_name, op_init_param)\n\n # change consumer input tensor\n graph.node.remove(consumer_node)\n for idx, consumer_input in enumerate(consumer_node.input):\n if consumer_input == n.output[0]:\n consumer_node.input[idx] = new_output_tensor_name\n break\n else:\n raise Exception(\n \"Consumer should have the current node output as input\"\n )\n\n graph.node.insert(node_ind, consumer_node)\n\n graph_modified = True\n\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveAddPastFork(MoveOpPastFork):\n def __init__(self):\n super().__init__([\"Add\"])\n\n\nclass MoveMulPastFork(MoveOpPastFork):\n def __init__(self):\n super().__init__([\"Mul\"])\n\n\nclass MoveLinearPastFork(MoveOpPastFork):\n def __init__(self):\n super().__init__([\"Add\", \"Mul\"])\n\n\nclass MoveMaxPoolPastMultiThreshold(Transformation):\n \"\"\"Move MaxPool nodes past MultiThreshold nodes on linear segments of the graph.\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n nodes = [n for n in graph.node]\n for n in nodes:\n node_ind += 1\n if n.op_type == \"MaxPool\" and not model.is_fork_node(n):\n consumer = model.find_consumer(n.output[0])\n pads = get_by_name(n.attribute, \"pads\")\n has_padding = False\n if pads is not None:\n pads = list(pads.ints)\n has_padding = np.prod(pads) != 0\n if consumer is not None and consumer.op_type == \"MultiThreshold\":\n mt_out = consumer.output[0]\n mt_odt = model.get_tensor_datatype(mt_out)\n if mt_odt.signed() and has_padding:\n warnings.warn(\n \"Skipping padded MaxPool + signed-output MultiThreshold\"\n )\n continue\n # check for non-decreasing thresholds and nonnegative\n # scale factor in MultiThreshold\n # otherwise we cannot do the reordering\n T = model.get_initializer(consumer.input[1])\n T_sorted = np.sort(T, axis=1)\n assert (\n T == T_sorted\n ).all(), \"MultiThreshold must have non-decreasing thresholds\"\n mt_inst = getCustomOp(consumer)\n if mt_inst.get_nodeattr(\"out_scale\") < 0:\n warnings.warn(\"Skipping MultiThreshold with negative out_scale\")\n continue\n\n # remove old nodes\n graph.node.remove(n)\n graph.node.remove(consumer)\n\n # swap conections\n group_in = n.input[0]\n # new tensor because dims change\n group_middle = 
model.make_new_valueinfo_name()\n group_out = consumer.output[0]\n\n consumer.input[0] = group_in\n consumer.output[0] = group_middle\n\n n.input[0] = group_middle\n n.output[0] = group_out\n\n # insert them back in\n graph.node.insert(node_ind - 1, consumer)\n graph.node.insert(node_ind, n)\n\n graph_modified = True\n\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveFlattenPastTopK(Transformation):\n \"\"\"Move flatten node past a succeeding topk node, if the \"axis\" attribute in topk\n is set to -1 and the data layout before the flatten is NHWC with H=W=1\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n for n in graph.node:\n node_ind += 1\n if n.op_type == \"Flatten\":\n consumer = model.find_consumer(n.output[0])\n if consumer is not None and consumer.op_type == \"TopK\":\n axis = get_by_name(consumer.attribute, \"axis\")\n if axis is None or axis.i != -1:\n continue\n start_name = n.input[0]\n data_layout = model.get_tensor_layout(start_name)\n if data_layout != DataLayout.NHWC:\n warnings.warn(\n \"\"\"Transformation can't be applied. The input\n to flatten has to have DataLayout.NHWC\"\"\"\n )\n continue\n (b, h, w, c) = model.get_tensor_shape(start_name)\n if h != 1 or w != 1:\n continue\n # get parameter k from topk\n k = model.get_tensor_shape(consumer.output[1])[-1]\n\n # swap conections\n # new tensor because dims change\n middle_name = model.make_new_valueinfo_name()\n topk_indices = oh.make_tensor_value_info(\n middle_name, TensorProto.INT64, [b, h, w, k]\n )\n end_name = consumer.output[1]\n graph.value_info.append(topk_indices)\n\n # remove old nodes\n graph.node.remove(n)\n graph.node.remove(consumer)\n\n # set inputs and outputs correctly\n consumer.input[0] = start_name\n consumer.output[1] = middle_name\n model.set_tensor_shape(consumer.output[0], (b, h, w, k))\n\n n.input[0] = middle_name\n n.output[0] = end_name\n\n # insert them back in\n graph.node.insert(node_ind - 1, consumer)\n graph.node.insert(node_ind, n)\n\n graph_modified = True\n\n model = model.transform(InferShapes())\n return (model, graph_modified)\n\n\nclass MoveFlattenPastAffine(Transformation):\n \"\"\"Moves a node that implements a (1, -1) reshape past a MatMul, Mul or Add node.\"\"\"\n\n def apply(self, model):\n graph = model.graph\n graph_modified = False\n node_ind = 0\n for n in graph.node:\n node_ind += 1\n if (\n n.op_type == \"Flatten\"\n and not model.is_fork_node(n)\n and not model.is_join_node(n)\n ):\n consumer = model.find_consumer(n.output[0])\n if (\n consumer is not None\n and (\n consumer.op_type == \"MatMul\"\n or consumer.op_type == \"Mul\"\n or consumer.op_type == \"Add\"\n )\n and not model.is_join_node(consumer)\n ):\n # move flatten past operation and rewire tensors\n start_name = n.input[0]\n # check if datalyout is set to NHWC and H=W=1\n datalayout = model.get_tensor_layout(start_name)\n if datalayout == DataLayout.NHWC:\n (b, h, w, c) = model.get_tensor_shape(start_name)\n if h != 1 or w != 1:\n warnings.warn(\n \"\"\"The Transformation can only be performed if\n H=W=1.\"\"\"\n )\n continue\n else:\n warnings.warn(\n \"\"\"The Transformation can only be performed on\n operations that operate on data layout NHWC.\"\"\"\n )\n continue\n middle_name = n.output[0]\n end_name = consumer.output[0]\n op_param_name = consumer.input[1]\n A = model.get_initializer(op_param_name)\n if A is None:\n warnings.warn(\"Param is not constant, skipping\")\n continue\n op_in_dt = 
model.get_tensor_datatype(consumer.input[0])\n op_out_dt = model.get_tensor_datatype(consumer.output[0])\n start_shape = model.get_tensor_shape(start_name)\n dummy_in = np.random.uniform(low=0, high=1, size=(start_shape))\n\n if consumer.op_type == \"MatMul\":\n dummy_out = np.matmul(dummy_in, A)\n elif consumer.op_type == \"Mul\":\n dummy_out = dummy_in * A\n elif consumer.op_type == \"Add\":\n dummy_out = dummy_in + A\n\n new_op = oh.make_node(\n consumer.op_type,\n [start_name, op_param_name],\n [middle_name],\n name=consumer.name,\n )\n new_flatten = oh.make_node(\"Flatten\", [middle_name], [end_name])\n graph.node.insert(node_ind, new_op)\n graph.node.insert(node_ind + 1, new_flatten)\n model.set_tensor_shape(middle_name, dummy_out.shape)\n # because a flatten node doesn't change the datatype we need\n # only the datatype of the op node\n model.set_tensor_datatype(start_name, op_in_dt)\n model.set_tensor_datatype(middle_name, op_out_dt)\n model.set_tensor_datatype(end_name, op_out_dt)\n # set datalayout\n model.set_tensor_layout(start_name, DataLayout.NHWC)\n model.set_tensor_layout(middle_name, DataLayout.NHWC)\n # remove old nodes\n graph.node.remove(n)\n graph.node.remove(consumer)\n graph_modified = True\n\n model = model.transform(InferShapes())\n model = model.transform(InferDataTypes())\n model = model.transform(InferDataLayouts())\n return (model, graph_modified)\n\n\nclass MoveTransposePastScalarMul(Transformation):\n \"\"\"Moves a Transpose node past a scalar Mul node\"\"\"\n\n def apply(self, model):\n graph = model.graph\n node_ind = 0\n graph_modified = False\n for n in graph.node:\n node_ind += 1\n if (\n n.op_type == \"Transpose\"\n and not model.is_fork_node(n)\n and not model.is_join_node(n)\n ):\n consumer = model.find_consumer(n.output[0])\n if (\n consumer is not None\n and consumer.op_type == \"Mul\"\n and not model.is_join_node(consumer)\n ):\n mul_weight_name = consumer.input[1]\n A = model.get_initializer(mul_weight_name)\n if A is None:\n warnings.warn(\"Mul param is not constant, skipping\")\n continue\n transp_node = n\n mul_node = consumer\n start_name = transp_node.input[0]\n middle_name = transp_node.output[0]\n end_name = mul_node.output[0]\n transp_in_shape = model.get_tensor_shape(start_name)\n transp_out_shape = model.get_tensor_shape(middle_name)\n transp_in_layout = model.get_tensor_layout(start_name)\n transp_out_layout = model.get_tensor_layout(middle_name)\n if transp_in_layout is None or transp_out_layout is None:\n warnings.warn(\n \"\"\"Datalayout is not set for tensors.\n Transformation can't be applied.\"\"\"\n )\n continue\n if all(x == 1 for x in A.shape):\n # if the mul is scalar, we can simply swap the order of ops\n # rewire transpose input to be mul input\n mul_node.input[0] = start_name\n model.set_tensor_shape(start_name, transp_in_shape)\n model.set_tensor_layout(start_name, transp_in_layout)\n mul_node.output[0] = middle_name\n model.set_tensor_shape(middle_name, transp_in_shape)\n model.set_tensor_layout(middle_name, transp_in_layout)\n transp_node.input[0] = middle_name\n transp_node.output[0] = end_name\n model.set_tensor_shape(end_name, transp_out_shape)\n model.set_tensor_layout(end_name, transp_out_layout)\n graph.node.remove(transp_node)\n graph.node.insert(node_ind, transp_node)\n graph_modified = True\n\n if graph_modified is True:\n model = model.transform(InferDataLayouts())\n model = model.transform(InferShapes())\n return (model, graph_modified)\n" ]
[ [ "numpy.array_equal", "numpy.matmul", "numpy.sort", "numpy.ones", "numpy.prod", "numpy.random.uniform", "numpy.zeros" ] ]
hlang8160/cnn-text-tf-hl
[ "dc5eb2d111e08e76e752a15ec98b3d8cb2c99e7c" ]
[ "train.py" ]
[ "#! /usr/bin/env python\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport data_helpers\nfrom text_cnn import TextCNN\nfrom tensorflow.contrib import learn\n\n# Parameters\n# ==================================================\n\n# Data loading params\ntf.flags.DEFINE_float(\"dev_sample_percentage\", .1,\n \"Percentage of the training data to use for validation\")\ntf.flags.DEFINE_string(\"positive_data_file\", \"./data/rt-polaritydata/rt-polarity.pos\",\n \"Data source for the positive data.\")\ntf.flags.DEFINE_string(\"negative_data_file\", \"./data/rt-polaritydata/rt-polarity.neg\",\n \"Data source for the negative data.\")\n\n# Model Hyperparameters\ntf.flags.DEFINE_integer(\"embedding_dim\", 128,\n \"Dimensionality of character embedding (default: 128)\")\ntf.flags.DEFINE_string(\"filter_sizes\", \"3,4,5\",\n \"Comma-separated filter sizes (default: '3,4,5')\")\ntf.flags.DEFINE_integer(\n \"num_filters\", 128, \"Number of filters per filter size (default: 128)\")\ntf.flags.DEFINE_float(\"dropout_keep_prob\", 0.5,\n \"Dropout keep probability (default: 0.5)\")\ntf.flags.DEFINE_float(\"l2_reg_lambda\", 0.0,\n \"L2 regularization lambda (default: 0.0)\")\n\n# Training parameters\ntf.flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\ntf.flags.DEFINE_integer(\n \"num_epochs\", 200, \"Number of training epochs (default: 200)\")\ntf.flags.DEFINE_integer(\"evaluate_every\", 100,\n \"Evaluate model on dev set after this many steps (default: 100)\")\ntf.flags.DEFINE_integer(\"checkpoint_every\", 100,\n \"Save model after this many steps (default: 100)\")\ntf.flags.DEFINE_integer(\"num_checkpoints\", 5,\n \"Number of checkpoints to store (default: 5)\")\n# Misc Parameters\ntf.flags.DEFINE_boolean(\"allow_soft_placement\", True,\n \"Allow device soft device placement\")\ntf.flags.DEFINE_boolean(\"log_device_placement\", False,\n \"Log placement of ops on devices\")\n\nFLAGS = tf.flags.FLAGS\nFLAGS._parse_flags()\nprint(\"\\nParameters:\")\nfor attr, value in sorted(FLAGS.__flags.items()):\n print(\"{}={}\".format(attr.upper(), value))\nprint(\"\")\n\n\n# Data Preparation\n# ==================================================\n\n# Load data\nprint(\"Loading data...\")\nx_text, y = data_helpers.load_data_and_labels(\n FLAGS.positive_data_file, FLAGS.negative_data_file)\n\n# Build vocabulary\nmax_document_length = max([len(x.split(\" \")) for x in x_text])\nvocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)\nx = np.array(list(vocab_processor.fit_transform(x_text)))\n\n# Randomly shuffle data\nnp.random.seed(10)\nshuffle_indices = np.random.permutation(np.arange(len(y)))\nx_shuffled = x[shuffle_indices]\ny_shuffled = y[shuffle_indices]\n\n# Split train/test set\n# TODO: This is very crude, should use cross-validation\ndev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))\nx_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]\ny_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]\nprint(\"Vocabulary Size: {:d}\".format(len(vocab_processor.vocabulary_)))\nprint(\"Train/Dev split: {:d}/{:d}\".format(len(y_train), len(y_dev)))\n\n\n# Training\n# ==================================================\n\nwith tf.Graph().as_default():\n session_conf = tf.ConfigProto(\n allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n sess = tf.Session(config=session_conf)\n with sess.as_default():\n cnn = TextCNN(\n 
sequence_length=x_train.shape[1],\n num_classes=y_train.shape[1],\n vocab_size=len(vocab_processor.vocabulary_),\n embedding_size=FLAGS.embedding_dim,\n filter_sizes=list(map(int, FLAGS.filter_sizes.split(\",\"))),\n num_filters=FLAGS.num_filters,\n l2_reg_lambda=FLAGS.l2_reg_lambda)\n\n # Define Training procedure\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(1e-3)\n grads_and_vars = optimizer.compute_gradients(cnn.loss)\n train_op = optimizer.apply_gradients(\n grads_and_vars, global_step=global_step)\n\n # Keep track of gradient values and sparsity (optional)\n grad_summaries = []\n for g, v in grads_and_vars:\n if g is not None:\n grad_hist_summary = tf.summary.histogram(\n \"{}/grad/hist\".format(v.name), g)\n sparsity_summary = tf.summary.scalar(\n \"{}/grad/sparsity\".format(v.name), tf.nn.zero_fraction(g))\n grad_summaries.append(grad_hist_summary)\n grad_summaries.append(sparsity_summary)\n grad_summaries_merged = tf.summary.merge(grad_summaries)\n\n # Output directory for models and summaries\n timestamp = str(int(time.time()))\n out_dir = os.path.abspath(os.path.join(\n os.path.curdir, \"runs\", timestamp))\n print(\"Writing to {}\\n\".format(out_dir))\n\n # Summaries for loss and accuracy\n loss_summary = tf.summary.scalar(\"loss\", cnn.loss)\n acc_summary = tf.summary.scalar(\"accuracy\", cnn.accuracy)\n\n # Train Summaries\n train_summary_op = tf.summary.merge(\n [loss_summary, acc_summary, grad_summaries_merged])\n train_summary_dir = os.path.join(out_dir, \"summaries\", \"train\")\n train_summary_writer = tf.summary.FileWriter(\n train_summary_dir, sess.graph)\n\n # Dev summaries\n dev_summary_op = tf.summary.merge([loss_summary, acc_summary])\n dev_summary_dir = os.path.join(out_dir, \"summaries\", \"dev\")\n dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)\n\n # Checkpoint directory. 
Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(out_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n saver = tf.train.Saver(tf.global_variables(),\n max_to_keep=FLAGS.num_checkpoints)\n\n # Write vocabulary\n vocab_processor.save(os.path.join(out_dir, \"vocab\"))\n\n # Initialize all variables\n sess.run(tf.global_variables_initializer())\n\n def train_step(x_batch, y_batch):\n \"\"\"\n A single training step\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(\n time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)\n\n def dev_step(x_batch, y_batch, writer=None):\n \"\"\"\n Evaluates model on a dev set\n \"\"\"\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: 1.0\n }\n step, summaries, loss, accuracy = sess.run(\n [global_step, dev_summary_op, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(\n time_str, step, loss, accuracy))\n if writer:\n writer.add_summary(summaries, step)\n\n # Generate batches\n batches = data_helpers.batch_iter(\n list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)\n # Training loop. For each batch...\n for batch in batches:\n x_batch, y_batch = zip(*batch)\n train_step(x_batch, y_batch)\n current_step = tf.train.global_step(sess, global_step)\n if current_step % FLAGS.evaluate_every == 0:\n print(\"\\nEvaluation:\")\n dev_step(x_dev, y_dev, writer=dev_summary_writer)\n print(\"\")\n if current_step % FLAGS.checkpoint_every == 0:\n path = saver.save(sess, checkpoint_prefix,\n global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))\n" ]
[ [ "tensorflow.flags.DEFINE_boolean", "tensorflow.Graph", "tensorflow.train.global_step", "tensorflow.summary.FileWriter", "tensorflow.summary.scalar", "numpy.random.seed", "tensorflow.Variable", "tensorflow.flags.DEFINE_string", "tensorflow.global_variables", "tensorflow.ConfigProto", "tensorflow.global_variables_initializer", "tensorflow.nn.zero_fraction", "tensorflow.Session", "tensorflow.flags.DEFINE_float", "tensorflow.train.AdamOptimizer", "tensorflow.contrib.learn.preprocessing.VocabularyProcessor", "tensorflow.summary.merge", "tensorflow.flags.DEFINE_integer" ] ]
Kundjanasith/EMSE-DeepCom
[ "bc29e119f35a03860303c272700d437b699a8e7f" ]
[ "source code/translation_model.py" ]
[ "import tensorflow as tf\nimport os\nimport pickle\nimport re\nimport sys\nimport math\nimport shutil\nimport itertools\nimport utils, evaluation\nfrom seq2seq_model import Seq2SeqModel\nfrom subprocess import Popen, PIPE\nimport time\n\n\nclass TranslationModel:\n def __init__(self, encoders, decoders, checkpoint_dir, learning_rate, learning_rate_decay_factor,\n batch_size, keep_best=1, dev_prefix=None, score_function='corpus_scores', name=None, ref_ext=None,\n pred_edits=False, dual_output=False, binary=None, truncate_lines=True, ensemble=False,\n checkpoints=None, beam_size=1, len_normalization=1, early_stopping=True, **kwargs):\n\n self.batch_size = batch_size\n self.character_level = {}\n self.binary = []\n\n for encoder_or_decoder in encoders + decoders:\n encoder_or_decoder.ext = encoder_or_decoder.ext or encoder_or_decoder.name\n self.character_level[encoder_or_decoder.ext] = encoder_or_decoder.character_level\n self.binary.append(encoder_or_decoder.get('binary', False))\n\n self.char_output = decoders[0].character_level\n\n self.src_ext = [encoder.ext for encoder in encoders]\n self.trg_ext = [decoder.ext for decoder in decoders]\n\n self.extensions = self.src_ext + self.trg_ext\n\n self.ref_ext = ref_ext\n if self.ref_ext is not None:\n self.binary.append(False)\n\n self.pred_edits = pred_edits\n self.dual_output = dual_output\n\n self.dev_prefix = dev_prefix\n self.name = name\n\n self.max_input_len = [encoder.max_len for encoder in encoders]\n self.max_output_len = [decoder.max_len for decoder in decoders]\n\n if truncate_lines:\n self.max_len = None # we let seq2seq.get_batch handle long lines (by truncating them)\n else: # the line reader will drop lines that are too long\n self.max_len = dict(zip(self.extensions, self.max_input_len + self.max_output_len))\n\n self.learning_rate = tf.Variable(learning_rate, trainable=False, name='learning_rate', dtype=tf.float32)\n self.learning_rate_decay_op = self.learning_rate.assign(self.learning_rate * learning_rate_decay_factor)\n\n with tf.device('/cpu:0'):\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n self.baseline_step = tf.Variable(0, trainable=False, name='baseline_step')\n\n self.filenames = utils.get_filenames(extensions=self.extensions, dev_prefix=dev_prefix, name=name,\n ref_ext=ref_ext, binary=self.binary, **kwargs)\n utils.debug('reading vocabularies')\n self.vocabs = None\n self.src_vocab, self.trg_vocab = None, None\n self.read_vocab()\n\n for encoder_or_decoder, vocab in zip(encoders + decoders, self.vocabs):\n if vocab:\n encoder_or_decoder.vocab_size = len(vocab.reverse)\n\n utils.debug('creating model')\n\n self.models = []\n if ensemble and checkpoints is not None:\n for i, _ in enumerate(checkpoints, 1):\n with tf.variable_scope('model_{}'.format(i)):\n model = Seq2SeqModel(encoders, decoders, self.learning_rate, self.global_step, name=name,\n pred_edits=pred_edits, dual_output=dual_output,\n baseline_step=self.baseline_step, **kwargs)\n self.models.append(model)\n self.seq2seq_model = self.models[0]\n else:\n self.seq2seq_model = Seq2SeqModel(encoders, decoders, self.learning_rate, self.global_step, name=name,\n pred_edits=pred_edits, dual_output=dual_output,\n baseline_step=self.baseline_step, **kwargs)\n self.models.append(self.seq2seq_model)\n\n self.seq2seq_model.create_beam_op(self.models, beam_size, len_normalization, early_stopping)\n\n self.batch_iterator = None\n self.dev_batches = None\n self.train_size = None\n self.saver = None\n self.keep_best = keep_best\n 
self.checkpoint_dir = checkpoint_dir\n self.epoch = None\n\n self.training = utils.AttrDict() # used to keep track of training\n\n try:\n self.reversed_scores = getattr(evaluation, score_function).reversed # the lower the better\n except AttributeError:\n self.reversed_scores = False # the higher the better\n\n def read_data(self, max_train_size, max_dev_size, read_ahead=10, batch_mode='standard', shuffle=True,\n crash_test=False, use_unknown=True, **kwargs):\n utils.debug('reading training data')\n self.batch_iterator, self.train_size = utils.get_batch_iterator(\n self.filenames.train, self.extensions, self.vocabs, self.batch_size,\n max_size=max_train_size, character_level=self.character_level, max_seq_len=self.max_len,\n read_ahead=read_ahead, mode=batch_mode, shuffle=shuffle, binary=self.binary, crash_test=crash_test, use_unknown=use_unknown\n )\n\n utils.debug('reading development data')\n\n dev_sets = [\n utils.read_dataset(dev, self.extensions, self.vocabs, max_size=max_dev_size,\n character_level=self.character_level, binary=self.binary, use_unknown=use_unknown)[0]\n for dev in self.filenames.dev\n ]\n # subset of the dev set whose loss is periodically evaluated\n self.dev_batches = [utils.get_batches(dev_set, batch_size=self.batch_size) for dev_set in dev_sets]\n\n def read_vocab(self):\n # don't try reading vocabulary for encoders that take pre-computed features\n self.vocabs = [\n None if binary else utils.initialize_vocabulary(vocab_path)\n for vocab_path, binary in zip(self.filenames.vocab, self.binary)\n ]\n self.src_vocab, self.trg_vocab = self.vocabs[:len(self.src_ext)], self.vocabs[len(self.src_ext):]\n\n def eval_step(self):\n # compute loss on dev set\n for prefix, dev_batches in zip(self.dev_prefix, self.dev_batches):\n eval_loss = sum(\n self.seq2seq_model.step(batch, update_model=False).loss * len(batch)\n for batch in dev_batches\n )\n eval_loss /= sum(map(len, dev_batches))\n\n utils.log(\" {} eval: loss {:.2f}\".format(prefix, eval_loss))\n\n def decode_sentence(self, sentence_tuple, remove_unk=False):\n return next(self.decode_batch([sentence_tuple], remove_unk))\n\n def decode_batch(self, sentence_tuples, batch_size, remove_unk=False, fix_edits=True):\n if batch_size == 1:\n batches = ([sentence_tuple] for sentence_tuple in sentence_tuples) # lazy\n else:\n batch_count = int(math.ceil(len(sentence_tuples) / batch_size))\n batches = [sentence_tuples[i * batch_size:(i + 1) * batch_size] for i in range(batch_count)]\n\n def map_to_ids(sentence_tuple):\n token_ids = [\n sentence if vocab is None else\n utils.sentence_to_token_ids(sentence, vocab.vocab, ext, character_level=self.character_level.get(ext))\n for ext, vocab, sentence in zip(self.extensions, self.vocabs, sentence_tuple)\n ]\n return token_ids\n\n for batch_id, batch in enumerate(batches):\n token_ids = list(map(map_to_ids, batch))\n batch_token_ids, attn_weights = self.seq2seq_model.greedy_decoding(token_ids)\n batch_token_ids = zip(*batch_token_ids)\n\n for src_tokens, trg_token_ids, attn_weight in zip(batch, batch_token_ids, attn_weights):\n trg_tokens = []\n\n for trg_token_ids_, vocab in zip(trg_token_ids, self.trg_vocab):\n trg_token_ids_ = list(trg_token_ids_) # from np array to list\n if utils.EOS_ID in trg_token_ids_:\n trg_token_ids_ = trg_token_ids_[:trg_token_ids_.index(utils.EOS_ID)]\n\n trg_tokens_ = [vocab.reverse[i] if i < len(vocab.reverse) else utils._UNK\n for i in trg_token_ids_]\n trg_tokens.append(trg_tokens_)\n\n if self.pred_edits:\n # first output is ops, second output is words\n 
raw_hypothesis = ' '.join('_'.join(tokens) for tokens in zip(*trg_tokens))\n trg_tokens = utils.reverse_edits(src_tokens[0].split('\\t')[1].split(), trg_tokens, fix=fix_edits)\n trg_tokens = [token for token in trg_tokens if token not in utils._START_VOCAB]\n # FIXME: char-level\n else:\n trg_tokens = trg_tokens[0]\n raw_hypothesis = ''.join(trg_tokens) if self.char_output else ' '.join(trg_tokens)\n\n if remove_unk:\n trg_tokens = [token for token in trg_tokens if token != utils._UNK]\n\n if self.char_output:\n hypothesis = ''.join(trg_tokens)\n else:\n hypothesis = ' '.join(trg_tokens).replace('@@ ', '') # merge subwords units\n\n yield hypothesis, raw_hypothesis, attn_weight\n\n def align(self, output=None, align_encoder_id=0, **kwargs):\n # if self.binary and any(self.binary):\n # raise NotImplementedError\n\n if len(self.filenames.test) != len(self.extensions):\n raise Exception('wrong number of input files')\n\n binary = self.binary and any(self.binary)\n\n paths = self.filenames.test or [None]\n lines = utils.read_lines(paths, binary=self.binary)\n\n for line_id, lines in enumerate(lines):\n token_ids = [\n sentence if vocab is None else\n utils.sentence_to_token_ids(sentence, vocab.vocab, character_level=self.character_level.get(ext))\n for ext, vocab, sentence in zip(self.extensions, self.vocabs, lines)\n ]\n\n _, weights = self.seq2seq_model.step(data=[token_ids], align=True, update_model=False)\n\n trg_vocab = self.trg_vocab[0]\n trg_token_ids = token_ids[len(self.src_ext)]\n trg_tokens = [trg_vocab.reverse[i] if i < len(trg_vocab.reverse) else utils._UNK for i in trg_token_ids]\n\n weights = weights.squeeze()\n max_len = weights.shape[1]\n\n if binary:\n src_tokens = None\n else:\n src_tokens = lines[align_encoder_id].split()[:max_len - 1] + [utils._EOS]\n trg_tokens = trg_tokens[:weights.shape[0] - 1] + [utils._EOS]\n\n output_file = '{}.{}.svg'.format(output, line_id + 1) if output is not None else None\n\n utils.heatmap(src_tokens, trg_tokens, weights, output_file=output_file)\n\n def decode(self, output=None, remove_unk=False, raw_output=False, max_test_size=None, **kwargs):\n utils.log('starting decoding')\n\n # empty `test` means that we read from standard input, which is not possible with multiple encoders\n # assert len(self.src_ext) == 1 or self.filenames.test\n # check that there is the right number of files for decoding\n # assert not self.filenames.test or len(self.filenames.test) == len(self.src_ext)\n\n output_file = None\n try:\n output_file = sys.stdout if output is None else open(output, 'w')\n paths = self.filenames.test or [None]\n lines = utils.read_lines(paths, binary=self.binary)\n\n if max_test_size:\n lines = itertools.islice(lines, max_test_size)\n\n if not self.filenames.test: # interactive mode\n batch_size = 1\n else:\n batch_size = self.batch_size\n lines = list(lines)\n\n hypothesis_iter = self.decode_batch(lines, batch_size, remove_unk=remove_unk)\n\n for hypothesis, raw, attn in hypothesis_iter:\n if raw_output:\n hypothesis = raw\n\n output_file.write(hypothesis + '\\n')\n output_file.flush()\n finally:\n if output_file is not None:\n output_file.close()\n\n def evaluate(self, score_function, on_dev=True, output=None, remove_unk=False, max_dev_size=None,\n raw_output=False, fix_edits=True, max_test_size=None, post_process_script=None, **kwargs):\n \"\"\"\n Decode a dev or test set, and perform evaluation with respect to gold standard, using the provided\n scoring function. 
If `output` is defined, also save the decoding output to this file.\n When evaluating development data (`on_dev` to True), several dev sets can be specified (`dev_prefix` parameter\n in configuration files), and a score is computed for each of them.\n\n :param score_function: name of the scoring function used to score and rank models (typically 'bleu_score')\n :param on_dev: if True, evaluate the dev corpus, otherwise evaluate the test corpus\n :param output: save the hypotheses to this file\n :param remove_unk: remove the UNK symbols from the output\n :param max_dev_size: maximum number of lines to read from dev files\n :param max_test_size: maximum number of lines to read from test files\n :param raw_output: save raw decoder output (don't do post-processing like UNK deletion or subword\n concatenation). The evaluation is still done with the post-processed output.\n :param fix_edits: when predicting edit operations, pad shorter hypotheses with KEEP symbols.\n :return: scores of each corpus to evaluate\n \"\"\"\n utils.log('starting decoding')\n\n if on_dev:\n filenames = self.filenames.dev\n else:\n filenames = [self.filenames.test]\n\n # convert `output` into a list, for zip\n if isinstance(output, str):\n output = [output]\n elif output is None:\n output = [None] * len(filenames)\n\n scores = []\n new_lines = []\n for filenames_, output_, prefix in zip(filenames, output, self.dev_prefix): # evaluation on multiple corpora\n extensions = list(self.extensions)\n if self.ref_ext is not None:\n extensions.append(self.ref_ext)\n\n lines = list(utils.read_lines(filenames_, binary=self.binary))\n if on_dev and max_dev_size:\n lines = lines[:max_dev_size]\n elif not on_dev and max_test_size:\n lines = lines[:max_test_size]\n\n hypotheses = []\n references = []\n\n output_file = None\n\n try:\n if output_ is not None:\n output_file = open(output_, 'w')\n\n lines_ = list(zip(*lines))\n\n src_sentences = list(zip(*lines_[:len(self.src_ext)]))\n trg_sentences = list(zip(*lines_[len(self.src_ext):]))\n\n hypothesis_iter = self.decode_batch(lines, self.batch_size, remove_unk=remove_unk,\n fix_edits=fix_edits)\n #ref_file_path = '../data/test/ref.out'\n #ref_file = open(ref_file_path, 'w')\n #gen_file_path = \"../data/test/hyp.out\"\n #gen_file = open(gen_file_path, 'w')\n for i, (sources, hypothesis, reference) in enumerate(zip(src_sentences, hypothesis_iter,\n trg_sentences)):\n if self.ref_ext is not None and on_dev:\n reference = reference[-1]\n else:\n reference = reference[0] # single output for now\n\n hypothesis, raw, attn = hypothesis\n\n hypotheses.append(hypothesis)\n reference = reference.strip()\n references.append(reference)\n if output_file is not None:\n if raw_output:\n hypothesis = raw\n\n output_file.write(hypothesis +'\\n')\n output_file.flush()\n #ref_file.write(reference + '\\n')\n #gen_file.write(hypothesis + '\\n' )\n #gen_file.flush() \n\n finally:\n if output_file is not None:\n output_file.close()\n\n if post_process_script is not None:\n data = '\\n'.join(hypotheses).encode()\n data = Popen([post_process_script], stdout=PIPE, stdin=PIPE).communicate(input=data)[0].decode()\n hypotheses = data.splitlines()\n print(\"hypotheses:%d, references: %d\" % (len(hypotheses), len(references)))\n # default scoring function is utils.bleu_score\n score, avg_score = getattr(evaluation, score_function)(hypotheses, references)\n\n # print scoring information\n score_info = [prefix, 'score={:.4f} avg_score={:.4f}'.format(score, avg_score)]\n\n # if score_summary:\n # 
score_info.append(score_summary)\n\n if self.name is not None:\n score_info.insert(0, self.name)\n\n utils.log(' '.join(map(str, score_info)))\n scores.append(score)\n\n return scores\n\n def train(self, baseline_steps=0, loss_function='xent', **kwargs):\n self.init_training(**kwargs)\n\n utils.log('starting training')\n while True:\n try:\n self.train_step(loss_function=loss_function, **kwargs)\n except (utils.FinishedTrainingException, KeyboardInterrupt):\n utils.log('exiting...')\n self.save()\n return\n except utils.EvalException:\n self.save()\n step, score = self.training.scores[-1]\n self.manage_best_checkpoints(step, score)\n except utils.CheckpointException:\n self.save()\n\n def init_training(self, sgd_after_n_epoch=None, **kwargs):\n self.read_data(**kwargs)\n self.epoch = self.batch_size * self.global_step // self.train_size\n\n global_step = self.global_step.eval()\n epoch = self.epoch.eval()\n if sgd_after_n_epoch is not None and epoch >= sgd_after_n_epoch: # already switched to SGD\n self.training.use_sgd = True\n else:\n self.training.use_sgd = False\n\n if kwargs.get('batch_mode') != 'random' and not kwargs.get('shuffle'):\n # read all the data up to this step (only if the batch iteration method is deterministic)\n for _ in range(global_step):\n next(self.batch_iterator)\n\n # those parameters are used to track the progress of training\n self.training.time = 0\n self.training.steps = 0\n self.training.loss = 0\n self.training.baseline_loss = 0\n self.training.losses = []\n self.training.last_decay = global_step\n self.training.scores = []\n\n def train_step(self, steps_per_checkpoint, model_dir, steps_per_eval=None, max_steps=0,\n max_epochs=0, eval_burn_in=0, decay_if_no_progress=None, decay_after_n_epoch=None,\n decay_every_n_epoch=None, sgd_after_n_epoch=None, sgd_learning_rate=None, min_learning_rate=None,\n loss_function='xent', **kwargs):\n if min_learning_rate is not None and self.learning_rate.eval() < min_learning_rate:\n utils.debug('learning rate is too small: stopping')\n raise utils.FinishedTrainingException\n if 0 < max_steps <= self.global_step.eval() or 0 < max_epochs <= self.epoch.eval():\n raise utils.FinishedTrainingException\n\n start_time = time.time()\n\n step_function = self.seq2seq_model.step\n\n res = step_function(next(self.batch_iterator), update_model=True, use_sgd=self.training.use_sgd,\n update_baseline=True)\n\n self.training.loss += res.loss\n self.training.baseline_loss += getattr(res, 'baseline_loss', 0)\n\n self.training.time += time.time() - start_time\n self.training.steps += 1\n\n global_step = self.global_step.eval()\n epoch = self.epoch.eval()\n\n if decay_after_n_epoch is not None and self.batch_size * global_step >= decay_after_n_epoch * self.train_size:\n if decay_every_n_epoch is not None and (self.batch_size * (global_step - self.training.last_decay)\n >= decay_every_n_epoch * self.train_size):\n self.learning_rate_decay_op.eval()\n utils.debug(' decaying learning rate to: {:.3g}'.format(self.learning_rate.eval()))\n self.training.last_decay = global_step\n\n if sgd_after_n_epoch is not None and epoch >= sgd_after_n_epoch:\n if not self.training.use_sgd:\n utils.debug('epoch {}, starting to use SGD'.format(epoch + 1))\n self.training.use_sgd = True\n if sgd_learning_rate is not None:\n self.learning_rate.assign(sgd_learning_rate).eval()\n self.training.last_decay = global_step # reset learning rate decay\n\n if steps_per_checkpoint and global_step % steps_per_checkpoint == 0:\n loss = self.training.loss / self.training.steps\n 
baseline_loss = self.training.baseline_loss / self.training.steps\n step_time = self.training.time / self.training.steps\n\n summary = 'step {} epoch {} learning rate {:.3g} step-time {:.3f} loss {:.3f}'.format(\n global_step, epoch + 1, self.learning_rate.eval(), step_time, loss)\n\n if self.name is not None:\n summary = '{} {}'.format(self.name, summary)\n\n utils.log(summary)\n\n if decay_if_no_progress and len(self.training.losses) >= decay_if_no_progress:\n if loss >= max(self.training.losses[:decay_if_no_progress]):\n self.learning_rate_decay_op.eval()\n\n self.training.losses.append(loss)\n self.training.loss, self.training.time, self.training.steps, self.training.baseline_loss = 0, 0, 0, 0\n self.eval_step()\n\n if steps_per_eval and global_step % steps_per_eval == 0 and 0 <= eval_burn_in <= global_step:\n eval_dir = 'eval' if self.name is None else 'eval_{}'.format(self.name)\n eval_output = os.path.join(model_dir, eval_dir)\n\n os.makedirs(eval_output, exist_ok=True)\n\n # if there are several dev files, we define several output files\n output = [\n os.path.join(eval_output, '{}.{}.out'.format(prefix, global_step))\n for prefix in self.dev_prefix\n ]\n\n kwargs_ = dict(kwargs)\n kwargs_['output'] = output\n score, *_ = self.evaluate(on_dev=True, **kwargs_)\n self.training.scores.append((global_step, score))\n\n if steps_per_eval and global_step % steps_per_eval == 0:\n raise utils.EvalException\n elif steps_per_checkpoint and global_step % steps_per_checkpoint == 0:\n raise utils.CheckpointException\n\n def manage_best_checkpoints(self, step, score):\n score_filename = os.path.join(self.checkpoint_dir, 'scores.txt')\n # try loading previous scores\n try:\n with open(score_filename) as f:\n # list of pairs (score, step)\n scores = [(float(line.split()[0]), int(line.split()[1])) for line in f]\n except IOError:\n scores = []\n\n if any(step_ >= step for _, step_ in scores):\n utils.warn('inconsistent scores.txt file')\n\n best_scores = sorted(scores, reverse=not self.reversed_scores)[:self.keep_best]\n\n def full_path(filename):\n return os.path.join(self.checkpoint_dir, filename)\n\n lower = (lambda x, y: y < x) if self.reversed_scores else (lambda x, y: x < y)\n\n if any(lower(score_, score) for score_, _ in best_scores) or not best_scores:\n # if this checkpoint is in the top, save it under a special name\n\n prefix = 'translate-{}.'.format(step)\n dest_prefix = 'best-{}.'.format(step)\n\n for filename in os.listdir(self.checkpoint_dir):\n if filename.startswith(prefix):\n dest_filename = filename.replace(prefix, dest_prefix)\n shutil.copy(full_path(filename), full_path(dest_filename))\n\n # also copy to `best` if this checkpoint is the absolute best\n if all(lower(score_, score) for score_, _ in best_scores):\n dest_filename = filename.replace(prefix, 'best.')\n shutil.copy(full_path(filename), full_path(dest_filename))\n\n best_scores = sorted(best_scores + [(score, step)], reverse=not self.reversed_scores)\n\n for _, step_ in best_scores[self.keep_best:]:\n # remove checkpoints that are not in the top anymore\n prefix = 'best-{}'.format(step_)\n for filename in os.listdir(self.checkpoint_dir):\n if filename.startswith(prefix):\n os.remove(full_path(filename))\n\n # save scores\n scores.append((score, step))\n\n with open(score_filename, 'w') as f:\n for score_, step_ in scores:\n f.write('{:.2f} {}\\n'.format(score_, step_))\n\n def initialize(self, checkpoints=None, reset=False, reset_learning_rate=False, max_to_keep=1,\n keep_every_n_hours=0, sess=None, use_transfer=False, 
api_params=None, **kwargs):\n \"\"\"\n :param checkpoints: list of checkpoints to load (instead of latest checkpoint)\n :param reset: don't load latest checkpoint, reset learning rate and global step\n :param reset_learning_rate: reset the learning rate to its initial value\n :param max_to_keep: keep this many latest checkpoints at all times\n :param keep_every_n_hours: and keep checkpoints every n hours\n \"\"\"\n sess = sess or tf.get_default_session()\n\n if keep_every_n_hours <= 0 or keep_every_n_hours is None:\n keep_every_n_hours = float('inf')\n\n self.saver = tf.train.Saver(max_to_keep=max_to_keep, keep_checkpoint_every_n_hours=keep_every_n_hours,\n sharded=False)\n\n sess.run(tf.global_variables_initializer())\n blacklist = ['dropout_keep_prob']\n\n if reset_learning_rate or reset:\n blacklist.append('learning_rate')\n if reset:\n blacklist.append('global_step')\n\n params = {k: kwargs.get(k) for k in ('variable_mapping', 'reverse_mapping')}\n\n if checkpoints and len(self.models) > 1:\n assert len(self.models) == len(checkpoints)\n for i, checkpoint in enumerate(checkpoints, 1):\n load_checkpoint(sess, None, checkpoint, blacklist=blacklist, prefix='model_{}'.format(i), **params)\n elif checkpoints: # load partial checkpoints\n for checkpoint in checkpoints: # checkpoint files to load\n load_checkpoint(sess, None, checkpoint, blacklist=blacklist, **params)\n elif not reset:\n load_checkpoint(sess, self.checkpoint_dir, blacklist=blacklist, **params)\n print(use_transfer)\n if api_params and use_transfer:\n param_variables = tf.global_variables()\n for v in param_variables:\n if 'api' in v.name and v.name in api_params.keys():\n # print(\"Assign param %s with api model\" % v.name)\n sess.run(v.assign(api_params[v.name]))\n utils.debug('Assign param: {} with api model'.format(v.name))\n\n utils.debug('global step: {}'.format(self.global_step.eval()))\n utils.debug('baseline step: {}'.format(self.baseline_step.eval()))\n\n def save(self):\n save_checkpoint(tf.get_default_session(), self.saver, self.checkpoint_dir, self.global_step)\n\n\n# hard-coded variables which can also be defined in config file (variable_mapping and reverse_mapping)\nglobal_variable_mapping = [] # map old names to new names\nglobal_reverse_mapping = [ # map new names to old names\n (r'decoder_(.*?)/.*/initial_state_projection/', r'decoder_\\1/initial_state_projection/'),\n]\n\n\ndef load_checkpoint(sess, checkpoint_dir, filename=None, blacklist=(), prefix=None, variable_mapping=None,\n reverse_mapping=None):\n \"\"\"\n if `filename` is None, we load last checkpoint, otherwise\n we ignore `checkpoint_dir` and load the given checkpoint file.\n \"\"\"\n variable_mapping = variable_mapping or []\n reverse_mapping = reverse_mapping or []\n\n variable_mapping = list(variable_mapping) + global_variable_mapping\n reverse_mapping = list(reverse_mapping) + global_reverse_mapping\n\n if filename is None:\n # load last checkpoint\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt is not None:\n filename = ckpt.model_checkpoint_path\n else:\n checkpoint_dir = os.path.dirname(filename)\n\n vars_ = []\n var_names = []\n for var in tf.global_variables():\n if prefix is None or var.name.startswith(prefix):\n name = var.name if prefix is None else var.name[len(prefix) + 1:]\n vars_.append(var)\n var_names.append(name)\n\n var_file = os.path.join(checkpoint_dir, 'vars.pkl')\n if os.path.exists(var_file):\n with open(var_file, 'rb') as f:\n old_names = pickle.load(f)\n else:\n old_names = list(var_names)\n\n 
name_mapping = {}\n for name in old_names:\n name_ = name\n for key, value in variable_mapping:\n name_ = re.sub(key, value, name_)\n name_mapping[name] = name_\n\n var_names_ = []\n for name in var_names:\n name_ = name\n for key, value in reverse_mapping:\n name_ = re.sub(key, value, name_)\n if name_ in list(name_mapping.values()):\n name = name_\n var_names_.append(name)\n vars_ = dict(zip(var_names_, vars_))\n\n variables = {old_name[:-2]: vars_[new_name] for old_name, new_name in name_mapping.items()\n if new_name in vars_ and not any(prefix in new_name for prefix in blacklist)}\n\n if filename is not None:\n utils.log('reading model parameters from {}'.format(filename))\n tf.train.Saver(variables).restore(sess, filename)\n\n utils.debug('retrieved parameters ({})'.format(len(variables)))\n for var in sorted(variables.values(), key=lambda var: var.name):\n utils.debug(' {} {}'.format(var.name, var.get_shape()))\n\n\ndef save_checkpoint(sess, saver, checkpoint_dir, step=None, name=None):\n var_file = os.path.join(checkpoint_dir, 'vars.pkl')\n name = name or 'translate'\n os.makedirs(checkpoint_dir, exist_ok=True)\n\n with open(var_file, 'wb') as f:\n var_names = [var.name for var in tf.global_variables()]\n pickle.dump(var_names, f)\n\n utils.log('saving model to {}'.format(checkpoint_dir))\n checkpoint_path = os.path.join(checkpoint_dir, name)\n saver.save(sess, checkpoint_path, step, write_meta_graph=False)\n\n utils.log('finished saving model')\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.get_default_session", "tensorflow.device", "tensorflow.Variable", "tensorflow.global_variables", "tensorflow.global_variables_initializer", "tensorflow.train.Saver" ] ]
mkomaiha/NERS570-Sudoku
[ "2448de19dd8ae97292b74937d397846d10176a8b" ]
[ "tests/test_hsolver.py" ]
[ "from context import HS\nimport numpy as np\nfrom time import time\nimport unittest\nfrom parameterized import parameterized\nfrom random import randint\nimport logging\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass SolveTestSuite(unittest.TestCase):\n \"\"\"Solve test cases.\"\"\"\n @parameterized.expand([\n (\"Basic\", 0, 11),\n (\"Medium\", 1, 11),\n (\"Hard\", 2, 11),\n (\"Extreme\", 3, 11),\n # (\"ExtremeEasy\", 3, 1),\n # (\"ExtremeRandom\", 3)\n ])\n def test_solve(self, name, difficulty, boardId=randint(1, 10000)):\n board = HS(difficulty, boardId)\n totalTime = 0\n nRepeats = 10\n for i in range(nRepeats):\n start = time()\n board.solve()\n totalTime += time() - start\n assert(np.all(board.solved == board.solution))\n board.reset()\n LOGGER.info(f\"{name} - {board.difficulty} ({board.boardId})\")\n LOGGER.info(f\"Elapsed {totalTime/nRepeats}\")\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.all" ] ]
mickolka/pytorch-0.4-yolov3
[ "19072e7ca9c1add6ffbe2aebae075a3b46712664" ]
[ "region_layer.py" ]
[ "import math\nimport numpy as np\nimport sys\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils import bbox_iou, multi_bbox_ious, convert2cpu\n\nclass RegionLayer(nn.Module):\n def __init__(self, num_classes=0, anchors=[], num_anchors=1, use_cuda=None):\n super(RegionLayer, self).__init__()\n use_cuda = torch.cuda.is_available() and (True if use_cuda is None else use_cuda)\n self.device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n self.num_classes = num_classes\n self.num_anchors = num_anchors\n self.anchor_step = len(anchors)//num_anchors\n #self.anchors = torch.stack(torch.FloatTensor(anchors).split(self.anchor_step)).to(self.device)\n self.anchors = torch.FloatTensor(anchors).view(self.num_anchors, self.anchor_step).to(self.device)\n self.rescore = 1\n self.coord_scale = 1\n self.noobject_scale = 1\n self.object_scale = 5\n self.class_scale = 1\n self.thresh = 0.6\n self.seen = 0\n\n def build_targets(self, pred_boxes, target, nH, nW):\n nB = target.size(0)\n nA = self.num_anchors\n conf_mask = torch.ones (nB, nA, nH, nW) * self.noobject_scale\n coord_mask = torch.zeros(nB, nA, nH, nW)\n cls_mask = torch.zeros(nB, nA, nH, nW)\n tcoord = torch.zeros( 4, nB, nA, nH, nW)\n tconf = torch.zeros(nB, nA, nH, nW)\n tcls = torch.zeros(nB, nA, nH, nW)\n\n nAnchors = nA*nH*nW\n nPixels = nH*nW\n nGT = 0 # number of ground truth\n nRecall = 0\n # it works faster on CPU than on GPU.\n anchors = self.anchors.to(\"cpu\")\n\n if self.seen < 12800:\n tcoord[0].fill_(0.5)\n tcoord[1].fill_(0.5)\n coord_mask.fill_(1)\n\n for b in range(nB):\n cur_pred_boxes = pred_boxes[b*nAnchors:(b+1)*nAnchors].t()\n cur_ious = torch.zeros(nAnchors)\n tbox = target[b].view(-1,5).to(\"cpu\")\n for t in range(50):\n if tbox[t][1] == 0:\n break\n gx, gw = [ i * nW for i in (tbox[t][1], tbox[t][3]) ]\n gy, gh = [ i * nH for i in (tbox[t][2], tbox[t][4]) ]\n cur_gt_boxes = torch.FloatTensor([gx, gy, gw, gh]).repeat(nAnchors,1).t()\n cur_ious = torch.max(cur_ious, multi_bbox_ious(cur_pred_boxes, cur_gt_boxes, x1y1x2y2=False))\n ignore_ix = cur_ious>self.thresh\n conf_mask[b][ignore_ix.view(nA,nH,nW)] = 0\n\n for t in range(50):\n if tbox[t][1] == 0:\n break\n nGT += 1\n gx, gw = [ i * nW for i in (tbox[t][1], tbox[t][3]) ]\n gy, gh = [ i * nH for i in (tbox[t][2], tbox[t][4]) ]\n gw, gh = gw.float(), gh.float()\n gi, gj = int(gx), int(gy)\n\n tmp_gt_boxes = torch.FloatTensor([0, 0, gw, gh]).repeat(nA,1).t()\n anchor_boxes = torch.cat((torch.zeros(nA, 2), anchors),1).t()\n tmp_ious = multi_bbox_ious(tmp_gt_boxes, anchor_boxes, x1y1x2y2=False)\n best_iou, best_n = torch.max(tmp_ious, 0)\n\n if self.anchor_step == 4: # this part is not tested.\n tmp_ious_mask = (tmp_ious==best_iou)\n if tmp_ious_mask.sum() > 0:\n gt_pos = torch.FloatTensor([gi, gj, gx, gy]).repeat(nA,1).t()\n an_pos = anchor_boxes[4:6] # anchor_boxes are consisted of [0 0 aw ah ax ay]\n dist = pow(((gt_pos[0]+an_pos[0])-gt_pos[2]),2) + pow(((gt_pos[1]+an_pos[1])-gt_pos[3]),2)\n dist[1-tmp_ious_mask]=10000 # set the large number for the small ious\n _, best_n = torch.min(dist,0)\n\n gt_box = torch.FloatTensor([gx, gy, gw, gh])\n pred_box = pred_boxes[b*nAnchors+best_n*nPixels+gj*nW+gi]\n iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)\n\n coord_mask[b][best_n][gj][gi] = 1\n cls_mask [b][best_n][gj][gi] = 1\n conf_mask [b][best_n][gj][gi] = self.object_scale\n tcoord [0][b][best_n][gj][gi] = gx - gi\n tcoord [1][b][best_n][gj][gi] = gy - gj\n tcoord [2][b][best_n][gj][gi] = math.log(gw/anchors[best_n][0])\n 
tcoord [3][b][best_n][gj][gi] = math.log(gh/anchors[best_n][1])\n tcls [b][best_n][gj][gi] = tbox[t][0]\n tconf [b][best_n][gj][gi] = iou if self.rescore else 1.\n if iou > 0.5:\n nRecall += 1\n\n return nGT, nRecall, coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls\n\n def get_mask_boxes(self, output):\n if not isinstance(self.anchors, torch.Tensor):\n self.anchors = torch.FloatTensor(self.anchors).view(self.num_anchors, self.anchor_step).to(self.device)\n masked_anchors = self.anchors.view(-1)\n num_anchors = torch.IntTensor([self.num_anchors]).to(self.device)\n return {'x':output, 'a':masked_anchors, 'n':num_anchors}\n\n def forward(self, output, target):\n #output : BxAs*(4+1+num_classes)*H*W\n t0 = time.time()\n nB = output.data.size(0) # batch size\n nA = self.num_anchors\n nC = self.num_classes\n nH = output.data.size(2)\n nW = output.data.size(3)\n cls_anchor_dim = nB*nA*nH*nW\n\n if not isinstance(self.anchors, torch.Tensor):\n self.anchors = torch.FloatTensor(self.anchors).view(self.num_anchors, self.anchor_step).to(self.device)\n\n output = output.view(nB, nA, (5+nC), nH, nW)\n cls_grid = torch.linspace(5,5+nC-1,nC).long().to(self.device)\n ix = torch.LongTensor(range(0,5)).to(self.device)\n pred_boxes = torch.FloatTensor(4, cls_anchor_dim).to(self.device)\n\n coord = output.index_select(2, ix[0:4]).view(nB*nA, -1, nH*nW).transpose(0,1).contiguous().view(-1,cls_anchor_dim) # x, y, w, h\n coord[0:2] = coord[0:2].sigmoid() # x, y\n conf = output.index_select(2, ix[4]).view(nB, nA, nH, nW).sigmoid()\n cls = output.index_select(2, cls_grid)\n cls = cls.view(nB*nA, nC, nH*nW).transpose(1,2).contiguous().view(cls_anchor_dim, nC)\n\n t1 = time.time()\n grid_x = torch.linspace(0, nW-1, nW).repeat(nB*nA, nH, 1).view(cls_anchor_dim).to(self.device)\n grid_y = torch.linspace(0, nH-1, nH).repeat(nW,1).t().repeat(nB*nA, 1, 1).view(cls_anchor_dim).to(self.device)\n anchor_w = self.anchors.index_select(1, ix[0]).repeat(1, nB*nH*nW).view(cls_anchor_dim)\n anchor_h = self.anchors.index_select(1, ix[1]).repeat(1, nB*nH*nW).view(cls_anchor_dim)\n\n pred_boxes[0] = coord[0] + grid_x\n pred_boxes[1] = coord[1] + grid_y\n pred_boxes[2] = coord[2].exp() * anchor_w\n pred_boxes[3] = coord[3].exp() * anchor_h\n # for build_targets. 
it works faster on CPU than on GPU\n pred_boxes = convert2cpu(pred_boxes.transpose(0,1).contiguous().view(-1,4)).detach()\n\n t2 = time.time()\n nGT, nRecall, coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls = \\\n self.build_targets(pred_boxes, target.detach(), nH, nW)\n\n cls_mask = (cls_mask == 1)\n tcls = tcls[cls_mask].long().view(-1)\n cls_mask = cls_mask.view(-1, 1).repeat(1,nC).to(self.device)\n cls = cls[cls_mask].view(-1, nC)\n\n nProposals = int((conf > 0.25).sum())\n\n tcoord = tcoord.view(4, cls_anchor_dim).to(self.device)\n tconf, tcls = tconf.to(self.device), tcls.to(self.device)\n coord_mask, conf_mask = coord_mask.view(cls_anchor_dim).to(self.device), conf_mask.sqrt().to(self.device)\n\n t3 = time.time()\n loss_coord = self.coord_scale * nn.MSELoss(size_average=False)(coord*coord_mask, tcoord*coord_mask)/2\n # sqrt(object_scale)/2 is almost equal to 1.\n loss_conf = nn.MSELoss(size_average=False)(conf*conf_mask, tconf*conf_mask)/2 \n loss_cls = self.class_scale * nn.CrossEntropyLoss(size_average=False)(cls, tcls) if cls.size(0) > 0 else 0\n loss = loss_coord + loss_conf + loss_cls\n t4 = time.time()\n if False:\n print('-'*30)\n print(' activation : %f' % (t1 - t0))\n print(' create pred_boxes : %f' % (t2 - t1))\n print(' build targets : %f' % (t3 - t2))\n print(' create loss : %f' % (t4 - t3))\n print(' total : %f' % (t4 - t0))\n print('%d: nGT %3d, nRC %3d, nPP %3d, loss: box %6.3f, conf %6.3f, class %6.3f, total %7.3f' \n % (self.seen, nGT, nRecall, nProposals, loss_coord, loss_conf, loss_cls, loss))\n if math.isnan(loss.item()):\n print(conf, tconf)\n sys.exit(0)\n return loss\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.linspace", "torch.ones", "torch.max", "torch.zeros", "torch.min", "torch.FloatTensor", "torch.cuda.is_available", "torch.IntTensor", "torch.device", "torch.nn.MSELoss" ] ]
dquail/GVFMinecraft
[ "5eae9ea9974ec604194b32cdb235765ea3fe7fb3" ]
[ "python/display.py" ]
[ "from constants import *\nimport sys\nimport cv2\nif sys.version_info[0] == 2:\n # Workaround for https://github.com/PythonCharmers/python-future/issues/262\n from Tkinter import *\nelse:\n from tkinter import *\n\nfrom PIL import ImageTk\nfrom PIL import Image\n\n\nimport matplotlib, sys\nimport matplotlib.pyplot as plt\n\nmatplotlib.use('TkAgg')\nimport numpy as np\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\n\n\nvideo_width = 432\nvideo_height = 240\n\nDISPLAY_WIDTH = WIDTH\nDISPLAY_HEIGHT = 432 + video_height\n\nclass Display(object):\n def __init__(self):\n plt.ion() #turn matplot interactive on\n self.root = Tk()\n self.root.wm_title(\"GVF Knowledge\")\n\n\n #Data 1\n self.tAFigure = Figure(figsize=(4.3,2), dpi=100)\n #self.a = self.tAFigure.add_subplot(111)\n self.taPlot = self.tAFigure.add_subplot(111)\n self.taPlot.set_ylim(-0.05, 1.05)\n timeStepValues = np.arange(-50, 0, 1) #The last 50\n self.taPredictions = [0.0] * 50\n self.taPredictionLine, = self.taPlot.plot(timeStepValues, self.taPredictions, 'g', label = \"TA(predict)\")\n self.taActualValues = [0.0] * 50\n self.taActualLine, = self.taPlot.plot(timeStepValues, self.taActualValues, 'b', label=\"TA(actual)\")\n\n self.taPlot.legend()\n self.taCanvas = FigureCanvasTkAgg(self.tAFigure, master=self.root) #canvas.get_tk_widget().grid(row=1,column=4,columnspan=3,rowspan=20)\n\n self.taCanvas.draw()\n self.taCanvas.get_tk_widget().pack(side = \"top\", anchor = \"w\")\n\n\n\n\n self.canvas = Canvas(self.root, borderwidth=0, highlightthickness=0, width=WIDTH, height=HEIGHT, bg=\"black\")\n #self.canvas.config(width=WIDTH, height=HEIGHT)\n self.canvas.pack(padx=0, pady=0)\n #self.root_frame.pack()\n\n #Did touch display\n self.didTouch = StringVar()\n self.didTouchLabel = Label(self.root, textvariable = self.didTouch, font = 'Helvetica 18 bold')\n self.didTouchLabel.pack()\n\n #Touch prediction\n self.touchPrediction = StringVar()\n self.touchPredictionLabel = Label(self.root, textvariable = self.touchPrediction)\n self.touchPredictionLabel.pack(side = \"top\", anchor = \"w\")\n\n #Turn left and touch prediction\n self.turnLeftAndTouchPrediction = StringVar()\n self.turnLeftAndTouchPredictionLabel = Label(self.root, textvariable = self.turnLeftAndTouchPrediction)\n self.turnLeftAndTouchPredictionLabel.pack(side = \"top\", anchor = \"w\")\n\n #Turn right and touch prediction\n self.turnRightAndTouchPrediction = StringVar()\n self.turnRightAndTouchPredictionLabel = Label(self.root, textvariable = self.turnRightAndTouchPrediction)\n self.turnRightAndTouchPredictionLabel.pack(side = \"top\", anchor = \"w\")\n\n #Touch behind prediction\n self.touchBehindPrediction = StringVar()\n self.touchBehindPredictionLabel = Label(self.root, textvariable = self.touchBehindPrediction)\n self.touchBehindPredictionLabel.pack(side = \"top\", anchor = \"w\")\n\n\n #Wall Adjacent prediction\n self.isWallAdjacentPrediction = StringVar()\n self.isWallAdjacentPredictionLabel = Label(self.root, textvariable = self.isWallAdjacentPrediction)\n self.isWallAdjacentPredictionLabel.pack(side = \"top\", anchor = \"w\")\n\n #Distance to adjacent prediction\n self.distanceToAdjacent = StringVar()\n self.distanceToAdjacentLabel = Label(self.root, textvariable = self.distanceToAdjacent)\n self.distanceToAdjacentLabel.pack(side = \"top\", anchor = \"w\")\n\n #Number of Steps Left\n self.numberOfStepsLeft = StringVar()\n self.numberOfStepsLeftLabel = Label(self.root, textvariable=self.numberOfStepsLeft)\n 
self.numberOfStepsLeftLabel.pack(side=\"top\", anchor=\"w\")\n\n #Number of Steps Right\n self.numberOfStepsRight = StringVar()\n self.numberOfStepsRightLabel = Label(self.root, textvariable=self.numberOfStepsRight)\n self.numberOfStepsRightLabel.pack(side=\"top\", anchor=\"w\")\n\n #Number of Steps Back\n self.numberOfStepsBack = StringVar()\n self.numberOfStepsBackLabel = Label(self.root, textvariable=self.numberOfStepsBack)\n self.numberOfStepsBackLabel.pack(side=\"top\", anchor=\"w\")\n\n #Number of steps\n self.numberOfSteps = StringVar()\n self.numberOfStepsLabel = Label(self.root, textvariable = self.numberOfSteps)\n self.numberOfStepsLabel.pack(side = \"top\", anchor = \"w\")\n\n self.reset()\n\n\n\n def reset(self):\n self.canvas.delete(\"all\")\n\n self.image = Image.new('RGB', (WIDTH, HEIGHT))\n self.photoImage = None\n self.image_handle = None\n self.current_frame = 0\n\n def update(self, image,\n numberOfSteps,\n currentTouchPrediction,\n didTouch,\n turnLeftAndTouchPrediction,\n wallInFront,\n wallOnLeft,\n turnRightAndTouchPrediction,\n wallOnRight,\n touchBehindPrediction,\n wallBehind,\n touchAdjacentPrediction,\n distanceToAdjacent,\n distanceToAdjacentPrediction,\n distanceToLeft,\n distanceToLeftPrediction,\n distanceToRight,\n distanceToRightPrediction,\n distanceBack,\n distanceBackPrediction,\n wallAdjacent):\n\n #Update labels\n self.touchPrediction.set(\"T: \" + str(currentTouchPrediction))\n if wallInFront:\n self.touchPredictionLabel.config(fg = 'blue')\n else:\n self.touchPredictionLabel.config(fg = 'red')\n\n self.turnLeftAndTouchPrediction.set(\"TL: \" + str(turnLeftAndTouchPrediction))\n if wallOnLeft:\n self.turnLeftAndTouchPredictionLabel.config(fg='blue')\n else:\n self.turnLeftAndTouchPredictionLabel.config(fg = 'red')\n\n self.turnRightAndTouchPrediction.set(\"TR: \" + str(turnRightAndTouchPrediction))\n if wallOnRight:\n self.turnRightAndTouchPredictionLabel.config(fg='blue')\n else:\n self.turnRightAndTouchPredictionLabel.config(fg='red')\n\n self.touchBehindPrediction.set(\"TB: \" + str(touchBehindPrediction))\n if wallBehind:\n self.touchBehindPredictionLabel.config(fg='blue')\n else:\n self.touchBehindPredictionLabel.config(fg='red')\n\n self.isWallAdjacentPrediction.set(\"TA: \" + str(touchAdjacentPrediction))\n if wallAdjacent:\n self.isWallAdjacentPredictionLabel.config(fg = 'blue')\n else:\n self.isWallAdjacentPredictionLabel.config(fg = 'red')\n\n self.distanceToAdjacent.set(\"DTA: \" + str(round(distanceToAdjacentPrediction, 1)) + \" (\" + str(distanceToAdjacent) + \")\")\n self.numberOfStepsLeft.set(\"DTL: \" + str(round(distanceToLeftPrediction, 1)) + \" (\" + str(distanceToLeft) + \")\")\n self.numberOfStepsRight.set(\"DTR: \" + str(round(distanceToRightPrediction, 1)) + \" (\" + str(distanceToRight) + \")\")\n self.numberOfStepsBack.set(\"DTB: \" + str(round(distanceBackPrediction)) + \" (\" + str(distanceBack) + \")\")\n\n self.numberOfSteps.set(\"Step: \" + str(numberOfSteps))\n if didTouch:\n self.didTouch.set(\"TOUCHED\")\n else:\n self.didTouch.set(\"\")\n\n #Update image\n #change from BGR to RGB\n l = len(image)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # convert the cv2 images to PIL format...\n self.image = Image.fromarray(image)\n\n # ...and then to ImageTk format\n self.photoImage = ImageTk.PhotoImage(self.image)\n\n\n # And update/create the canvas image:\n if self.image_handle is None:\n self.image_handle = self.canvas.create_image(WIDTH/2,HEIGHT/2,\n image=self.photoImage)\n else:\n 
self.canvas.itemconfig(self.image_handle, image=self.photoImage)\n\n self.taPredictions.pop(0)\n self.taPredictions.append(currentTouchPrediction)\n self.taActualValues.pop(0)\n if (wallInFront):\n touchActual = 1.0\n else:\n touchActual = 0.0\n self.taActualValues.append(touchActual)\n self.taPredictionLine.set_ydata(self.taPredictions)\n self.taActualLine.set_ydata(self.taActualValues)\n self.taCanvas.draw()\n self.root.update()\n\n" ]
[ [ "matplotlib.figure.Figure", "numpy.arange", "matplotlib.use", "matplotlib.pyplot.ion", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
jdkent/brainiak
[ "04719aaa651e9cdab4d77e008495edac60b035b6" ]
[ "tests/funcalign/test_srm.py" ]
[ "# Copyright 2016 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom sklearn.exceptions import NotFittedError\nimport pytest\n\n\ndef test_can_instantiate():\n import brainiak.funcalign.srm\n s = brainiak.funcalign.srm.SRM()\n assert s, \"Invalid SRM instance!\"\n\n import numpy as np\n np.random.seed(0)\n\n voxels = 100\n samples = 500\n subjects = 2\n features = 3\n\n s = brainiak.funcalign.srm.SRM(n_iter=5, features=features)\n assert s, \"Invalid SRM instance!\"\n\n # Create a Shared response S with K = 3\n theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)\n z = np.linspace(-2, 2, samples)\n r = z**2 + 1\n x = r * np.sin(theta)\n y = r * np.cos(theta)\n\n S = np.vstack((x, y, z))\n\n X = []\n W = []\n Q, R = np.linalg.qr(np.random.random((voxels, features)))\n W.append(Q)\n X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))\n\n # Check that transform does NOT run before fitting the model\n with pytest.raises(NotFittedError):\n s.transform(X)\n print(\"Test: transforming before fitting the model\")\n\n # Check that it does NOT run with 1 subject\n with pytest.raises(ValueError):\n s.fit(X)\n print(\"Test: running SRM with 1 subject\")\n\n for subject in range(1, subjects):\n Q, R = np.linalg.qr(np.random.random((voxels, features)))\n W.append(Q)\n X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))\n\n # Check that runs with 2 subject\n s.fit(X)\n from pathlib import Path\n sr_v0_4 = np.load(Path(__file__).parent / \"sr_v0_4.npz\")['sr']\n assert(np.allclose(sr_v0_4, s.s_))\n\n assert len(s.w_) == subjects, (\n \"Invalid computation of SRM! (wrong # subjects in W)\")\n for subject in range(subjects):\n assert s.w_[subject].shape[0] == voxels, (\n \"Invalid computation of SRM! (wrong # voxels in W)\")\n assert s.w_[subject].shape[1] == features, (\n \"Invalid computation of SRM! (wrong # features in W)\")\n ortho = np.linalg.norm(s.w_[subject].T.dot(s.w_[subject])\n - np.eye(s.w_[subject].shape[1]),\n 'fro')\n assert ortho < 1e-7, \"A Wi mapping is not orthonormal in SRM.\"\n difference = np.linalg.norm(X[subject] - s.w_[subject].dot(s.s_),\n 'fro')\n datanorm = np.linalg.norm(X[subject], 'fro')\n assert difference/datanorm < 1.0, \"Model seems incorrectly computed.\"\n assert s.s_.shape[0] == features, (\n \"Invalid computation of SRM! (wrong # features in S)\")\n assert s.s_.shape[1] == samples, (\n \"Invalid computation of SRM! (wrong # samples in S)\")\n\n # Check that it does run to compute the shared response after the model\n # computation\n new_s = s.transform(X)\n\n assert len(new_s) == subjects, (\n \"Invalid computation of SRM! (wrong # subjects after transform)\")\n for subject in range(subjects):\n assert new_s[subject].shape[0] == features, (\n \"Invalid computation of SRM! (wrong # features after transform)\")\n assert new_s[subject].shape[1] == samples, (\n \"Invalid computation of SRM! 
(wrong # samples after transform)\")\n\n # Check that it does NOT run with non-matching number of subjects\n with pytest.raises(ValueError):\n s.transform(X[1])\n print(\"Test: transforming with non-matching number of subjects\")\n\n # Check that it does not run without enough samples (TRs).\n with pytest.raises(ValueError):\n s.set_params(features=(samples+1))\n s.fit(X)\n print(\"Test: not enough samples\")\n\n # Check that it does not run with different number of samples (TRs)\n S2 = S[:, :-2]\n X.append(Q.dot(S2))\n with pytest.raises(ValueError):\n s.fit(X)\n print(\"Test: different number of samples per subject\")\n\n\ndef test_new_subject():\n import brainiak.funcalign.srm\n s = brainiak.funcalign.srm.SRM()\n assert s, \"Invalid SRM instance!\"\n\n import numpy as np\n np.random.seed(0)\n\n voxels = 100\n samples = 500\n subjects = 3\n features = 3\n\n s = brainiak.funcalign.srm.SRM(n_iter=5, features=features)\n assert s, \"Invalid SRM instance!\"\n\n # Create a Shared response S with K = 3\n theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)\n z = np.linspace(-2, 2, samples)\n r = z**2 + 1\n x = r * np.sin(theta)\n y = r * np.cos(theta)\n\n S = np.vstack((x, y, z))\n\n X = []\n W = []\n Q, R = np.linalg.qr(np.random.random((voxels, features)))\n W.append(Q)\n X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))\n\n for subject in range(1, subjects):\n Q, R = np.linalg.qr(np.random.random((voxels, features)))\n W.append(Q)\n X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))\n\n # Check that transform does NOT run before fitting the model\n with pytest.raises(NotFittedError):\n s.transform_subject(X)\n print(\"Test: transforming before fitting the model\")\n\n # Check that runs with 3 subject\n s.fit(X)\n\n # Check that you get an error when the data is the wrong shape\n with pytest.raises(ValueError):\n s.transform_subject(X[0].T)\n\n # Check that it does run to compute a new subject\n new_w = s.transform_subject(X[0])\n assert new_w.shape[1] == features, (\n \"Invalid computation of SRM! (wrong # features for new subject)\")\n assert new_w.shape[0] == voxels, (\n \"Invalid computation of SRM! (wrong # voxels for new subject)\")\n\n # Check that these analyses work with the deterministic SRM too\n ds = brainiak.funcalign.srm.DetSRM(n_iter=5, features=features)\n\n # Check that transform does NOT run before fitting the model\n with pytest.raises(NotFittedError):\n ds.transform_subject(X)\n print(\"Test: transforming before fitting the model\")\n\n # Check that runs with 3 subject\n ds.fit(X)\n\n # Check that you get an error when the data is the wrong shape\n with pytest.raises(ValueError):\n ds.transform_subject(X[0].T)\n\n # Check that it does run to compute a new subject\n new_w = ds.transform_subject(X[0])\n assert new_w.shape[1] == features, (\n \"Invalid computation of SRM! (wrong # features for new subject)\")\n assert new_w.shape[0] == voxels, (\n \"Invalid computation of SRM! 
(wrong # voxels for new subject)\")\n\n\ndef test_det_srm():\n import brainiak.funcalign.srm\n model = brainiak.funcalign.srm.DetSRM()\n assert model, \"Invalid DetSRM instance!\"\n\n import numpy as np\n\n voxels = 100\n samples = 500\n subjects = 2\n features = 3\n\n model = brainiak.funcalign.srm.DetSRM(n_iter=5, features=features)\n assert model, \"Invalid DetSRM instance!\"\n\n # Create a Shared response S with K = 3\n theta = np.linspace(-4 * np.pi, 4 * np.pi, samples)\n z = np.linspace(-2, 2, samples)\n r = z**2 + 1\n x = r * np.sin(theta)\n y = r * np.cos(theta)\n\n S = np.vstack((x, y, z))\n\n X = []\n W = []\n Q, R = np.linalg.qr(np.random.random((voxels, features)))\n W.append(Q)\n X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))\n\n # Check that transform does NOT run before fitting the model\n with pytest.raises(NotFittedError):\n model.transform(X)\n print(\"Test: transforming before fitting the model\")\n\n # Check that it does NOT run with 1 subject\n with pytest.raises(ValueError):\n model.fit(X)\n print(\"Test: running DetSRM with 1 subject\")\n\n for subject in range(1, subjects):\n Q, R = np.linalg.qr(np.random.random((voxels, features)))\n W.append(Q)\n X.append(Q.dot(S) + 0.1*np.random.random((voxels, samples)))\n\n # Check that runs with 2 subject\n model.fit(X)\n\n assert len(model.w_) == subjects, (\n \"Invalid computation of DetSRM! (wrong # subjects in W)\")\n for subject in range(subjects):\n assert model.w_[subject].shape[0] == voxels, (\n \"Invalid computation of DetSRM! (wrong # voxels in W)\")\n assert model.w_[subject].shape[1] == features, (\n \"Invalid computation of DetSRM! (wrong # features in W)\")\n ortho = np.linalg.norm(model.w_[subject].T.dot(model.w_[subject])\n - np.eye(model.w_[subject].shape[1]),\n 'fro')\n assert ortho < 1e-7, \"A Wi mapping is not orthonormal in DetSRM.\"\n difference = np.linalg.norm(X[subject]\n - model.w_[subject].dot(model.s_),\n 'fro')\n datanorm = np.linalg.norm(X[subject], 'fro')\n assert difference/datanorm < 1.0, \"Model seems incorrectly computed.\"\n assert model.s_.shape[0] == features, (\n \"Invalid computation of DetSRM! (wrong # features in S)\")\n assert model.s_.shape[1] == samples, (\n \"Invalid computation of DetSRM! (wrong # samples in S)\")\n\n # Check that it does run to compute the shared response after the model\n # computation\n new_s = model.transform(X)\n\n assert len(new_s) == subjects, (\n \"Invalid computation of DetSRM! (wrong # subjects after transform)\")\n for subject in range(subjects):\n assert new_s[subject].shape[0] == features, (\n \"Invalid computation of DetSRM! (wrong # features after \"\n \"transform)\")\n assert new_s[subject].shape[1] == samples, (\n \"Invalid computation of DetSRM! (wrong # samples after transform)\")\n\n # Check that it does run to compute a new subject\n new_w = model.transform_subject(X[0])\n assert new_w.shape[1] == features, (\n \"Invalid computation of SRM! (wrong # features for new subject)\")\n assert new_w.shape[0] == voxels, (\n \"Invalid computation of SRM! 
(wrong # voxels for new subject)\")\n\n # Check that it does NOT run with non-matching number of subjects\n with pytest.raises(ValueError):\n model.transform(X[1])\n print(\"Test: transforming with non-matching number of subjects\")\n\n # Check that it does not run without enough samples (TRs).\n with pytest.raises(ValueError):\n model.set_params(features=(samples+1))\n model.fit(X)\n print(\"Test: not enough samples\")\n\n # Check that it does not run with different number of samples (TRs)\n S2 = S[:, :-2]\n X.append(Q.dot(S2))\n with pytest.raises(ValueError):\n model.fit(X)\n print(\"Test: different number of samples per subject\")\n" ]
[ [ "numpy.random.random", "numpy.allclose", "numpy.random.seed", "numpy.linspace", "numpy.eye", "numpy.cos", "numpy.linalg.norm", "numpy.sin", "numpy.vstack" ] ]
ryanfadholi/kmeans-py
[ "c81cbdf1bd27f99ee2b037dd057e5b9f87fe74d4" ]
[ "main.py" ]
[ "import numpy as np \n\nfrom simplekmeans import kmeans\n\nk_value = 5\ndatapoints = np.loadtxt(\"example.txt\")\nkmeans(k_value, datapoints)" ]
[ [ "numpy.loadtxt" ] ]
gabrielclow/pandas
[ "6ef4be3f8f269f147b5abedecf7da6f19af305d3" ]
[ "pandas/tests/plotting/test_datetimelike.py" ]
[ "\"\"\" Test cases for time series specific (freq conversion, etc) \"\"\"\n\nfrom datetime import datetime, timedelta, date, time\nimport pickle\n\nimport pytest\nfrom pandas.compat import lrange, zip\n\nimport numpy as np\nfrom pandas import Index, Series, DataFrame, NaT\nfrom pandas.compat import is_platform_mac, PY3\nfrom pandas.core.indexes.datetimes import date_range, bdate_range\nfrom pandas.core.indexes.timedeltas import timedelta_range\nfrom pandas.tseries.offsets import DateOffset\nfrom pandas.core.indexes.period import period_range, Period, PeriodIndex\nfrom pandas.core.resample import DatetimeIndex\n\nfrom pandas.util.testing import assert_series_equal, ensure_clean\nimport pandas.util.testing as tm\nimport pandas.util._test_decorators as td\n\nfrom pandas.tests.plotting.common import (TestPlotBase,\n _skip_if_no_scipy_gaussian_kde)\n\n\n@td.skip_if_no_mpl\nclass TestTSPlot(TestPlotBase):\n\n def setup_method(self, method):\n TestPlotBase.setup_method(self, method)\n\n freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q', 'A']\n idx = [period_range('12/31/1999', freq=x, periods=100) for x in freq]\n self.period_ser = [Series(np.random.randn(len(x)), x) for x in idx]\n self.period_df = [DataFrame(np.random.randn(len(x), 3), index=x,\n columns=['A', 'B', 'C'])\n for x in idx]\n\n freq = ['S', 'T', 'H', 'D', 'W', 'M', 'Q-DEC', 'A', '1B30Min']\n idx = [date_range('12/31/1999', freq=x, periods=100) for x in freq]\n self.datetime_ser = [Series(np.random.randn(len(x)), x) for x in idx]\n self.datetime_df = [DataFrame(np.random.randn(len(x), 3), index=x,\n columns=['A', 'B', 'C'])\n for x in idx]\n\n def teardown_method(self, method):\n tm.close()\n\n @pytest.mark.slow\n def test_ts_plot_with_tz(self):\n # GH2877\n index = date_range('1/1/2011', periods=2, freq='H',\n tz='Europe/Brussels')\n ts = Series([188.5, 328.25], index=index)\n _check_plot_works(ts.plot)\n\n def test_fontsize_set_correctly(self):\n # For issue #8765\n df = DataFrame(np.random.randn(10, 9), index=range(10))\n fig, ax = self.plt.subplots()\n df.plot(fontsize=2, ax=ax)\n for label in (ax.get_xticklabels() + ax.get_yticklabels()):\n assert label.get_fontsize() == 2\n\n @pytest.mark.slow\n def test_frame_inferred(self):\n # inferred freq\n idx = date_range('1/1/1987', freq='MS', periods=100)\n idx = DatetimeIndex(idx.values, freq=None)\n\n df = DataFrame(np.random.randn(len(idx), 3), index=idx)\n _check_plot_works(df.plot)\n\n # axes freq\n idx = idx[0:40].union(idx[45:99])\n df2 = DataFrame(np.random.randn(len(idx), 3), index=idx)\n _check_plot_works(df2.plot)\n\n # N > 1\n idx = date_range('2008-1-1 00:15:00', freq='15T', periods=10)\n idx = DatetimeIndex(idx.values, freq=None)\n df = DataFrame(np.random.randn(len(idx), 3), index=idx)\n _check_plot_works(df.plot)\n\n def test_is_error_nozeroindex(self):\n # GH11858\n i = np.array([1, 2, 3])\n a = DataFrame(i, index=i)\n _check_plot_works(a.plot, xerr=a)\n _check_plot_works(a.plot, yerr=a)\n\n def test_nonnumeric_exclude(self):\n idx = date_range('1/1/1987', freq='A', periods=3)\n df = DataFrame({'A': [\"x\", \"y\", \"z\"], 'B': [1, 2, 3]}, idx)\n\n fig, ax = self.plt.subplots()\n df.plot(ax=ax) # it works\n assert len(ax.get_lines()) == 1 # B was plotted\n self.plt.close(fig)\n\n pytest.raises(TypeError, df['A'].plot)\n\n @pytest.mark.slow\n def test_tsplot(self):\n from pandas.tseries.plotting import tsplot\n\n _, ax = self.plt.subplots()\n ts = tm.makeTimeSeries()\n\n f = lambda *args, **kwds: tsplot(s, self.plt.Axes.plot, *args, **kwds)\n\n for s in self.period_ser:\n 
_check_plot_works(f, s.index.freq, ax=ax, series=s)\n\n for s in self.datetime_ser:\n _check_plot_works(f, s.index.freq.rule_code, ax=ax, series=s)\n\n for s in self.period_ser:\n _check_plot_works(s.plot, ax=ax)\n\n for s in self.datetime_ser:\n _check_plot_works(s.plot, ax=ax)\n\n _, ax = self.plt.subplots()\n ts.plot(style='k', ax=ax)\n color = (0., 0., 0., 1) if self.mpl_ge_2_0_0 else (0., 0., 0.)\n assert color == ax.get_lines()[0].get_color()\n\n def test_both_style_and_color(self):\n\n ts = tm.makeTimeSeries()\n pytest.raises(ValueError, ts.plot, style='b-', color='#000099')\n\n s = ts.reset_index(drop=True)\n pytest.raises(ValueError, s.plot, style='b-', color='#000099')\n\n @pytest.mark.slow\n def test_high_freq(self):\n freaks = ['ms', 'us']\n for freq in freaks:\n _, ax = self.plt.subplots()\n rng = date_range('1/1/2012', periods=100000, freq=freq)\n ser = Series(np.random.randn(len(rng)), rng)\n _check_plot_works(ser.plot, ax=ax)\n\n def test_get_datevalue(self):\n from pandas.plotting._converter import get_datevalue\n assert get_datevalue(None, 'D') is None\n assert get_datevalue(1987, 'A') == 1987\n assert (get_datevalue(Period(1987, 'A'), 'M') ==\n Period('1987-12', 'M').ordinal)\n assert (get_datevalue('1/1/1987', 'D') ==\n Period('1987-1-1', 'D').ordinal)\n\n @pytest.mark.slow\n def test_ts_plot_format_coord(self):\n def check_format_of_first_point(ax, expected_string):\n first_line = ax.get_lines()[0]\n first_x = first_line.get_xdata()[0].ordinal\n first_y = first_line.get_ydata()[0]\n try:\n assert expected_string == ax.format_coord(first_x, first_y)\n except (ValueError):\n pytest.skip(\"skipping test because issue forming \"\n \"test comparison GH7664\")\n\n annual = Series(1, index=date_range('2014-01-01', periods=3,\n freq='A-DEC'))\n _, ax = self.plt.subplots()\n annual.plot(ax=ax)\n check_format_of_first_point(ax, 't = 2014 y = 1.000000')\n\n # note this is added to the annual plot already in existence, and\n # changes its freq field\n daily = Series(1, index=date_range('2014-01-01', periods=3, freq='D'))\n daily.plot(ax=ax)\n check_format_of_first_point(ax,\n 't = 2014-01-01 y = 1.000000')\n tm.close()\n\n # tsplot\n _, ax = self.plt.subplots()\n from pandas.tseries.plotting import tsplot\n tsplot(annual, self.plt.Axes.plot, ax=ax)\n check_format_of_first_point(ax, 't = 2014 y = 1.000000')\n tsplot(daily, self.plt.Axes.plot, ax=ax)\n check_format_of_first_point(ax, 't = 2014-01-01 y = 1.000000')\n\n @pytest.mark.slow\n def test_line_plot_period_series(self):\n for s in self.period_ser:\n _check_plot_works(s.plot, s.index.freq)\n\n @pytest.mark.slow\n def test_line_plot_datetime_series(self):\n for s in self.datetime_ser:\n _check_plot_works(s.plot, s.index.freq.rule_code)\n\n @pytest.mark.slow\n def test_line_plot_period_frame(self):\n for df in self.period_df:\n _check_plot_works(df.plot, df.index.freq)\n\n @pytest.mark.slow\n def test_line_plot_datetime_frame(self):\n for df in self.datetime_df:\n freq = df.index.to_period(df.index.freq.rule_code).freq\n _check_plot_works(df.plot, freq)\n\n @pytest.mark.slow\n def test_line_plot_inferred_freq(self):\n for ser in self.datetime_ser:\n ser = Series(ser.values, Index(np.asarray(ser.index)))\n _check_plot_works(ser.plot, ser.index.inferred_freq)\n\n ser = ser[[0, 3, 5, 6]]\n _check_plot_works(ser.plot)\n\n def test_fake_inferred_business(self):\n _, ax = self.plt.subplots()\n rng = date_range('2001-1-1', '2001-1-10')\n ts = Series(lrange(len(rng)), rng)\n ts = ts[:3].append(ts[5:])\n ts.plot(ax=ax)\n assert not 
hasattr(ax, 'freq')\n\n @pytest.mark.slow\n def test_plot_offset_freq(self):\n ser = tm.makeTimeSeries()\n _check_plot_works(ser.plot)\n\n dr = date_range(ser.index[0], freq='BQS', periods=10)\n ser = Series(np.random.randn(len(dr)), dr)\n _check_plot_works(ser.plot)\n\n @pytest.mark.slow\n def test_plot_multiple_inferred_freq(self):\n dr = Index([datetime(2000, 1, 1), datetime(2000, 1, 6), datetime(\n 2000, 1, 11)])\n ser = Series(np.random.randn(len(dr)), dr)\n _check_plot_works(ser.plot)\n\n @pytest.mark.slow\n def test_uhf(self):\n import pandas.plotting._converter as conv\n idx = date_range('2012-6-22 21:59:51.960928', freq='L', periods=500)\n df = DataFrame(np.random.randn(len(idx), 2), idx)\n\n _, ax = self.plt.subplots()\n df.plot(ax=ax)\n axis = ax.get_xaxis()\n\n tlocs = axis.get_ticklocs()\n tlabels = axis.get_ticklabels()\n for loc, label in zip(tlocs, tlabels):\n xp = conv._from_ordinal(loc).strftime('%H:%M:%S.%f')\n rs = str(label.get_text())\n if len(rs):\n assert xp == rs\n\n @pytest.mark.slow\n def test_irreg_hf(self):\n idx = date_range('2012-6-22 21:59:51', freq='S', periods=100)\n df = DataFrame(np.random.randn(len(idx), 2), idx)\n\n irreg = df.iloc[[0, 1, 3, 4]]\n _, ax = self.plt.subplots()\n irreg.plot(ax=ax)\n diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()\n\n sec = 1. / 24 / 60 / 60\n assert (np.fabs(diffs[1:] - [sec, sec * 2, sec]) < 1e-8).all()\n\n _, ax = self.plt.subplots()\n df2 = df.copy()\n df2.index = df.index.astype(object)\n df2.plot(ax=ax)\n diffs = Series(ax.get_lines()[0].get_xydata()[:, 0]).diff()\n assert (np.fabs(diffs[1:] - sec) < 1e-8).all()\n\n def test_irregular_datetime64_repr_bug(self):\n ser = tm.makeTimeSeries()\n ser = ser[[0, 1, 2, 7]]\n\n _, ax = self.plt.subplots()\n\n ret = ser.plot(ax=ax)\n assert ret is not None\n\n for rs, xp in zip(ax.get_lines()[0].get_xdata(), ser.index):\n assert rs == xp\n\n def test_business_freq(self):\n bts = tm.makePeriodSeries()\n _, ax = self.plt.subplots()\n bts.plot(ax=ax)\n assert ax.get_lines()[0].get_xydata()[0, 0] == bts.index[0].ordinal\n idx = ax.get_lines()[0].get_xdata()\n assert PeriodIndex(data=idx).freqstr == 'B'\n\n @pytest.mark.slow\n def test_business_freq_convert(self):\n n = tm.N\n tm.N = 300\n bts = tm.makeTimeSeries().asfreq('BM')\n tm.N = n\n ts = bts.to_period('M')\n _, ax = self.plt.subplots()\n bts.plot(ax=ax)\n assert ax.get_lines()[0].get_xydata()[0, 0] == ts.index[0].ordinal\n idx = ax.get_lines()[0].get_xdata()\n assert PeriodIndex(data=idx).freqstr == 'M'\n\n def test_nonzero_base(self):\n # GH2571\n idx = (date_range('2012-12-20', periods=24, freq='H') + timedelta(\n minutes=30))\n df = DataFrame(np.arange(24), index=idx)\n _, ax = self.plt.subplots()\n df.plot(ax=ax)\n rs = ax.get_lines()[0].get_xdata()\n assert not Index(rs).is_normalized\n\n def test_dataframe(self):\n bts = DataFrame({'a': tm.makeTimeSeries()})\n _, ax = self.plt.subplots()\n bts.plot(ax=ax)\n idx = ax.get_lines()[0].get_xdata()\n tm.assert_index_equal(bts.index.to_period(), PeriodIndex(idx))\n\n @pytest.mark.slow\n def test_axis_limits(self):\n\n def _test(ax):\n xlim = ax.get_xlim()\n ax.set_xlim(xlim[0] - 5, xlim[1] + 10)\n ax.get_figure().canvas.draw()\n result = ax.get_xlim()\n assert result[0] == xlim[0] - 5\n assert result[1] == xlim[1] + 10\n\n # string\n expected = (Period('1/1/2000', ax.freq),\n Period('4/1/2000', ax.freq))\n ax.set_xlim('1/1/2000', '4/1/2000')\n ax.get_figure().canvas.draw()\n result = ax.get_xlim()\n assert int(result[0]) == expected[0].ordinal\n assert 
int(result[1]) == expected[1].ordinal\n\n # datetime\n expected = (Period('1/1/2000', ax.freq),\n Period('4/1/2000', ax.freq))\n ax.set_xlim(datetime(2000, 1, 1), datetime(2000, 4, 1))\n ax.get_figure().canvas.draw()\n result = ax.get_xlim()\n assert int(result[0]) == expected[0].ordinal\n assert int(result[1]) == expected[1].ordinal\n fig = ax.get_figure()\n self.plt.close(fig)\n\n ser = tm.makeTimeSeries()\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n _test(ax)\n\n _, ax = self.plt.subplots()\n df = DataFrame({'a': ser, 'b': ser + 1})\n df.plot(ax=ax)\n _test(ax)\n\n df = DataFrame({'a': ser, 'b': ser + 1})\n axes = df.plot(subplots=True)\n\n for ax in axes:\n _test(ax)\n\n def test_get_finder(self):\n import pandas.plotting._converter as conv\n\n assert conv.get_finder('B') == conv._daily_finder\n assert conv.get_finder('D') == conv._daily_finder\n assert conv.get_finder('M') == conv._monthly_finder\n assert conv.get_finder('Q') == conv._quarterly_finder\n assert conv.get_finder('A') == conv._annual_finder\n assert conv.get_finder('W') == conv._daily_finder\n\n @pytest.mark.slow\n def test_finder_daily(self):\n day_lst = [10, 40, 252, 400, 950, 2750, 10000]\n\n if self.mpl_ge_2_0_0:\n xpl1 = [7565, 7564, 7553, 7546, 7518, 7428, 7066]\n xpl2 = [7566, 7564, 7554, 7546, 7519, 7429, 7066]\n else:\n xpl1 = xpl2 = [Period('1999-1-1', freq='B').ordinal] * len(day_lst)\n\n for i, n in enumerate(day_lst):\n xp = xpl1[i]\n rng = bdate_range('1999-1-1', periods=n)\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n assert xp == rs\n xp = xpl2[i]\n vmin, vmax = ax.get_xlim()\n ax.set_xlim(vmin + 0.9, vmax)\n rs = xaxis.get_majorticklocs()[0]\n assert xp == rs\n self.plt.close(ax.get_figure())\n\n @pytest.mark.slow\n def test_finder_quarterly(self):\n yrs = [3.5, 11]\n\n if self.mpl_ge_2_0_0:\n xpl1 = [68, 68]\n xpl2 = [72, 68]\n else:\n xpl1 = xpl2 = [Period('1988Q1').ordinal] * len(yrs)\n\n for i, n in enumerate(yrs):\n xp = xpl1[i]\n rng = period_range('1987Q2', periods=int(n * 4), freq='Q')\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n assert rs == xp\n xp = xpl2[i]\n (vmin, vmax) = ax.get_xlim()\n ax.set_xlim(vmin + 0.9, vmax)\n rs = xaxis.get_majorticklocs()[0]\n assert xp == rs\n self.plt.close(ax.get_figure())\n\n @pytest.mark.slow\n def test_finder_monthly(self):\n yrs = [1.15, 2.5, 4, 11]\n\n if self.mpl_ge_2_0_0:\n xpl1 = [216, 216, 204, 204]\n xpl2 = [216, 216, 216, 204]\n else:\n xpl1 = xpl2 = [Period('Jan 1988').ordinal] * len(yrs)\n\n for i, n in enumerate(yrs):\n xp = xpl1[i]\n rng = period_range('1987Q2', periods=int(n * 12), freq='M')\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n assert rs == xp\n xp = xpl2[i]\n vmin, vmax = ax.get_xlim()\n ax.set_xlim(vmin + 0.9, vmax)\n rs = xaxis.get_majorticklocs()[0]\n assert xp == rs\n self.plt.close(ax.get_figure())\n\n def test_finder_monthly_long(self):\n rng = period_range('1988Q1', periods=24 * 12, freq='M')\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n xp = Period('1989Q1', 'M').ordinal\n assert rs == xp\n\n @pytest.mark.slow\n def test_finder_annual(self):\n if self.mpl_ge_2_0_0:\n xp = [1986, 
1986, 1990, 1990, 1995, 2020, 1970, 1970]\n else:\n xp = [1987, 1988, 1990, 1990, 1995, 2020, 2070, 2170]\n\n for i, nyears in enumerate([5, 10, 19, 49, 99, 199, 599, 1001]):\n rng = period_range('1987', periods=nyears, freq='A')\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n assert rs == Period(xp[i], freq='A').ordinal\n self.plt.close(ax.get_figure())\n\n @pytest.mark.slow\n def test_finder_minutely(self):\n nminutes = 50 * 24 * 60\n rng = date_range('1/1/1999', freq='Min', periods=nminutes)\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n if self.mpl_ge_2_0_0:\n xp = Period('1998-12-29 12:00', freq='Min').ordinal\n else:\n xp = Period('1/1/1999', freq='Min').ordinal\n assert rs == xp\n\n def test_finder_hourly(self):\n nhours = 23\n rng = date_range('1/1/1999', freq='H', periods=nhours)\n ser = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n xaxis = ax.get_xaxis()\n rs = xaxis.get_majorticklocs()[0]\n if self.mpl_ge_2_0_0:\n xp = Period('1998-12-31 22:00', freq='H').ordinal\n else:\n xp = Period('1/1/1999', freq='H').ordinal\n assert rs == xp\n\n @td.skip_if_mpl_1_5\n @pytest.mark.slow\n def test_gaps(self):\n ts = tm.makeTimeSeries()\n ts[5:25] = np.nan\n _, ax = self.plt.subplots()\n ts.plot(ax=ax)\n lines = ax.get_lines()\n assert len(lines) == 1\n l = lines[0]\n data = l.get_xydata()\n assert isinstance(data, np.ma.core.MaskedArray)\n mask = data.mask\n assert mask[5:25, 1].all()\n self.plt.close(ax.get_figure())\n\n # irregular\n ts = tm.makeTimeSeries()\n ts = ts[[0, 1, 2, 5, 7, 9, 12, 15, 20]]\n ts[2:5] = np.nan\n _, ax = self.plt.subplots()\n ax = ts.plot(ax=ax)\n lines = ax.get_lines()\n assert len(lines) == 1\n l = lines[0]\n data = l.get_xydata()\n assert isinstance(data, np.ma.core.MaskedArray)\n mask = data.mask\n assert mask[2:5, 1].all()\n self.plt.close(ax.get_figure())\n\n # non-ts\n idx = [0, 1, 2, 5, 7, 9, 12, 15, 20]\n ser = Series(np.random.randn(len(idx)), idx)\n ser[2:5] = np.nan\n _, ax = self.plt.subplots()\n ser.plot(ax=ax)\n lines = ax.get_lines()\n assert len(lines) == 1\n l = lines[0]\n data = l.get_xydata()\n assert isinstance(data, np.ma.core.MaskedArray)\n mask = data.mask\n assert mask[2:5, 1].all()\n\n @td.skip_if_mpl_1_5\n @pytest.mark.slow\n def test_gap_upsample(self):\n low = tm.makeTimeSeries()\n low[5:25] = np.nan\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n\n idxh = date_range(low.index[0], low.index[-1], freq='12h')\n s = Series(np.random.randn(len(idxh)), idxh)\n s.plot(secondary_y=True)\n lines = ax.get_lines()\n assert len(lines) == 1\n assert len(ax.right_ax.get_lines()) == 1\n l = lines[0]\n data = l.get_xydata()\n\n assert isinstance(data, np.ma.core.MaskedArray)\n mask = data.mask\n assert mask[5:25, 1].all()\n\n @pytest.mark.slow\n def test_secondary_y(self):\n ser = Series(np.random.randn(10))\n ser2 = Series(np.random.randn(10))\n fig, _ = self.plt.subplots()\n ax = ser.plot(secondary_y=True)\n assert hasattr(ax, 'left_ax')\n assert not hasattr(ax, 'right_ax')\n axes = fig.get_axes()\n l = ax.get_lines()[0]\n xp = Series(l.get_ydata(), l.get_xdata())\n assert_series_equal(ser, xp)\n assert ax.get_yaxis().get_ticks_position() == 'right'\n assert not axes[0].get_yaxis().get_visible()\n self.plt.close(fig)\n\n _, ax2 = self.plt.subplots()\n ser2.plot(ax=ax2)\n assert 
(ax2.get_yaxis().get_ticks_position() ==\n self.default_tick_position)\n self.plt.close(ax2.get_figure())\n\n ax = ser2.plot()\n ax2 = ser.plot(secondary_y=True)\n assert ax.get_yaxis().get_visible()\n assert not hasattr(ax, 'left_ax')\n assert hasattr(ax, 'right_ax')\n assert hasattr(ax2, 'left_ax')\n assert not hasattr(ax2, 'right_ax')\n\n @pytest.mark.slow\n def test_secondary_y_ts(self):\n idx = date_range('1/1/2000', periods=10)\n ser = Series(np.random.randn(10), idx)\n ser2 = Series(np.random.randn(10), idx)\n fig, _ = self.plt.subplots()\n ax = ser.plot(secondary_y=True)\n assert hasattr(ax, 'left_ax')\n assert not hasattr(ax, 'right_ax')\n axes = fig.get_axes()\n l = ax.get_lines()[0]\n xp = Series(l.get_ydata(), l.get_xdata()).to_timestamp()\n assert_series_equal(ser, xp)\n assert ax.get_yaxis().get_ticks_position() == 'right'\n assert not axes[0].get_yaxis().get_visible()\n self.plt.close(fig)\n\n _, ax2 = self.plt.subplots()\n ser2.plot(ax=ax2)\n assert (ax2.get_yaxis().get_ticks_position() ==\n self.default_tick_position)\n self.plt.close(ax2.get_figure())\n\n ax = ser2.plot()\n ax2 = ser.plot(secondary_y=True)\n assert ax.get_yaxis().get_visible()\n\n @pytest.mark.slow\n @td.skip_if_no_scipy\n def test_secondary_kde(self):\n if not self.mpl_ge_1_5_0:\n pytest.skip(\"mpl is not supported\")\n _skip_if_no_scipy_gaussian_kde()\n\n ser = Series(np.random.randn(10))\n fig, ax = self.plt.subplots()\n ax = ser.plot(secondary_y=True, kind='density', ax=ax)\n assert hasattr(ax, 'left_ax')\n assert not hasattr(ax, 'right_ax')\n axes = fig.get_axes()\n assert axes[1].get_yaxis().get_ticks_position() == 'right'\n\n @pytest.mark.slow\n def test_secondary_bar(self):\n ser = Series(np.random.randn(10))\n fig, ax = self.plt.subplots()\n ser.plot(secondary_y=True, kind='bar', ax=ax)\n axes = fig.get_axes()\n assert axes[1].get_yaxis().get_ticks_position() == 'right'\n\n @pytest.mark.slow\n def test_secondary_frame(self):\n df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])\n axes = df.plot(secondary_y=['a', 'c'], subplots=True)\n assert axes[0].get_yaxis().get_ticks_position() == 'right'\n assert (axes[1].get_yaxis().get_ticks_position() ==\n self.default_tick_position)\n assert axes[2].get_yaxis().get_ticks_position() == 'right'\n\n @pytest.mark.slow\n def test_secondary_bar_frame(self):\n df = DataFrame(np.random.randn(5, 3), columns=['a', 'b', 'c'])\n axes = df.plot(kind='bar', secondary_y=['a', 'c'], subplots=True)\n assert axes[0].get_yaxis().get_ticks_position() == 'right'\n assert (axes[1].get_yaxis().get_ticks_position() ==\n self.default_tick_position)\n assert axes[2].get_yaxis().get_ticks_position() == 'right'\n\n def test_mixed_freq_regular_first(self):\n # TODO\n s1 = tm.makeTimeSeries()\n s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]\n\n # it works!\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n\n ax2 = s2.plot(style='g', ax=ax)\n lines = ax2.get_lines()\n idx1 = PeriodIndex(lines[0].get_xdata())\n idx2 = PeriodIndex(lines[1].get_xdata())\n\n tm.assert_index_equal(idx1, s1.index.to_period('B'))\n tm.assert_index_equal(idx2, s2.index.to_period('B'))\n\n left, right = ax2.get_xlim()\n pidx = s1.index.to_period()\n assert left <= pidx[0].ordinal\n assert right >= pidx[-1].ordinal\n\n @pytest.mark.slow\n def test_mixed_freq_irregular_first(self):\n s1 = tm.makeTimeSeries()\n s2 = s1[[0, 5, 10, 11, 12, 13, 14, 15]]\n _, ax = self.plt.subplots()\n s2.plot(style='g', ax=ax)\n s1.plot(ax=ax)\n assert not hasattr(ax, 'freq')\n lines = ax.get_lines()\n x1 = lines[0].get_xdata()\n 
tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)\n x2 = lines[1].get_xdata()\n tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)\n\n def test_mixed_freq_regular_first_df(self):\n # GH 9852\n s1 = tm.makeTimeSeries().to_frame()\n s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n ax2 = s2.plot(style='g', ax=ax)\n lines = ax2.get_lines()\n idx1 = PeriodIndex(lines[0].get_xdata())\n idx2 = PeriodIndex(lines[1].get_xdata())\n assert idx1.equals(s1.index.to_period('B'))\n assert idx2.equals(s2.index.to_period('B'))\n left, right = ax2.get_xlim()\n pidx = s1.index.to_period()\n assert left <= pidx[0].ordinal\n assert right >= pidx[-1].ordinal\n\n @pytest.mark.slow\n def test_mixed_freq_irregular_first_df(self):\n # GH 9852\n s1 = tm.makeTimeSeries().to_frame()\n s2 = s1.iloc[[0, 5, 10, 11, 12, 13, 14, 15], :]\n _, ax = self.plt.subplots()\n s2.plot(style='g', ax=ax)\n s1.plot(ax=ax)\n assert not hasattr(ax, 'freq')\n lines = ax.get_lines()\n x1 = lines[0].get_xdata()\n tm.assert_numpy_array_equal(x1, s2.index.astype(object).values)\n x2 = lines[1].get_xdata()\n tm.assert_numpy_array_equal(x2, s1.index.astype(object).values)\n\n def test_mixed_freq_hf_first(self):\n idxh = date_range('1/1/1999', periods=365, freq='D')\n idxl = date_range('1/1/1999', periods=12, freq='M')\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n high.plot(ax=ax)\n low.plot(ax=ax)\n for l in ax.get_lines():\n assert PeriodIndex(data=l.get_xdata()).freq == 'D'\n\n @pytest.mark.slow\n def test_mixed_freq_alignment(self):\n ts_ind = date_range('2012-01-01 13:00', '2012-01-02', freq='H')\n ts_data = np.random.randn(12)\n\n ts = Series(ts_data, index=ts_ind)\n ts2 = ts.asfreq('T').interpolate()\n\n _, ax = self.plt.subplots()\n ax = ts.plot(ax=ax)\n ts2.plot(style='r', ax=ax)\n\n assert ax.lines[0].get_xdata()[0] == ax.lines[1].get_xdata()[0]\n\n @pytest.mark.slow\n def test_mixed_freq_lf_first(self):\n\n idxh = date_range('1/1/1999', periods=365, freq='D')\n idxl = date_range('1/1/1999', periods=12, freq='M')\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n low.plot(legend=True, ax=ax)\n high.plot(legend=True, ax=ax)\n for l in ax.get_lines():\n assert PeriodIndex(data=l.get_xdata()).freq == 'D'\n leg = ax.get_legend()\n assert len(leg.texts) == 2\n self.plt.close(ax.get_figure())\n\n idxh = date_range('1/1/1999', periods=240, freq='T')\n idxl = date_range('1/1/1999', periods=4, freq='H')\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n high.plot(ax=ax)\n for l in ax.get_lines():\n assert PeriodIndex(data=l.get_xdata()).freq == 'T'\n\n def test_mixed_freq_irreg_period(self):\n ts = tm.makeTimeSeries()\n irreg = ts[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 29]]\n rng = period_range('1/3/2000', periods=30, freq='B')\n ps = Series(np.random.randn(len(rng)), rng)\n _, ax = self.plt.subplots()\n irreg.plot(ax=ax)\n ps.plot(ax=ax)\n\n def test_mixed_freq_shared_ax(self):\n\n # GH13341, using sharex=True\n idx1 = date_range('2015-01-01', periods=3, freq='M')\n idx2 = idx1[:1].union(idx1[2:])\n s1 = Series(range(len(idx1)), idx1)\n s2 = Series(range(len(idx2)), idx2)\n\n fig, (ax1, ax2) = self.plt.subplots(nrows=2, sharex=True)\n s1.plot(ax=ax1)\n s2.plot(ax=ax2)\n\n assert ax1.freq == 'M'\n 
assert ax2.freq == 'M'\n assert (ax1.lines[0].get_xydata()[0, 0] ==\n ax2.lines[0].get_xydata()[0, 0])\n\n # using twinx\n fig, ax1 = self.plt.subplots()\n ax2 = ax1.twinx()\n s1.plot(ax=ax1)\n s2.plot(ax=ax2)\n\n assert (ax1.lines[0].get_xydata()[0, 0] ==\n ax2.lines[0].get_xydata()[0, 0])\n\n # TODO (GH14330, GH14322)\n # plotting the irregular first does not yet work\n # fig, ax1 = plt.subplots()\n # ax2 = ax1.twinx()\n # s2.plot(ax=ax1)\n # s1.plot(ax=ax2)\n # assert (ax1.lines[0].get_xydata()[0, 0] ==\n # ax2.lines[0].get_xydata()[0, 0])\n\n def test_nat_handling(self):\n\n _, ax = self.plt.subplots()\n\n dti = DatetimeIndex(['2015-01-01', NaT, '2015-01-03'])\n s = Series(range(len(dti)), dti)\n s.plot(ax=ax)\n xdata = ax.get_lines()[0].get_xdata()\n # plot x data is bounded by index values\n assert s.index.min() <= Series(xdata).min()\n assert Series(xdata).max() <= s.index.max()\n\n @pytest.mark.slow\n def test_to_weekly_resampling(self):\n idxh = date_range('1/1/1999', periods=52, freq='W')\n idxl = date_range('1/1/1999', periods=12, freq='M')\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n high.plot(ax=ax)\n low.plot(ax=ax)\n for l in ax.get_lines():\n assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq\n\n # tsplot\n from pandas.tseries.plotting import tsplot\n\n _, ax = self.plt.subplots()\n tsplot(high, self.plt.Axes.plot, ax=ax)\n lines = tsplot(low, self.plt.Axes.plot, ax=ax)\n for l in lines:\n assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq\n\n @pytest.mark.slow\n def test_from_weekly_resampling(self):\n idxh = date_range('1/1/1999', periods=52, freq='W')\n idxl = date_range('1/1/1999', periods=12, freq='M')\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n high.plot(ax=ax)\n\n expected_h = idxh.to_period().asi8.astype(np.float64)\n expected_l = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540, 1544,\n 1549, 1553, 1558, 1562], dtype=np.float64)\n for l in ax.get_lines():\n assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq\n xdata = l.get_xdata(orig=False)\n if len(xdata) == 12: # idxl lines\n tm.assert_numpy_array_equal(xdata, expected_l)\n else:\n tm.assert_numpy_array_equal(xdata, expected_h)\n tm.close()\n\n # tsplot\n from pandas.tseries.plotting import tsplot\n\n _, ax = self.plt.subplots()\n tsplot(low, self.plt.Axes.plot, ax=ax)\n lines = tsplot(high, self.plt.Axes.plot, ax=ax)\n for l in lines:\n assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq\n xdata = l.get_xdata(orig=False)\n if len(xdata) == 12: # idxl lines\n tm.assert_numpy_array_equal(xdata, expected_l)\n else:\n tm.assert_numpy_array_equal(xdata, expected_h)\n\n @pytest.mark.slow\n def test_from_resampling_area_line_mixed(self):\n idxh = date_range('1/1/1999', periods=52, freq='W')\n idxl = date_range('1/1/1999', periods=12, freq='M')\n high = DataFrame(np.random.rand(len(idxh), 3),\n index=idxh, columns=[0, 1, 2])\n low = DataFrame(np.random.rand(len(idxl), 3),\n index=idxl, columns=[0, 1, 2])\n\n # low to high\n for kind1, kind2 in [('line', 'area'), ('area', 'line')]:\n _, ax = self.plt.subplots()\n low.plot(kind=kind1, stacked=True, ax=ax)\n high.plot(kind=kind2, stacked=True, ax=ax)\n\n # check low dataframe result\n expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,\n 1544, 1549, 1553, 1558, 1562],\n dtype=np.float64)\n expected_y = np.zeros(len(expected_x), dtype=np.float64)\n 
for i in range(3):\n l = ax.lines[i]\n assert PeriodIndex(l.get_xdata()).freq == idxh.freq\n tm.assert_numpy_array_equal(l.get_xdata(orig=False),\n expected_x)\n # check stacked values are correct\n expected_y += low[i].values\n tm.assert_numpy_array_equal(l.get_ydata(orig=False),\n expected_y)\n\n # check high dataframe result\n expected_x = idxh.to_period().asi8.astype(np.float64)\n expected_y = np.zeros(len(expected_x), dtype=np.float64)\n for i in range(3):\n l = ax.lines[3 + i]\n assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq\n tm.assert_numpy_array_equal(l.get_xdata(orig=False),\n expected_x)\n expected_y += high[i].values\n tm.assert_numpy_array_equal(l.get_ydata(orig=False),\n expected_y)\n\n # high to low\n for kind1, kind2 in [('line', 'area'), ('area', 'line')]:\n _, ax = self.plt.subplots()\n high.plot(kind=kind1, stacked=True, ax=ax)\n low.plot(kind=kind2, stacked=True, ax=ax)\n\n # check high dataframe result\n expected_x = idxh.to_period().asi8.astype(np.float64)\n expected_y = np.zeros(len(expected_x), dtype=np.float64)\n for i in range(3):\n l = ax.lines[i]\n assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq\n tm.assert_numpy_array_equal(l.get_xdata(orig=False),\n expected_x)\n expected_y += high[i].values\n tm.assert_numpy_array_equal(l.get_ydata(orig=False),\n expected_y)\n\n # check low dataframe result\n expected_x = np.array([1514, 1519, 1523, 1527, 1531, 1536, 1540,\n 1544, 1549, 1553, 1558, 1562],\n dtype=np.float64)\n expected_y = np.zeros(len(expected_x), dtype=np.float64)\n for i in range(3):\n l = ax.lines[3 + i]\n assert PeriodIndex(data=l.get_xdata()).freq == idxh.freq\n tm.assert_numpy_array_equal(l.get_xdata(orig=False),\n expected_x)\n expected_y += low[i].values\n tm.assert_numpy_array_equal(l.get_ydata(orig=False),\n expected_y)\n\n @pytest.mark.slow\n def test_mixed_freq_second_millisecond(self):\n # GH 7772, GH 7760\n idxh = date_range('2014-07-01 09:00', freq='S', periods=50)\n idxl = date_range('2014-07-01 09:00', freq='100L', periods=500)\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n # high to low\n _, ax = self.plt.subplots()\n high.plot(ax=ax)\n low.plot(ax=ax)\n assert len(ax.get_lines()) == 2\n for l in ax.get_lines():\n assert PeriodIndex(data=l.get_xdata()).freq == 'L'\n tm.close()\n\n # low to high\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n high.plot(ax=ax)\n assert len(ax.get_lines()) == 2\n for l in ax.get_lines():\n assert PeriodIndex(data=l.get_xdata()).freq == 'L'\n\n @pytest.mark.slow\n def test_irreg_dtypes(self):\n # date\n idx = [date(2000, 1, 1), date(2000, 1, 5), date(2000, 1, 20)]\n df = DataFrame(np.random.randn(len(idx), 3), Index(idx, dtype=object))\n _check_plot_works(df.plot)\n\n # np.datetime64\n idx = date_range('1/1/2000', periods=10)\n idx = idx[[0, 2, 5, 9]].astype(object)\n df = DataFrame(np.random.randn(len(idx), 3), idx)\n _, ax = self.plt.subplots()\n _check_plot_works(df.plot, ax=ax)\n\n @pytest.mark.slow\n def test_time(self):\n t = datetime(1, 1, 1, 3, 30, 0)\n deltas = np.random.randint(1, 20, 3).cumsum()\n ts = np.array([(t + timedelta(minutes=int(x))).time() for x in deltas])\n df = DataFrame({'a': np.random.randn(len(ts)),\n 'b': np.random.randn(len(ts))},\n index=ts)\n fig, ax = self.plt.subplots()\n df.plot(ax=ax)\n\n # verify tick labels\n fig.canvas.draw()\n ticks = ax.get_xticks()\n labels = ax.get_xticklabels()\n for t, l in zip(ticks, labels):\n m, s = divmod(int(t), 60)\n h, m = divmod(m, 60)\n rs = l.get_text()\n if len(rs) 
> 0:\n if s != 0:\n xp = time(h, m, s).strftime('%H:%M:%S')\n else:\n xp = time(h, m, s).strftime('%H:%M')\n assert xp == rs\n\n # change xlim\n ax.set_xlim('1:30', '5:00')\n\n # check tick labels again\n fig.canvas.draw()\n ticks = ax.get_xticks()\n labels = ax.get_xticklabels()\n for t, l in zip(ticks, labels):\n m, s = divmod(int(t), 60)\n h, m = divmod(m, 60)\n rs = l.get_text()\n if len(rs) > 0:\n if s != 0:\n xp = time(h, m, s).strftime('%H:%M:%S')\n else:\n xp = time(h, m, s).strftime('%H:%M')\n assert xp == rs\n\n @pytest.mark.slow\n def test_time_musec(self):\n t = datetime(1, 1, 1, 3, 30, 0)\n deltas = np.random.randint(1, 20, 3).cumsum()\n ts = np.array([(t + timedelta(microseconds=int(x))).time()\n for x in deltas])\n df = DataFrame({'a': np.random.randn(len(ts)),\n 'b': np.random.randn(len(ts))},\n index=ts)\n fig, ax = self.plt.subplots()\n ax = df.plot(ax=ax)\n\n # verify tick labels\n fig.canvas.draw()\n ticks = ax.get_xticks()\n labels = ax.get_xticklabels()\n for t, l in zip(ticks, labels):\n m, s = divmod(int(t), 60)\n\n us = int(round((t - int(t)) * 1e6))\n\n h, m = divmod(m, 60)\n rs = l.get_text()\n if len(rs) > 0:\n if (us % 1000) != 0:\n xp = time(h, m, s, us).strftime('%H:%M:%S.%f')\n elif (us // 1000) != 0:\n xp = time(h, m, s, us).strftime('%H:%M:%S.%f')[:-3]\n elif s != 0:\n xp = time(h, m, s, us).strftime('%H:%M:%S')\n else:\n xp = time(h, m, s, us).strftime('%H:%M')\n assert xp == rs\n\n @pytest.mark.slow\n def test_secondary_upsample(self):\n idxh = date_range('1/1/1999', periods=365, freq='D')\n idxl = date_range('1/1/1999', periods=12, freq='M')\n high = Series(np.random.randn(len(idxh)), idxh)\n low = Series(np.random.randn(len(idxl)), idxl)\n _, ax = self.plt.subplots()\n low.plot(ax=ax)\n ax = high.plot(secondary_y=True, ax=ax)\n for l in ax.get_lines():\n assert PeriodIndex(l.get_xdata()).freq == 'D'\n assert hasattr(ax, 'left_ax')\n assert not hasattr(ax, 'right_ax')\n for l in ax.left_ax.get_lines():\n assert PeriodIndex(l.get_xdata()).freq == 'D'\n\n @pytest.mark.slow\n def test_secondary_legend(self):\n fig = self.plt.figure()\n ax = fig.add_subplot(211)\n\n # ts\n df = tm.makeTimeDataFrame()\n df.plot(secondary_y=['A', 'B'], ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert leg.get_texts()[0].get_text() == 'A (right)'\n assert leg.get_texts()[1].get_text() == 'B (right)'\n assert leg.get_texts()[2].get_text() == 'C'\n assert leg.get_texts()[3].get_text() == 'D'\n assert ax.right_ax.get_legend() is None\n colors = set()\n for line in leg.get_lines():\n colors.add(line.get_color())\n\n # TODO: color cycle problems\n assert len(colors) == 4\n self.plt.close(fig)\n\n fig = self.plt.figure()\n ax = fig.add_subplot(211)\n df.plot(secondary_y=['A', 'C'], mark_right=False, ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert leg.get_texts()[0].get_text() == 'A'\n assert leg.get_texts()[1].get_text() == 'B'\n assert leg.get_texts()[2].get_text() == 'C'\n assert leg.get_texts()[3].get_text() == 'D'\n self.plt.close(fig)\n\n fig, ax = self.plt.subplots()\n df.plot(kind='bar', secondary_y=['A'], ax=ax)\n leg = ax.get_legend()\n assert leg.get_texts()[0].get_text() == 'A (right)'\n assert leg.get_texts()[1].get_text() == 'B'\n self.plt.close(fig)\n\n fig, ax = self.plt.subplots()\n df.plot(kind='bar', secondary_y=['A'], mark_right=False, ax=ax)\n leg = ax.get_legend()\n assert leg.get_texts()[0].get_text() == 'A'\n assert leg.get_texts()[1].get_text() == 'B'\n self.plt.close(fig)\n\n fig = self.plt.figure()\n ax = 
fig.add_subplot(211)\n df = tm.makeTimeDataFrame()\n ax = df.plot(secondary_y=['C', 'D'], ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert ax.right_ax.get_legend() is None\n colors = set()\n for line in leg.get_lines():\n colors.add(line.get_color())\n\n # TODO: color cycle problems\n assert len(colors) == 4\n self.plt.close(fig)\n\n # non-ts\n df = tm.makeDataFrame()\n fig = self.plt.figure()\n ax = fig.add_subplot(211)\n ax = df.plot(secondary_y=['A', 'B'], ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert ax.right_ax.get_legend() is None\n colors = set()\n for line in leg.get_lines():\n colors.add(line.get_color())\n\n # TODO: color cycle problems\n assert len(colors) == 4\n self.plt.close()\n\n fig = self.plt.figure()\n ax = fig.add_subplot(211)\n ax = df.plot(secondary_y=['C', 'D'], ax=ax)\n leg = ax.get_legend()\n assert len(leg.get_lines()) == 4\n assert ax.right_ax.get_legend() is None\n colors = set()\n for line in leg.get_lines():\n colors.add(line.get_color())\n\n # TODO: color cycle problems\n assert len(colors) == 4\n\n def test_format_date_axis(self):\n rng = date_range('1/1/2012', periods=12, freq='M')\n df = DataFrame(np.random.randn(len(rng), 3), rng)\n _, ax = self.plt.subplots()\n ax = df.plot(ax=ax)\n xaxis = ax.get_xaxis()\n for l in xaxis.get_ticklabels():\n if len(l.get_text()) > 0:\n assert l.get_rotation() == 30\n\n @pytest.mark.slow\n def test_ax_plot(self):\n x = DatetimeIndex(start='2012-01-02', periods=10, freq='D')\n y = lrange(len(x))\n _, ax = self.plt.subplots()\n lines = ax.plot(x, y, label='Y')\n tm.assert_index_equal(DatetimeIndex(lines[0].get_xdata()), x)\n\n @pytest.mark.slow\n def test_mpl_nopandas(self):\n dates = [date(2008, 12, 31), date(2009, 1, 31)]\n values1 = np.arange(10.0, 11.0, 0.5)\n values2 = np.arange(11.0, 12.0, 0.5)\n\n kw = dict(fmt='-', lw=4)\n\n _, ax = self.plt.subplots()\n ax.plot_date([x.toordinal() for x in dates], values1, **kw)\n ax.plot_date([x.toordinal() for x in dates], values2, **kw)\n\n line1, line2 = ax.get_lines()\n\n exp = np.array([x.toordinal() for x in dates], dtype=np.float64)\n tm.assert_numpy_array_equal(line1.get_xydata()[:, 0], exp)\n exp = np.array([x.toordinal() for x in dates], dtype=np.float64)\n tm.assert_numpy_array_equal(line2.get_xydata()[:, 0], exp)\n\n @pytest.mark.slow\n def test_irregular_ts_shared_ax_xlim(self):\n # GH 2960\n ts = tm.makeTimeSeries()[:20]\n ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]\n\n # plot the left section of the irregular series, then the right section\n _, ax = self.plt.subplots()\n ts_irregular[:5].plot(ax=ax)\n ts_irregular[5:].plot(ax=ax)\n\n # check that axis limits are correct\n left, right = ax.get_xlim()\n assert left <= ts_irregular.index.min().toordinal()\n assert right >= ts_irregular.index.max().toordinal()\n\n @pytest.mark.slow\n def test_secondary_y_non_ts_xlim(self):\n # GH 3490 - non-timeseries with secondary y\n index_1 = [1, 2, 3, 4]\n index_2 = [5, 6, 7, 8]\n s1 = Series(1, index=index_1)\n s2 = Series(2, index=index_2)\n\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n left_before, right_before = ax.get_xlim()\n s2.plot(secondary_y=True, ax=ax)\n left_after, right_after = ax.get_xlim()\n\n assert left_before >= left_after\n assert right_before < right_after\n\n @pytest.mark.slow\n def test_secondary_y_regular_ts_xlim(self):\n # GH 3490 - regular-timeseries with secondary y\n index_1 = date_range(start='2000-01-01', periods=4, freq='D')\n index_2 = date_range(start='2000-01-05', 
periods=4, freq='D')\n s1 = Series(1, index=index_1)\n s2 = Series(2, index=index_2)\n\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n left_before, right_before = ax.get_xlim()\n s2.plot(secondary_y=True, ax=ax)\n left_after, right_after = ax.get_xlim()\n\n assert left_before >= left_after\n assert right_before < right_after\n\n @pytest.mark.slow\n def test_secondary_y_mixed_freq_ts_xlim(self):\n # GH 3490 - mixed frequency timeseries with secondary y\n rng = date_range('2000-01-01', periods=10000, freq='min')\n ts = Series(1, index=rng)\n\n _, ax = self.plt.subplots()\n ts.plot(ax=ax)\n left_before, right_before = ax.get_xlim()\n ts.resample('D').mean().plot(secondary_y=True, ax=ax)\n left_after, right_after = ax.get_xlim()\n\n # a downsample should not have changed either limit\n assert left_before == left_after\n assert right_before == right_after\n\n @pytest.mark.slow\n def test_secondary_y_irregular_ts_xlim(self):\n # GH 3490 - irregular-timeseries with secondary y\n ts = tm.makeTimeSeries()[:20]\n ts_irregular = ts[[1, 4, 5, 6, 8, 9, 10, 12, 13, 14, 15, 17, 18]]\n\n _, ax = self.plt.subplots()\n ts_irregular[:5].plot(ax=ax)\n # plot higher-x values on secondary axis\n ts_irregular[5:].plot(secondary_y=True, ax=ax)\n # ensure secondary limits aren't overwritten by plot on primary\n ts_irregular[:5].plot(ax=ax)\n\n left, right = ax.get_xlim()\n assert left <= ts_irregular.index.min().toordinal()\n assert right >= ts_irregular.index.max().toordinal()\n\n def test_plot_outofbounds_datetime(self):\n # 2579 - checking this does not raise\n values = [date(1677, 1, 1), date(1677, 1, 2)]\n _, ax = self.plt.subplots()\n ax.plot(values)\n\n values = [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)]\n ax.plot(values)\n\n def test_format_timedelta_ticks_narrow(self):\n if is_platform_mac():\n pytest.skip(\"skip on mac for precision display issue on older mpl\")\n\n if self.mpl_ge_2_0_0:\n expected_labels = [''] + [\n '00:00:00.00000000{:d}'.format(2 * i)\n for i in range(5)] + ['']\n else:\n expected_labels = [\n '00:00:00.00000000{:d}'.format(i)\n for i in range(10)]\n\n rng = timedelta_range('0', periods=10, freq='ns')\n df = DataFrame(np.random.randn(len(rng), 3), rng)\n fig, ax = self.plt.subplots()\n df.plot(fontsize=2, ax=ax)\n fig.canvas.draw()\n labels = ax.get_xticklabels()\n assert len(labels) == len(expected_labels)\n for l, l_expected in zip(labels, expected_labels):\n assert l.get_text() == l_expected\n\n def test_format_timedelta_ticks_wide(self):\n if is_platform_mac():\n pytest.skip(\"skip on mac for precision display issue on older mpl\")\n\n if self.mpl_ge_2_0_0:\n expected_labels = [\n '',\n '00:00:00',\n '1 days 03:46:40',\n '2 days 07:33:20',\n '3 days 11:20:00',\n '4 days 15:06:40',\n '5 days 18:53:20',\n '6 days 22:40:00',\n '8 days 02:26:40',\n '9 days 06:13:20',\n ''\n ]\n else:\n expected_labels = [\n '00:00:00',\n '1 days 03:46:40',\n '2 days 07:33:20',\n '3 days 11:20:00',\n '4 days 15:06:40',\n '5 days 18:53:20',\n '6 days 22:40:00',\n '8 days 02:26:40',\n ''\n ]\n\n rng = timedelta_range('0', periods=10, freq='1 d')\n df = DataFrame(np.random.randn(len(rng), 3), rng)\n fig, ax = self.plt.subplots()\n ax = df.plot(fontsize=2, ax=ax)\n fig.canvas.draw()\n labels = ax.get_xticklabels()\n assert len(labels) == len(expected_labels)\n for l, l_expected in zip(labels, expected_labels):\n assert l.get_text() == l_expected\n\n def test_timedelta_plot(self):\n # test issue #8711\n s = Series(range(5), timedelta_range('1day', periods=5))\n _, ax = self.plt.subplots()\n 
_check_plot_works(s.plot, ax=ax)\n\n # test long period\n index = timedelta_range('1 day 2 hr 30 min 10 s',\n periods=10, freq='1 d')\n s = Series(np.random.randn(len(index)), index)\n _, ax = self.plt.subplots()\n _check_plot_works(s.plot, ax=ax)\n\n # test short period\n index = timedelta_range('1 day 2 hr 30 min 10 s',\n periods=10, freq='1 ns')\n s = Series(np.random.randn(len(index)), index)\n _, ax = self.plt.subplots()\n _check_plot_works(s.plot, ax=ax)\n\n def test_hist(self):\n # https://github.com/matplotlib/matplotlib/issues/8459\n rng = date_range('1/1/2011', periods=10, freq='H')\n x = rng\n w1 = np.arange(0, 1, .1)\n w2 = np.arange(0, 1, .1)[::-1]\n _, ax = self.plt.subplots()\n ax.hist([x, x], weights=[w1, w2])\n\n @pytest.mark.slow\n def test_overlapping_datetime(self):\n # GB 6608\n s1 = Series([1, 2, 3], index=[datetime(1995, 12, 31),\n datetime(2000, 12, 31),\n datetime(2005, 12, 31)])\n s2 = Series([1, 2, 3], index=[datetime(1997, 12, 31),\n datetime(2003, 12, 31),\n datetime(2008, 12, 31)])\n\n # plot first series, then add the second series to those axes,\n # then try adding the first series again\n _, ax = self.plt.subplots()\n s1.plot(ax=ax)\n s2.plot(ax=ax)\n s1.plot(ax=ax)\n\n @pytest.mark.xfail(reason=\"GH9053 matplotlib does not use\"\n \" ax.xaxis.converter\")\n def test_add_matplotlib_datetime64(self):\n # GH9053 - ensure that a plot with PeriodConverter still understands\n # datetime64 data. This still fails because matplotlib overrides the\n # ax.xaxis.converter with a DatetimeConverter\n s = Series(np.random.randn(10),\n index=date_range('1970-01-02', periods=10))\n ax = s.plot()\n ax.plot(s.index, s.values, color='g')\n l1, l2 = ax.lines\n tm.assert_numpy_array_equal(l1.get_xydata(), l2.get_xydata())\n\n\ndef _check_plot_works(f, freq=None, series=None, *args, **kwargs):\n import matplotlib.pyplot as plt\n\n fig = plt.gcf()\n\n try:\n plt.clf()\n ax = fig.add_subplot(211)\n orig_ax = kwargs.pop('ax', plt.gca())\n orig_axfreq = getattr(orig_ax, 'freq', None)\n\n ret = f(*args, **kwargs)\n assert ret is not None # do something more intelligent\n\n ax = kwargs.pop('ax', plt.gca())\n if series is not None:\n dfreq = series.index.freq\n if isinstance(dfreq, DateOffset):\n dfreq = dfreq.rule_code\n if orig_axfreq is None:\n assert ax.freq == dfreq\n\n if freq is not None and orig_axfreq is None:\n assert ax.freq == freq\n\n ax = fig.add_subplot(212)\n try:\n kwargs['ax'] = ax\n ret = f(*args, **kwargs)\n assert ret is not None # do something more intelligent\n except Exception:\n pass\n\n with ensure_clean(return_filelike=True) as path:\n plt.savefig(path)\n\n # GH18439\n # this is supported only in Python 3 pickle since\n # pickle in Python2 doesn't support instancemethod pickling\n if PY3:\n with ensure_clean(return_filelike=True) as path:\n pickle.dump(fig, path)\n finally:\n plt.close(fig)\n" ]
[ [ "pandas.util.testing.ensure_clean", "pandas.Series", "numpy.asarray", "pandas.DataFrame", "pandas.tests.plotting.common.TestPlotBase.setup_method", "pandas.core.indexes.period.Period", "pandas.core.indexes.datetimes.date_range", "numpy.random.randn", "pandas.core.indexes.timedeltas.timedelta_range", "pandas.tseries.plotting.tsplot", "pandas.util.testing.makeDataFrame", "pandas.core.indexes.period.PeriodIndex", "numpy.random.randint", "pandas.util.testing.makeTimeDataFrame", "matplotlib.pyplot.gca", "pandas.plotting._converter._from_ordinal", "pandas.util.testing.assert_numpy_array_equal", "numpy.arange", "pandas.core.indexes.period.period_range", "pandas.util.testing.assert_series_equal", "pandas.Index", "matplotlib.pyplot.gcf", "pandas.util.testing.makePeriodSeries", "matplotlib.pyplot.close", "pandas.compat.is_platform_mac", "pandas.util.testing.close", "pandas.core.resample.DatetimeIndex", "matplotlib.pyplot.savefig", "pandas.plotting._converter.get_datevalue", "numpy.array", "pandas.plotting._converter.get_finder", "pandas.util.testing.makeTimeSeries", "matplotlib.pyplot.clf", "pandas.compat.zip", "pandas.core.indexes.datetimes.bdate_range", "pandas.tests.plotting.common._skip_if_no_scipy_gaussian_kde", "numpy.fabs" ] ]
GillianGrayson/ipAGE
[ "7049cc37da98acf47ebf56876e8b49ae69996ecc" ]
[ "statistics.py" ]
[ "import pandas as pd\nfrom scipy import stats\n\n\ndef perform_statistical_analysis():\n path = f'data/' # Path to the directory with data table\n table_name = f'all_features' # Name of xlsx table with all features\n features_file = 'biomarkers' # Name of file with features, which will use to build the clock\n\n # Status column\n status_col_name = 'Group'\n status_options = ['Control', 'ESRD']\n\n # Select here features for statistical analysis\n regression_features = ['Age', 'DNAmAgeHannum', 'DNAmAge', 'DNAmPhenoAge', 'DNAmGrimAge']\n\n df_global = pd.read_excel(f'{path}/{table_name}.xlsx')\n\n with open(f'{path}/{features_file}.txt') as f:\n target_features = f.read().splitlines()\n\n # Create a dictionary with statistical results\n res_dict = {'metric': target_features}\n res_dict['mw_p_value'] = [] # Mann-Whitney p-values\n for a in regression_features:\n # Pearson's correlation for controls\n res_dict[f'{a}_pearson_r_C'] = []\n res_dict[f'{a}_pearson_p_value_C'] = []\n # Pearson's correlation for subjects with the ESRD\n res_dict[f'{a}_pearson_r_T'] = []\n res_dict[f'{a}_pearson_p_value_T'] = []\n\n for m_id, m in enumerate(target_features):\n\n # Associations with the status\n test_data = {}\n pb_x = {}\n for g_id, g in enumerate(status_options):\n test_data[g] = df_global.loc[df_global[status_col_name] == g][m].to_list()\n pb_x[g] = [g_id] * len(test_data[g])\n\n _, mw_p_value = stats.mannwhitneyu(\n test_data[status_options[0]],\n test_data[status_options[1]],\n alternative='two-sided'\n )\n res_dict['mw_p_value'].append(mw_p_value)\n\n # Correlations in Controls\n df_control = df_global.loc[df_global[status_col_name] == 'Control']\n for a in regression_features:\n pearson_r, pearson_p_value = stats.pearsonr(df_control[m].to_list(), df_control[a].to_list())\n res_dict[f'{a}_pearson_r_C'].append(pearson_r)\n res_dict[f'{a}_pearson_p_value_C'].append(pearson_p_value)\n\n # Correlations in ESRD group\n df_disease = df_global.loc[df_global[status_col_name] == 'ESRD']\n for a in regression_features:\n pearson_r, pearson_p_value = stats.pearsonr(df_disease[m].to_list(), df_disease[a].to_list())\n res_dict[f'{a}_pearson_r_T'].append(pearson_r)\n res_dict[f'{a}_pearson_p_value_T'].append(pearson_p_value)\n\n results_df = pd.DataFrame(res_dict)\n fn_save = f\"{path}/statistics.xlsx\"\n results_df.to_excel(fn_save, index=False)\n\n\nif __name__ == \"__main__\":\n perform_statistical_analysis()" ]
[ [ "scipy.stats.mannwhitneyu", "pandas.read_excel", "pandas.DataFrame" ] ]
avagreeen/KittiBox
[ "1dfa30352801277dc67d22953cdf87e40c1893e0" ]
[ "inputs/daimler_input.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport json\nimport logging\nimport os\nimport sys\nimport random\nfrom random import shuffle\n\nimport numpy as np\n\nimport scipy as scp\nimport scipy.misc\n\nfrom scipy.misc import imread, imresize\n\nimport tensorflow as tf\n\nfrom utils.data_utils import (annotation_jitter, annotation_to_h5)\nfrom utils.annolist import AnnotationLib as AnnoLib\nfrom utils.rect import Rect\n\nimport threading\n\nfrom collections import namedtuple\nfake_anno = namedtuple('fake_anno_object', ['rects'])\n\n\ndef read_daimler_anno(label_file, detect_truck):\n \"\"\" Reads a daimler cyclist annotation file.\n\n Args:\n label_file: Path to file\n\n Returns:\n Lists of rectangels: .\n \"\"\"\n label_data =json.load(open(label_file))\n labels = [item for item in label_data['children']]\n rect_list = []\n for label in labels:\n if not (label['identity'] == 'pedestrian' or label['identity'] == 'cyclist' or\n label['identity'] == 'wheelchairuser' or label['identity'] == 'motorcyclist' or\n label['identity']== 'tricyclist' or label['identity'] == 'mopedrider'):\n continue\n notruck = not detect_truck\n if notruck and label['identity'] == 'Truck':\n continue\n if label['identity'] == 'DontCare':\n class_id = -1\n else:\n class_id = 1\n object_rect = AnnoLib.AnnoRect(\n x1=float(label['mincol']), y1=float(label['minrow']),\n x2=float(label['maxcol']), y2=float(label['maxrow']))\n assert object_rect.x1 < object_rect.x2\n assert object_rect.y1 < object_rect.y2\n object_rect.classID = class_id\n rect_list.append(object_rect)\n\n return rect_list\n\n\n\ndef _rescale_boxes(current_shape, anno, target_height, target_width):\n x_scale = target_width / float(current_shape[1])\n y_scale = target_height / float(current_shape[0])\n for r in anno.rects:\n assert r.x1 < r.x2\n r.x1 *= x_scale\n r.x2 *= x_scale\n assert r.x1 < r.x2\n r.y1 *= y_scale\n r.y2 *= y_scale\n return anno\n\n\ndef _generate_mask(hypes, ignore_rects):\n\n width = hypes[\"image_width\"]\n height = hypes[\"image_height\"]\n grid_width = hypes[\"grid_width\"]\n grid_height = hypes[\"grid_height\"]\n\n mask = np.ones([grid_height, grid_width])\n\n if not hypes['use_mask']:\n return mask\n\n for rect in ignore_rects:\n left = int((rect.x1+2)/width*grid_width)\n right = int((rect.x2-2)/width*grid_width)\n top = int((rect.y1+2)/height*grid_height)\n bottom = int((rect.y2-2)/height*grid_height)\n for x in range(left, right+1):\n for y in range(top, bottom+1):\n mask[y, x] = 0\n\n return mask\n\n\ndef _load_daimler_txt(daimler_txt, hypes, jitter=False, random_shuffel=True):\n base_path = os.path.realpath(os.path.dirname(daimler_txt))\n gt_image_path = 'labelData/train/tsinghuaDaimlerDataset'\n image_path = 'leftImg8bit/train/tsinghuaDaimlerDataset'\n files = [line.rstrip() for line in open(daimler_txt)]\n if hypes['data']['truncate_data']:\n files = files[:10]\n random.seed(0)\n for epoch in itertools.count():\n if random_shuffel:\n random.shuffle(files)\n for file in files:\n image_file, gt_image_file = file.split(\" \")\n image_file = os.path.join(base_path, image_path , image_file)\n assert os.path.exists(image_file), \\\n \"File does not exist: %s\" % image_file\n gt_image_file = os.path.join(base_path, gt_image_path ,gt_image_file)\n assert os.path.exists(gt_image_file), \\\n \"File does not exist: %s\" % gt_image_file\n\n rect_list = read_daimler_anno(gt_image_file,\n detect_truck=hypes['detect_truck'])\n\n anno = 
AnnoLib.Annotation()\n anno.rects = rect_list\n\n im = scipy.misc.imread(image_file)\n if im.shape[2] == 4:\n im = im[:, :, :3]\n if im.shape[0] != hypes[\"image_height\"] or \\\n im.shape[1] != hypes[\"image_width\"]:\n if True:\n anno = _rescale_boxes(im.shape, anno,\n hypes[\"image_height\"],\n hypes[\"image_width\"])\n im = scipy.misc.imresize(\n im, (hypes[\"image_height\"], hypes[\"image_width\"]),\n interp='cubic')\n\n if jitter:\n jitter_scale_min = 0.9\n jitter_scale_max = 1.1\n jitter_offset = 16\n im, anno = annotation_jitter(\n im, anno, target_width=hypes[\"image_width\"],\n target_height=hypes[\"image_height\"],\n jitter_scale_min=jitter_scale_min,\n jitter_scale_max=jitter_scale_max,\n jitter_offset=jitter_offset)\n\n pos_list = [rect for rect in anno.rects if rect.classID == 1]\n pos_anno = fake_anno(pos_list)\n\n boxes, confs = annotation_to_h5(hypes,\n pos_anno,\n hypes[\"grid_width\"],\n hypes[\"grid_height\"],\n hypes[\"rnn_len\"])\n\n mask_list = [rect for rect in anno.rects if rect.classID == -1]\n mask = _generate_mask(hypes, mask_list)\n\n boxes = boxes.reshape([hypes[\"grid_height\"],\n hypes[\"grid_width\"], 4])\n confs = confs.reshape(hypes[\"grid_height\"], hypes[\"grid_width\"])\n\n yield {\"image\": im, \"boxes\": boxes, \"confs\": confs,\n \"rects\": pos_list, \"mask\": mask}\n\n\ndef _make_sparse(n, d):\n v = np.zeros((d,), dtype=np.float32)\n v[n] = 1.\n return v\n\n\ndef create_queues(hypes, phase):\n \"\"\"Create Queues.\"\"\"\n hypes[\"rnn_len\"] = 1\n dtypes = [tf.float32, tf.float32, tf.float32, tf.float32]\n grid_size = hypes['grid_width'] * hypes['grid_height']\n shapes = ([hypes['image_height'], hypes['image_width'], 3],\n [hypes['grid_height'], hypes['grid_width']],\n [hypes['grid_height'], hypes['grid_width'], 4],\n [hypes['grid_height'], hypes['grid_width']])\n capacity = 30\n q = tf.FIFOQueue(capacity=capacity, dtypes=dtypes, shapes=shapes)\n return q\n\n\ndef _processe_image(hypes, image):\n # Because these operations are not commutative, consider randomizing\n # randomize the order their operation.\n augment_level = hypes['augment_level']\n if augment_level > 0:\n image = tf.image.random_brightness(image, max_delta=30)\n image = tf.image.random_contrast(image, lower=0.75, upper=1.25)\n if augment_level > 1:\n image = tf.image.random_saturation(image, lower=0.5, upper=1.6)\n image = tf.image.random_hue(image, max_delta=0.15)\n\n image = tf.minimum(image, 255.0)\n image = tf.maximum(image, 0)\n\n return image\n\n\ndef start_enqueuing_threads(hypes, q, phase, sess):\n \"\"\"Start enqueuing threads.\"\"\"\n print(\"START ENQUING\")\n # Creating Placeholder for the Queue\n x_in = tf.placeholder(tf.float32)\n confs_in = tf.placeholder(tf.float32)\n boxes_in = tf.placeholder(tf.float32)\n mask_in = tf.placeholder(tf.float32)\n\n # Creating Enqueue OP\n enqueue_op = q.enqueue((x_in, confs_in, boxes_in, mask_in))\n\n def make_feed(data):\n return {x_in: data['image'],\n confs_in: data['confs'],\n boxes_in: data['boxes'],\n mask_in: data['mask']}\n\n def thread_loop(sess, enqueue_op, gen):\n for d in gen:\n sess.run(enqueue_op, feed_dict=make_feed(d))\n\n data_file = hypes[\"data\"]['%s_file' % phase]\n data_dir = hypes['dirs']['data_dir']\n data_file = os.path.join(data_dir, data_file)\n print(\"-----------------creating generator object----------------\")\n gen = _load_daimler_txt(data_file, hypes,\n jitter={'train': hypes['solver']['use_jitter'],\n 'val': False}[phase])\n data = gen.next()\n print(\"-----------------fetched 
data--------------------------\")\n sess.run(enqueue_op, feed_dict=make_feed(data))\n t = threading.Thread(target=thread_loop,\n args=(sess, enqueue_op, gen))\n t.daemon = True\n t.start()\n\n\n# comment: NOT USED ANYWHERE\n# def test_new_kitti():\n# idlfile = \"/home/mifs/mttt2/cvfs/DATA/KittiBox/train_3.idl\"\n# kitti_txt = \"/home/mifs/mttt2/cvfs/DATA/KittiBox/train.txt\"\n\n# with open('hypes/daimlerBox.json', 'r') as f:\n# logging.info(\"f: %s\", f)\n# hypes = json.load(f)\n\n# hypes[\"rnn_len\"] = 1\n# hypes[\"image_height\"] = 200\n# hypes[\"image_width\"] = 800\n\n# gen1 = _load_daimler_txt(kitti_txt, hypes, random_shuffel=False)\n# gen2 = _load_daimler_txt(idlfile, hypes, random_shuffel=False)\n\n# print('testing generators')\n\n# for i in range(20):\n# data1 = gen1.next()\n# data2 = gen2.next()\n# rects1 = data1['rects']\n# rects2 = data2['rects']\n\n# assert len(rects1) <= len(rects2)\n\n# if not len(rects1) == len(rects2):\n# print('ignoring flags')\n# continue\n# else:\n# print('comparing flags')\n# assert(np.all(data1['image'] == data2['image']))\n# # assert(np.all(data1['boxes'] == data2['boxes']))\n# if np.all(data1['flags'] == data2['flags']):\n# print('same')\n# else:\n# print('diff')\n\n\ndef inputs(hypes, q, phase):\n\n if phase == 'val':\n image, confidences, boxes, mask = q.dequeue()\n image = tf.expand_dims(image, 0)\n confidences = tf.expand_dims(confidences, 0)\n boxes = tf.expand_dims(boxes, 0)\n mask = tf.expand_dims(mask, 0)\n return image, (confidences, boxes, mask)\n elif phase == 'train':\n image, confidences, boxes, mask = q.dequeue_many(hypes['batch_size'])\n image = _processe_image(hypes, image)\n return image, (confidences, boxes, mask)\n else:\n assert(\"Bad phase: {}\".format(phase))\n" ]
[ [ "tensorflow.image.random_brightness", "tensorflow.image.random_contrast", "tensorflow.FIFOQueue", "tensorflow.image.random_hue", "tensorflow.maximum", "tensorflow.minimum", "tensorflow.placeholder", "tensorflow.expand_dims", "numpy.ones", "tensorflow.image.random_saturation", "numpy.zeros" ] ]
g0lemXIV/harmfulness_app
[ "bd73c8bcac2b0da941590c5632900ea98dc52d3e" ]
[ "backend/models/predict.py" ]
[ "import numpy as np\nfrom langdetect import detect\nfrom backend.models import model_lib\nfrom backend.schemas import TextPredict, TextCreate\nfrom backend.core.messages import NO_VALID_PAYLOAD, NO_VALID_LANGUAGE, NO_VALID_SENTENCE\nfrom backend.data import parse_text\n\n\nclass Predictor:\n \"\"\"Base class for generate prediction using sklearn models,\n in the next version it should be change to child class,\n and codding with some patterns.\"\"\"\n\n def __init__(self, model_name: str, language=\"pl\", min_length: int = 10) -> None:\n \"\"\"\n :param model_name: name of the model loaded into model_lib in memory dict\n :param language: language to use\n :param min_length: minimum length of preprocessed text\n \"\"\"\n self.model_name = model_name\n self.model = model_lib[model_name]\n self.language = language\n self.min_length = min_length\n\n def __str__(self):\n return self.__class__.__name__\n\n def predict(self, payload: TextCreate) -> TextPredict:\n \"\"\"Function for predict new text data used in api.\n\n :param payload: TextCreate object created with pydantic\n :return:\n return pydantic TextPredict object with prediction,\n prediction probabilities, text, processed text, and\n additional information.\n \"\"\"\n if payload is None:\n raise ValueError(NO_VALID_PAYLOAD.format(payload))\n detect_lang = detect(payload.text)\n if (detect_lang != self.language) or (payload.language != self.language):\n raise ValueError(NO_VALID_LANGUAGE.format(payload.language, detect_lang))\n # parse text\n text = parse_text(text=payload.text, language=payload.language)\n if not text or len(text) < self.min_length:\n raise ValueError(NO_VALID_SENTENCE.format(text, self.min_length, len(text)))\n # score model\n score = self.model.predict_proba([text])[0]\n score = np.around(score, decimals=4)\n prediction = TextPredict(\n prediction=np.argmax(score),\n prediction_proba=list(score),\n user_id=payload.user_id,\n time_utc=payload.time_utc,\n language=payload.language,\n text=payload.text,\n text_tokenized=text,\n )\n return prediction\n" ]
[ [ "numpy.around", "numpy.argmax" ] ]
Jopplk/trumpRallies
[ "a4ad985354192a9880c5e55068fb7d7eca3738ee" ]
[ "geopandasMap.py" ]
[ "# Creates US state choropleth map with geopandas\nimport geopandas as gpd\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\n\n# Getting rally data\ndef agg(dfFile):\n data = pd.read_csv(dfFile)\n aggData = data.groupby('State')['Venue'].count()\n\n return data, aggData\n\n\ndata2016, data2016agg = agg('data/2016Campaign/data2016.csv')\ndataPost, dataPostagg = agg('data/postElection/dataPost.csv')\n\nbothAgg = data2016agg.combine(\n dataPostagg, lambda x, y: x + y, fill_value=0)\n\n# Converting rally coords to geopandas dataframe\nbothData = data2016.append(dataPost)\ngeoRallyPoints = gpd.GeoDataFrame(\n bothData,\n geometry=gpd.points_from_xy(bothData.longitude, bothData.latitude))\n\n# Getting and cleaning map shape data\npath = 'data/us-states.json'\nmapShape = gpd.read_file(path)\n\nmapShape = mapShape.set_index('name')\nmapShape = mapShape.drop(['AK', 'HI', 'PR'])\n\n# Combining data\nmapShape['occurances'] = bothAgg\n#mapShape = mapShape.fillna(0)\n\n\n### Begin Map plotting ###\n# figsize=(x_inches, y_inches)\n# DPI will control final export resolution\n\n# Style Declerations\nplt.rcParams[\"font.family\"] = \"Nirmala UI\"\ntextcolor = '2e2e2e'\nplt.rcParams['text.color'] = textcolor\nplt.rcParams['axes.labelcolor'] = textcolor\nplt.rcParams['xtick.color'] = textcolor\nplt.rcParams['ytick.color'] = textcolor\n\nfig, ax = plt.subplots(1, figsize=(8, 5))\n\nax.axis('off')\nax.set_aspect('equal')\nax.set_title('Number of Trump Rallies by State',\n fontsize=36)\n\n# Create colorbar\ndivider = make_axes_locatable(ax)\ncax = divider.append_axes(\"bottom\", size=\"3.5%\", pad=0.05)\n\nsegs = 8\ncolors = plt.cm.get_cmap('OrRd', segs)\n\nmappable = mpl.cm.ScalarMappable(cmap=colors)\nmappable.set_array([])\nmappable.set_clim(-0.5, segs + 0.5) # Controlls tick positioning\ncbar = fig.colorbar(mappable, cax=cax, orientation='horizontal')\n\ncbar.set_ticks(np.linspace(0, segs, segs))\ncbar.set_ticklabels([5, 10, 15, 20, 25, 30, 35, 40])\ncbar.ax.tick_params(labelsize=16)\n\n# Create plots\nmapShape.plot(\n missing_kwds={'color': 'lightgrey'},\n ax=ax,\n column='occurances',\n cmap=colors,\n linewidth=0.8,\n edgecolor='0.8')\n\ngeoRallyPoints.plot(\n ax=ax,\n alpha=.75,\n marker='o',\n color='grey',\n markersize=26)\n\nplt.show()\n\nfig.savefig(\"map.png\", dpi=300)\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.cm.get_cmap", "numpy.linspace", "matplotlib.pyplot.subplots", "matplotlib.cm.ScalarMappable", "matplotlib.pyplot.show" ] ]
YagoGG/pandas
[ "fc813e76e7d8863410aa4b1a581dfa777281271c" ]
[ "pandas/core/indexes/timedeltas.py" ]
[ "\"\"\" implement the TimedeltaIndex \"\"\"\n\nimport numpy as np\n\nfrom pandas._libs import NaT, Timedelta, index as libindex\nfrom pandas.util._decorators import Appender\n\nfrom pandas.core.dtypes.common import (\n _TD_DTYPE,\n is_float,\n is_integer,\n is_scalar,\n is_timedelta64_dtype,\n is_timedelta64_ns_dtype,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.missing import is_valid_nat_for_dtype\n\nfrom pandas.core.arrays import datetimelike as dtl\nfrom pandas.core.arrays.timedeltas import TimedeltaArray\nimport pandas.core.common as com\nfrom pandas.core.indexes.base import Index, InvalidIndexError, maybe_extract_name\nfrom pandas.core.indexes.datetimelike import (\n DatetimeIndexOpsMixin,\n DatetimeTimedeltaMixin,\n)\nfrom pandas.core.indexes.extension import inherit_names\n\nfrom pandas.tseries.frequencies import to_offset\n\n\n@inherit_names(\n [\n \"_box_values\",\n \"__neg__\",\n \"__pos__\",\n \"__abs__\",\n \"total_seconds\",\n \"round\",\n \"floor\",\n \"ceil\",\n ]\n + TimedeltaArray._field_ops,\n TimedeltaArray,\n wrap=True,\n)\n@inherit_names(\n [\n \"_bool_ops\",\n \"_object_ops\",\n \"_field_ops\",\n \"_datetimelike_ops\",\n \"_datetimelike_methods\",\n \"_other_ops\",\n \"components\",\n \"_box_func\",\n \"to_pytimedelta\",\n \"sum\",\n \"std\",\n \"median\",\n \"_format_native_types\",\n \"freq\",\n ],\n TimedeltaArray,\n)\nclass TimedeltaIndex(DatetimeTimedeltaMixin, dtl.TimelikeOps):\n \"\"\"\n Immutable ndarray of timedelta64 data, represented internally as int64, and\n which can be boxed to timedelta objects.\n\n Parameters\n ----------\n data : array-like (1-dimensional), optional\n Optional timedelta-like data to construct index with.\n unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional\n Which is an integer/float number.\n freq : str or pandas offset object, optional\n One of pandas date offset strings or corresponding objects. 
The string\n 'infer' can be passed in order to set the frequency of the index as the\n inferred frequency upon creation.\n copy : bool\n Make a copy of input ndarray.\n name : object\n Name to be stored in the index.\n\n Attributes\n ----------\n days\n seconds\n microseconds\n nanoseconds\n components\n inferred_freq\n\n Methods\n -------\n to_pytimedelta\n to_series\n round\n floor\n ceil\n to_frame\n mean\n\n See Also\n --------\n Index : The base pandas Index type.\n Timedelta : Represents a duration between two dates or times.\n DatetimeIndex : Index of datetime64 data.\n PeriodIndex : Index of Period data.\n timedelta_range : Create a fixed-frequency TimedeltaIndex.\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n \"\"\"\n\n _typ = \"timedeltaindex\"\n\n _engine_type = libindex.TimedeltaEngine\n\n _comparables = [\"name\", \"freq\"]\n _attributes = [\"name\", \"freq\"]\n _is_numeric_dtype = True\n _infer_as_myclass = True\n\n _data: TimedeltaArray\n\n # -------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data=None,\n unit=None,\n freq=None,\n closed=None,\n dtype=_TD_DTYPE,\n copy=False,\n name=None,\n ):\n name = maybe_extract_name(name, data, cls)\n\n if is_scalar(data):\n raise TypeError(\n f\"{cls.__name__}() must be called with a \"\n f\"collection of some kind, {repr(data)} was passed\"\n )\n\n if unit in {\"Y\", \"y\", \"M\"}:\n raise ValueError(\n \"Units 'M' and 'Y' are no longer supported, as they do not \"\n \"represent unambiguous timedelta values durations.\"\n )\n\n if isinstance(data, TimedeltaArray) and freq is None:\n if copy:\n data = data.copy()\n return cls._simple_new(data, name=name, freq=freq)\n\n if isinstance(data, TimedeltaIndex) and freq is None and name is None:\n if copy:\n return data.copy()\n else:\n return data._shallow_copy()\n\n # - Cases checked above all return/raise before reaching here - #\n\n tdarr = TimedeltaArray._from_sequence(\n data, freq=freq, unit=unit, dtype=dtype, copy=copy\n )\n return cls._simple_new(tdarr, name=name)\n\n @classmethod\n def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE):\n # `dtype` is passed by _shallow_copy in corner cases, should always\n # be timedelta64[ns] if present\n\n if not isinstance(values, TimedeltaArray):\n values = TimedeltaArray._simple_new(values, dtype=dtype, freq=freq)\n else:\n if freq is None:\n freq = values.freq\n assert isinstance(values, TimedeltaArray), type(values)\n assert dtype == _TD_DTYPE, dtype\n assert values.dtype == \"m8[ns]\", values.dtype\n\n tdarr = TimedeltaArray._simple_new(values._data, freq=freq)\n result = object.__new__(cls)\n result._data = tdarr\n result._name = name\n # For groupby perf. 
See note in indexes/base about _index_data\n result._index_data = tdarr._data\n\n result._reset_identity()\n return result\n\n # -------------------------------------------------------------------\n # Rendering Methods\n\n @property\n def _formatter_func(self):\n from pandas.io.formats.format import _get_format_timedelta64\n\n return _get_format_timedelta64(self, box=True)\n\n # -------------------------------------------------------------------\n\n @Appender(Index.astype.__doc__)\n def astype(self, dtype, copy=True):\n dtype = pandas_dtype(dtype)\n if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):\n # Have to repeat the check for 'timedelta64' (not ns) dtype\n # so that we can return a numeric index, since pandas will return\n # a TimedeltaIndex when dtype='timedelta'\n result = self._data.astype(dtype, copy=copy)\n if self.hasnans:\n return Index(result, name=self.name)\n return Index(result.astype(\"i8\"), name=self.name)\n return DatetimeIndexOpsMixin.astype(self, dtype, copy=copy)\n\n def _maybe_promote(self, other):\n if other.inferred_type == \"timedelta\":\n other = TimedeltaIndex(other)\n return self, other\n\n def get_value(self, series, key):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray. Only use this if you\n know what you're doing\n \"\"\"\n if is_integer(key):\n loc = key\n else:\n loc = self.get_loc(key)\n return self._get_values_for_loc(series, loc)\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"\n Get integer location for requested label\n\n Returns\n -------\n loc : int, slice, or ndarray[int]\n \"\"\"\n if not is_scalar(key):\n raise InvalidIndexError(key)\n\n if is_valid_nat_for_dtype(key, self.dtype):\n key = NaT\n\n elif isinstance(key, str):\n try:\n key = Timedelta(key)\n except ValueError:\n raise KeyError(key)\n\n elif isinstance(key, self._data._recognized_scalars) or key is NaT:\n key = Timedelta(key)\n\n else:\n raise KeyError(key)\n\n if tolerance is not None:\n # try converting tolerance now, so errors don't get swallowed by\n # the try/except clauses below\n tolerance = self._convert_tolerance(tolerance, np.asarray(key))\n\n return Index.get_loc(self, key, method, tolerance)\n\n def _maybe_cast_slice_bound(self, label, side: str, kind):\n \"\"\"\n If label is a string, cast it to timedelta according to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n Returns\n -------\n label : object\n \"\"\"\n assert kind in [\"loc\", \"getitem\", None]\n\n if isinstance(label, str):\n parsed = Timedelta(label)\n lbound = parsed.round(parsed.resolution_string)\n if side == \"left\":\n return lbound\n else:\n return lbound + to_offset(parsed.resolution_string) - Timedelta(1, \"ns\")\n elif is_integer(label) or is_float(label):\n self._invalid_indexer(\"slice\", label)\n\n return label\n\n def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):\n # TODO: Check for non-True use_lhs/use_rhs\n assert isinstance(key, str), type(key)\n # given a key, try to figure out a location for a partial slice\n raise NotImplementedError\n\n def is_type_compatible(self, typ) -> bool:\n return typ == self.inferred_type or typ == \"timedelta\"\n\n @property\n def inferred_type(self) -> str:\n return \"timedelta64\"\n\n\nTimedeltaIndex._add_logical_methods_disabled()\n\n\ndef timedelta_range(\n start=None, end=None, periods=None, freq=None, name=None, closed=None\n) -> TimedeltaIndex:\n \"\"\"\n Return a fixed frequency TimedeltaIndex, with 
day as the default\n frequency.\n\n Parameters\n ----------\n start : str or timedelta-like, default None\n Left bound for generating timedeltas.\n end : str or timedelta-like, default None\n Right bound for generating timedeltas.\n periods : int, default None\n Number of periods to generate.\n freq : str or DateOffset, default 'D'\n Frequency strings can have multiples, e.g. '5H'.\n name : str, default None\n Name of the resulting TimedeltaIndex.\n closed : str, default None\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None).\n\n Returns\n -------\n rng : TimedeltaIndex\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end`` (closed on both sides).\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n\n >>> pd.timedelta_range(start='1 day', periods=4)\n TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq='D')\n\n The ``closed`` parameter specifies which endpoint is included. The default\n behavior is to include both endpoints.\n\n >>> pd.timedelta_range(start='1 day', periods=4, closed='right')\n TimedeltaIndex(['2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq='D')\n\n The ``freq`` parameter specifies the frequency of the TimedeltaIndex.\n Only fixed frequencies can be passed, non-fixed frequencies such as\n 'M' (month end) will raise.\n\n >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')\n TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',\n '1 days 18:00:00', '2 days 00:00:00'],\n dtype='timedelta64[ns]', freq='6H')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.timedelta_range(start='1 day', end='5 days', periods=4)\n TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',\n '5 days 00:00:00'],\n dtype='timedelta64[ns]', freq=None)\n \"\"\"\n if freq is None and com.any_none(periods, start, end):\n freq = \"D\"\n\n freq, freq_infer = dtl.maybe_infer_freq(freq)\n tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed)\n return TimedeltaIndex._simple_new(tdarr, name=name)\n" ]
[ [ "pandas.tseries.frequencies.to_offset", "numpy.asarray", "pandas.core.indexes.datetimelike.DatetimeIndexOpsMixin.astype", "pandas.core.indexes.base.Index", "pandas.core.arrays.timedeltas.TimedeltaArray._simple_new", "pandas.core.dtypes.common.is_timedelta64_ns_dtype", "pandas.core.indexes.extension.inherit_names", "pandas.core.arrays.timedeltas.TimedeltaArray._generate_range", "pandas.core.common.any_none", "pandas.core.indexes.base.maybe_extract_name", "pandas.core.dtypes.common.is_float", "pandas.core.arrays.timedeltas.TimedeltaArray._from_sequence", "pandas.core.arrays.datetimelike.maybe_infer_freq", "pandas.io.formats.format._get_format_timedelta64", "pandas.util._decorators.Appender", "pandas.core.dtypes.common.pandas_dtype", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas.core.dtypes.missing.is_valid_nat_for_dtype", "pandas.core.dtypes.common.is_scalar", "pandas.core.dtypes.common.is_integer", "pandas.core.indexes.base.Index.get_loc", "pandas.core.indexes.base.InvalidIndexError", "pandas._libs.Timedelta" ] ]
jackalhan/trax
[ "bbabf6cc8a0682218927080bce33a4f90591aa0b" ]
[ "trax/rl/envs/async_trajectory_collector.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A trajectory collector that polls on policy files and keeps collecting trajectories.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport multiprocessing\nimport os\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport gin\nimport jax\nfrom jax.config import config\nfrom tensor2tensor.envs import env_problem_utils\nfrom tensor2tensor.rl.google import atari_utils # GOOGLE-INTERNAL:\nimport tensorflow as tf\nfrom trax import rl # pylint: disable=unused-import\nfrom trax.rl import envs as rl_envs # pylint: disable=unused-import\nfrom trax.rl.envs import async_trajectory_collector_lib as async_lib\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_multi_string('config_file', None,\n 'Configuration file with parameters (.gin).')\nflags.DEFINE_multi_string('config', None,\n 'Configuration parameters (gin string).')\nflags.DEFINE_bool('use_tpu', False, \"Whether we're running on TPU.\")\nflags.DEFINE_bool('xm', False, 'Copy atari roms?')\n\nflags.DEFINE_bool(\n 'try_abort', True,\n 'Should we try to abort a trajectory collection if a newer '\n 'policy is available.')\n\nflags.DEFINE_string('output_dir', '', 'Output dir.')\nflags.DEFINE_string('envs_output_dir', '', 'Output dir for the envs.')\n\nflags.DEFINE_boolean(\n 'jax_debug_nans', False,\n 'Setting to true will help to debug nans and disable jit.')\nflags.DEFINE_boolean('disable_jit', False, 'Setting to true will disable jit.')\n\nflags.DEFINE_boolean('parallelize_envs', False,\n 'If true, sets parallelism to number of cpu cores.')\n\nflags.DEFINE_integer('replica', 0, 'Basically to append to trajectory name.')\nflags.DEFINE_bool('enable_eager_execution', False, '')\n\nflags.DEFINE_integer(\n 'max_trajectories_to_collect', -1,\n '-1 for infinite, otherwise whatever number was specified.')\n\n\n# TODO(afrozm): This code snippet is strewn across many places, unify it.\ndef initialize_gin():\n gin_configs = FLAGS.config or []\n gin.parse_config_files_and_bindings(FLAGS.config_file, gin_configs)\n\n\ndef get_output_dir():\n \"\"\"Return output_dir.\"\"\"\n output_dir = FLAGS.output_dir\n return output_dir\n\n\ndef update_jax_config():\n \"\"\"Update JAX config based on flags.\"\"\"\n\n if FLAGS.jax_debug_nans:\n config.update('jax_debug_nans', True)\n\n if FLAGS.use_tpu:\n config.update('jax_platform_name', 'tpu')\n else:\n config.update('jax_platform_name', 'gpu')\n\n\n@gin.configurable(blacklist=[\n 'output_dir',\n])\ndef create_envs_and_collect_trajectories(\n output_dir,\n env_name='OnlineTuneEnv-v0',\n max_timestep=None,\n clip_rewards=False,\n rendered_env=False,\n resize_dims=(105, 80),\n):\n \"\"\"Creates the envs and continuously collects trajectories.\"\"\"\n\n\n train_batch_size = 1\n eval_batch_size = 1\n\n # TODO(pkozakowski): Find a better way to determine this.\n train_env_kwargs = {}\n eval_env_kwargs = {}\n if 'OnlineTuneEnv' in 
env_name:\n envs_output_dir = FLAGS.envs_output_dir or os.path.join(output_dir, 'envs')\n train_env_output_dir = os.path.join(envs_output_dir, 'train')\n eval_env_output_dir = os.path.join(envs_output_dir, 'eval')\n train_env_kwargs = {'output_dir': train_env_output_dir}\n eval_env_kwargs = {'output_dir': eval_env_output_dir}\n\n parallelism = multiprocessing.cpu_count() if FLAGS.parallelize_envs else 1\n train_parallelism = min(train_batch_size, parallelism)\n eval_parallelism = min(eval_batch_size, parallelism)\n\n train_env = env_problem_utils.make_env(\n batch_size=train_batch_size,\n env_problem_name=env_name,\n resize=rendered_env,\n resize_dims=resize_dims,\n max_timestep=max_timestep,\n clip_rewards=clip_rewards,\n parallelism=train_parallelism,\n use_tpu=FLAGS.use_tpu,\n **train_env_kwargs)\n assert train_env\n\n eval_env = env_problem_utils.make_env(\n batch_size=eval_batch_size,\n env_problem_name=env_name,\n resize=rendered_env,\n resize_dims=resize_dims,\n max_timestep=max_timestep,\n clip_rewards=clip_rewards,\n parallelism=eval_parallelism,\n use_tpu=FLAGS.use_tpu,\n **eval_env_kwargs)\n assert eval_env\n\n def run_collect_loop():\n async_lib.continuously_collect_trajectories(\n output_dir,\n train_env,\n eval_env,\n trajectory_dump_dir=None,\n env_id=FLAGS.replica,\n try_abort=FLAGS.try_abort,\n max_trajectories_to_collect=(None\n if FLAGS.max_trajectories_to_collect < 0\n else FLAGS.max_trajectories_to_collect))\n\n if FLAGS.jax_debug_nans or FLAGS.disable_jit:\n with jax.disable_jit():\n run_collect_loop()\n else:\n run_collect_loop()\n\n\ndef main(argv):\n del argv\n\n if FLAGS.enable_eager_execution:\n tf.enable_eager_execution()\n\n logging.info('Initializing Gin.')\n initialize_gin()\n\n logging.info('Update JAX config.')\n update_jax_config()\n\n logging.info('Getting output_dir')\n output_dir = get_output_dir()\n logging.info('Got output_dir = %s', output_dir)\n\n logging.info('Starting Trajectory collection.')\n create_envs_and_collect_trajectories(output_dir)\n\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "tensorflow.enable_eager_execution" ] ]
djones1040/PyPhot
[ "9b0d2c71b28f83abb73d021693f2c75309c9d710" ]
[ "PythonPhot/iterstat.py" ]
[ "#!/usr/bin/env python\n# D. Jones - 2/13/14\n\nimport numpy as np\n\ndef iterstat(d,startMedian=False,sigmaclip=3.0,\n iter=6):\n \"\"\"Get the sigma-clipped mean of \n a distribution, d.\n\n Usage: mean,stdev = iterstat.iterstat\n\n Input:\n d: the data\n Optional Inputs:\n sigmaclip: number of standard deviations to clip\n startMedian: if True, begin with the median of the distribution\n iter: number of iterations\n \"\"\"\n\n clip=sigmaclip\n img=d.astype('float64')\n if startMedian:\n md=np.median(img)\n else:\n md=np.mean(img)\n n = float(len(img))\n std = np.sqrt(np.sum((img-md)**2.)/(n-1))\n\n for ii in range(iter):\n gd=np.where((img < md+clip*std) &\n (img > md-clip*std))\n\n md=np.mean(img[gd])\n n = float(len(gd[0]))\n std = np.sqrt(np.sum((img[gd]-md)**2.)/(n-1.))\n\n return(md,std)\n" ]
[ [ "numpy.sum", "numpy.median", "numpy.mean", "numpy.where" ] ]
CoraJung/end-to-end-spoken-language-understanding
[ "d1b15dad1a8f01336bcb0adcbf95d8c6ea279d09" ]
[ "training_finetune.py" ]
[ "import numpy as np\nimport torch\nfrom tqdm import tqdm # for displaying progress bar\nimport os\nfrom data import SLUDataset, ASRDataset\nfrom models import PretrainedModel, Model\nimport pandas as pd\n\nclass Trainer:\n\tdef __init__(self, model, config, lr):\n\t\tself.model = model\n\t\tself.config = config\n\t\tif isinstance(self.model, PretrainedModel):\n\t\t\tself.lr = config.pretraining_lr\n\t\t\tself.checkpoint_path = os.path.join(self.config.folder, \"pretraining\")\n\t\telse:\n\t\t\t# self.lr = config.training_lr\n\t\t\tself.lr = lr\n\t\t\tself.checkpoint_path = os.path.join(self.config.folder, \"training\")\n\t\tself.optimizer = torch.optim.Adam(model.parameters(), lr=self.lr)\n\t\tself.epoch = 0\n\t\tself.df = None\n\n###---Emmy edited on 10/29/2020---###\n### Add new arg to pass in the model dir for code pretrained on fluent.ai ###\n\tdef load_checkpoint(self, model_state_num, model_path=None):\n\t\tif model_path is not None:\n\t\t\tcheckpoint_path = model_path \n\t\t\tprint(f\"model path is given as {checkpoint_path}\")\n\t\telse:\n\t\t\tcheckpoint_path = self.checkpoint_path \n\t\t\tprint(f\"model path is not given and checkpoint_path <- self.checkpoint_path: {checkpoint_path}\") \n\t\tif os.path.isfile(os.path.join(checkpoint_path, \"model_state.pth\")):\n\t\t\tprint(\"there is a model_state.pth in given model path.\")\n\t\t\ttry:\n\t\t\t\tif self.model.is_cuda:\n\t\t\t\t\tprint(\"using cuda...\")\n\t\t\t\t\t### Edit by Sue (11/07/2020): This function loads the model states right before the final intent classifier ###########\n\t\t\t\t\tmodel_dict = self.model.state_dict()\n\t\t\t\t\tpretrained_dict = torch.load(os.path.join(checkpoint_path, \"model_state.pth\"))\n\t\t\t\t\tpretrained_dict_list = list(pretrained_dict.items())[:model_state_num]\n\t\t\t\t\tpretrained_dict = {k:v for (k,v) in pretrained_dict_list}\n\n\t\t\t\t\tfor param_tensor in pretrained_dict:\n\t\t\t\t\t\tprint(param_tensor, \"\\t\", pretrained_dict[param_tensor].size())\n\n\t\t\t\t\tmodel_dict.update(pretrained_dict)\n\t\t\t\t\tself.model.load_state_dict(model_dict)\n\t\t\t\t\tprint(\"pretrain model successfully loaded\")\n\t\t\t\t\t#####################################################################################################################\n\t\t\t\t\t# self.model.load_state_dict(torch.load(os.path.join(checkpoint_path, \"model_state.pth\")))\n\t\t\t\telse:\n\t\t\t\t\t### Edit by Sue (11/07/2020): This function loads the model states right before the final intent classifier ###########\n\t\t\t\t\tmodel_dict = self.model.state_dict()\n\t\t\t\t\tpretrained_dict = torch.load(os.path.join(checkpoint_path, \"model_state.pth\"), map_location=\"cpu\")\n\t\t\t\t\tpretrained_dict_list = list(pretrained_dict.items())[:model_state_num]\n\t\t\t\t\tpretrained_dict = {k:v for (k,v) in pretrained_dict_list}\n\n\t\t\t\t\tfor param_tensor in pretrained_dict:\n\t\t\t\t\t\tprint(param_tensor, \"\\t\", pretrained_dict[param_tensor].size())\n\n\t\t\t\t\tmodel_dict.update(pretrained_dict)\n\t\t\t\t\tself.model.load_state_dict(model_dict)\n\t\t\t\t\tprint(\"pretrain model successfully loaded\")\n\t\t\t\t\t#####################################################################################################################\n\t\t\t\t\t# self.model.load_state_dict(torch.load(os.path.join(checkpoint_path, \"model_state.pth\"), map_location=\"cpu\"))\n\t\t\texcept:\n\t\t\t\tprint(\"Could not load previous model; starting from scratch\")\n\t\telse:\n\t\t\tprint(\"No previous model; starting from scratch\")\n\n\tdef 
save_checkpoint(self):\n\t\ttry:\n\t\t\ttorch.save(self.model.state_dict(), os.path.join(self.checkpoint_path, \"model_state.pth\"))\n\t\t\tprint(\"Model's state_dict:\")\n\t\t\t### Edit by Cora (11/07/2020)\n\t\t\t# for param_tensor in self.model.state_dict():\n\t\t\t#\tprint(param_tensor, \"\\t\", self.model.state_dict()[param_tensor].size())\n\t\t\tsave_dir = os.path.join(self.checkpoint_path, \"model_state.pth\")\n\t\t\t### Edit by Cora (11/07/2020)\n\t\t\t#print(f\"model successfully saved to {save_dir}\")\n\t\texcept:\n\t\t\tprint(\"Could not save model\")\n\n\tdef log(self, results):\n\t\tif self.df is None:\n\t\t\tself.df = pd.DataFrame(columns=[field for field in results])\n\t\tself.df.loc[len(self.df)] = results\n\t\tself.df.to_csv(os.path.join(self.checkpoint_path, \"log.csv\"))\n\n\tdef train(self, dataset, print_interval=100):\n\t\t# TODO: refactor to remove if-statement?\n\t\tif isinstance(dataset, ASRDataset):\n\t\t\ttrain_phone_acc = 0\n\t\t\ttrain_phone_loss = 0\n\t\t\ttrain_word_acc = 0\n\t\t\ttrain_word_loss = 0\n\t\t\tnum_examples = 0\n\t\t\tself.model.train()\n\t\t\tfor idx, batch in enumerate(tqdm(dataset.loader)):\n\t\t\t\tx,y_phoneme,y_word = batch\n\t\t\t\tbatch_size = len(x)\n\t\t\t\tnum_examples += batch_size\n\t\t\t\tphoneme_loss, word_loss, phoneme_acc, word_acc = self.model(x,y_phoneme,y_word)\n\t\t\t\tif self.config.pretraining_type == 1: loss = phoneme_loss\n\t\t\t\tif self.config.pretraining_type == 2: loss = phoneme_loss + word_loss\n\t\t\t\tif self.config.pretraining_type == 3: loss = word_loss\n\t\t\t\tself.optimizer.zero_grad()\n\t\t\t\tloss.backward()\n\t\t\t\tself.optimizer.step()\n\t\t\t\ttrain_phone_loss += phoneme_loss.cpu().data.numpy().item() * batch_size\n\t\t\t\ttrain_word_loss += word_loss.cpu().data.numpy().item() * batch_size\n\t\t\t\ttrain_phone_acc += phoneme_acc.cpu().data.numpy().item() * batch_size\n\t\t\t\ttrain_word_acc += word_acc.cpu().data.numpy().item() * batch_size\n\t\t\t\tif idx % print_interval == 0:\n\t\t\t\t\tprint(\"phoneme loss: \" + str(phoneme_loss.cpu().data.numpy().item()))\n\t\t\t\t\tprint(\"word loss: \" + str(word_loss.cpu().data.numpy().item()))\n\t\t\t\t\tprint(\"phoneme acc: \" + str(phoneme_acc.cpu().data.numpy().item()))\n\t\t\t\t\tprint(\"word acc: \" + str(word_acc.cpu().data.numpy().item()))\n\t\t\ttrain_phone_loss /= num_examples\n\t\t\ttrain_phone_acc /= num_examples\n\t\t\ttrain_word_loss /= num_examples\n\t\t\ttrain_word_acc /= num_examples\n\t\t\tresults = {\"phone_loss\" : train_phone_loss, \"phone_acc\" : train_phone_acc, \"word_loss\" : train_word_loss, \"word_acc\" : train_word_acc, \"set\": \"train\"}\n\t\t\tself.log(results)\n\t\t\tself.epoch += 1\n\t\t\treturn train_phone_acc, train_phone_loss, train_word_acc, train_word_loss\n\t\telse: # SLUDataset\n\t\t\ttrain_intent_acc = 0\n\t\t\ttrain_intent_loss = 0\n\t\t\tnum_examples = 0\n\t\t\tself.model.train()\n\t\t\tself.model.print_frozen()\n\t\t\tfor idx, batch in enumerate(tqdm(dataset.loader)):\n\t\t\t\tx,y_intent = batch\n\t\t\t\tbatch_size = len(x)\n\t\t\t\tnum_examples += batch_size\n\t\t\t\tintent_loss, intent_acc = self.model(x,y_intent)\n\t\t\t\tloss = intent_loss\n\t\t\t\tself.optimizer.zero_grad()\n\t\t\t\tloss.backward()\n\t\t\t\tself.optimizer.step()\n\t\t\t\ttrain_intent_loss += intent_loss.cpu().data.numpy().item() * batch_size\n\t\t\t\ttrain_intent_acc += intent_acc.cpu().data.numpy().item() * batch_size\n \n\t\t\t\tif idx % print_interval == 0:\n\t\t\t\t\tprint(\"intent loss: \" + 
str(intent_loss.cpu().data.numpy().item()))\n\t\t\t\t\tprint(\"intent acc: \" + str(intent_acc.cpu().data.numpy().item()))\n\t\t\t\t\tif self.model.seq2seq:\n\t\t\t\t\t\tself.model.cpu(); self.model.is_cuda = False\n\t\t\t\t\t\tx = x.cpu(); y_intent = y_intent.cpu()\n\t\t\t\t\t\tprint(\"seq2seq output\")\n\t\t\t\t\t\tself.model.eval()\n\t\t\t\t\t\tprint(\"guess: \" + self.model.decode_intents(x)[0])\n\t\t\t\t\t\tprint(\"truth: \" + self.model.one_hot_to_string(y_intent[0],self.model.Sy_intent))\n\t\t\t\t\t\tself.model.train()\n\t\t\t\t\t\tself.model.cuda(); self.model.is_cuda = True\n\t\t\ttrain_intent_loss /= num_examples\n\t\t\ttrain_intent_acc /= num_examples\n\t\t\tself.model.unfreeze_one_layer()\n\t\t\tresults = {\"intent_loss\" : train_intent_loss, \"intent_acc\" : train_intent_acc, \"set\": \"train\"}\n\t\t\tself.log(results)\n\t\t\tself.epoch += 1\n\t\t\treturn train_intent_acc, train_intent_loss\n\n\tdef test(self, dataset, print_option = False):\n\t\tif isinstance(dataset, ASRDataset):\n\t\t\ttest_phone_acc = 0\n\t\t\ttest_phone_loss = 0\n\t\t\ttest_word_acc = 0\n\t\t\ttest_word_loss = 0\n\t\t\tnum_examples = 0\n\t\t\tself.model.eval()\n\t\t\tfor idx, batch in enumerate(dataset.loader):\n\t\t\t\tx,y_phoneme,y_word = batch\n\t\t\t\tbatch_size = len(x)\n\t\t\t\tnum_examples += batch_size\n\t\t\t\tphoneme_loss, word_loss, phoneme_acc, word_acc = self.model(x,y_phoneme,y_word)\n\t\t\t\ttest_phone_loss += phoneme_loss.cpu().data.numpy().item() * batch_size\n\t\t\t\ttest_word_loss += word_loss.cpu().data.numpy().item() * batch_size\n\t\t\t\ttest_phone_acc += phoneme_acc.cpu().data.numpy().item() * batch_size\n\t\t\t\ttest_word_acc += word_acc.cpu().data.numpy().item() * batch_size\n\t\t\ttest_phone_loss /= num_examples\n\t\t\ttest_phone_acc /= num_examples\n\t\t\ttest_word_loss /= num_examples\n\t\t\ttest_word_acc /= num_examples\n\t\t\tresults = {\"phone_loss\" : test_phone_loss, \"phone_acc\" : test_phone_acc, \"word_loss\" : test_word_loss, \"word_acc\" : test_word_acc,\"set\": \"valid\"}\n\t\t\tself.log(results)\n\t\t\treturn test_phone_acc, test_phone_loss, test_word_acc, test_word_loss \n\t\telse:\n\t\t\ttest_intent_acc = 0\n\t\t\ttest_intent_loss = 0\n\t\t\tnum_examples = 0\n\t\t\tself.model.eval()\n\t\t\tself.model.cpu(); self.model.is_cuda = False # beam search is memory-intensive; do on CPU for now\n\t\t\tfor idx, batch in enumerate(dataset.loader):\n\t\t\t\tx,y_intent = batch\n\t\t\t\tbatch_size = len(x)\n\t\t\t\tnum_examples += batch_size\n\t\t\t\tintent_loss, intent_acc = self.model(x,y_intent)\n\t\t\t\ttest_intent_loss += intent_loss.cpu().data.numpy().item() * batch_size\n\t\t\t\ttest_intent_acc += intent_acc.cpu().data.numpy().item() * batch_size\n\t\t\t\t#----- print out predicted and true intents 10/15/2020 - edit by Emmy ----#\n\t\t\t\tif print_option == True:\n\t\t\t\t\tprint(\"decoding batch %d\" % idx)\n\t\t\t\t\tguess_strings = np.array(self.model.decode_intents(x))\n\t\t\t\t\ttruth_strings = np.array(self.model.decode_intents_truth_label(y_intent))\n\t\t\t\t\t#test_intent_acc += (guess_strings == truth_strings).mean() * batch_size\n\t\t\t\t\t#print(\"acc: \" + str((guess_strings == truth_strings).mean()))\n\t\t\t\t\t######## Print Every Line - Wendy\n\t\t\t\t\tfor i in range(len(guess_strings)):\n\t\t\t\t\t\tprint(i, \"guess: \", guess_strings[i])\n\t\t\t\t\t\tprint(i, \"truth: \", truth_strings[i])\n\t\t\t\tif self.model.seq2seq and self.epoch > 1:\n\t\t\t\t\tprint(\"decoding batch %d\" % idx)\n\t\t\t\t\tguess_strings = 
np.array(self.model.decode_intents(x))\n\t\t\t\t\ttruth_strings = np.array([self.model.one_hot_to_string(y_intent[i],self.model.Sy_intent) for i in range(batch_size)])\n\t\t\t\t\ttest_intent_acc += (guess_strings == truth_strings).mean() * batch_size\n\t\t\t\t\tprint(\"acc: \" + str((guess_strings == truth_strings).mean()))\n\t\t\t\t\tprint(\"guess: \" + guess_strings[0])\n\t\t\t\t\tprint(\"truth: \" + truth_strings[0])\n\t\t\tself.model.cuda(); self.model.is_cuda = True\n\t\t\ttest_intent_loss /= num_examples\n\t\t\ttest_intent_acc /= num_examples\n\t\t\tresults = {\"intent_loss\" : test_intent_loss, \"intent_acc\" : test_intent_acc, \"set\": \"valid\"}\n\t\t\tself.log(results)\n\t\t\treturn test_intent_acc, test_intent_loss \n" ]
[ [ "pandas.DataFrame" ] ]
nataliyah123/addons
[ "13e40e613df3ead8f190258b273aeccaff05dedc" ]
[ "tensorflow_addons/optimizers/tests/discriminative_layer_training_test.py" ]
[ "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Discriminative Layer Training Optimizer for TensorFlow.\"\"\"\n\nimport pytest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_addons.optimizers.discriminative_layer_training import MultiOptimizer\nfrom tensorflow_addons.utils import test_utils\n\n\ndef _dtypes_to_test(use_gpu):\n # Based on issue #347 in the following link,\n # \"https://github.com/tensorflow/addons/issues/347\"\n # tf.half is not registered for 'ResourceScatterUpdate' OpKernel\n # for 'GPU' devices.\n # So we have to remove tf.half when testing with gpu.\n # The function \"_DtypesToTest\" is from\n # \"https://github.com/tensorflow/tensorflow/blob/5d4a6cee737a1dc6c20172a1dc1\n # 5df10def2df72/tensorflow/python/kernel_tests/conv_ops_3d_test.py#L53-L62\"\n # TODO(WindQAQ): Clean up this in TF2.4\n\n if use_gpu:\n return [tf.float32, tf.float64]\n else:\n return [tf.half, tf.float32, tf.float64]\n\n\n@pytest.mark.with_device([\"cpu\", \"gpu\"])\n@pytest.mark.parametrize(\"dtype\", [tf.float16, tf.float32, tf.float64])\n@pytest.mark.parametrize(\"serialize\", [True, False])\ndef test_fit_layer_optimizer(dtype, device, serialize):\n # Test ensures that each optimizer is only optimizing its own layer with its learning rate\n\n if \"gpu\" in device and dtype == tf.float16:\n pytest.xfail(\"See https://github.com/tensorflow/addons/issues/347\")\n\n model = tf.keras.Sequential(\n [tf.keras.Input(shape=[1]), tf.keras.layers.Dense(1), tf.keras.layers.Dense(1)]\n )\n\n x = np.array(np.ones([100]))\n y = np.array(np.ones([100]))\n\n weights_before_train = (\n model.layers[0].weights[0].numpy(),\n model.layers[1].weights[0].numpy(),\n )\n\n opt1 = tf.keras.optimizers.Adam(learning_rate=1e-3)\n opt2 = tf.keras.optimizers.SGD(learning_rate=0)\n\n opt_layer_pairs = [(opt1, model.layers[0]), (opt2, model.layers[1])]\n\n loss = tf.keras.losses.MSE\n optimizer = MultiOptimizer(opt_layer_pairs)\n\n model.compile(optimizer=optimizer, loss=loss)\n\n # serialize whole model including optimizer, clear the session, then reload the whole model.\n if serialize:\n model.save(\"test\", save_format=\"tf\")\n tf.keras.backend.clear_session()\n model = tf.keras.models.load_model(\"test\")\n\n model.fit(x, y, batch_size=8, epochs=10)\n\n weights_after_train = (\n model.layers[0].weights[0].numpy(),\n model.layers[1].weights[0].numpy(),\n )\n\n with np.testing.assert_raises(AssertionError):\n # expect weights to be different for layer 1\n test_utils.assert_allclose_according_to_type(\n weights_before_train[0], weights_after_train[0]\n )\n\n # expect weights to be same for layer 2\n test_utils.assert_allclose_according_to_type(\n weights_before_train[1], weights_after_train[1]\n )\n\n\ndef test_serialization():\n\n model = tf.keras.Sequential(\n [tf.keras.Input(shape=[1]), tf.keras.layers.Dense(1), tf.keras.layers.Dense(1)]\n )\n\n 
opt1 = tf.keras.optimizers.Adam(learning_rate=1e-3)\n opt2 = tf.keras.optimizers.SGD(learning_rate=0)\n\n opt_layer_pairs = [(opt1, model.layers[0]), (opt2, model.layers[1])]\n\n optimizer = MultiOptimizer(opt_layer_pairs)\n config = tf.keras.optimizers.serialize(optimizer)\n\n new_optimizer = tf.keras.optimizers.deserialize(config)\n assert new_optimizer.get_config() == optimizer.get_config()\n" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.keras.optimizers.deserialize", "tensorflow.keras.Input", "tensorflow.keras.layers.Dense", "numpy.ones", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.backend.clear_session", "numpy.testing.assert_raises", "tensorflow.keras.optimizers.serialize", "tensorflow.keras.optimizers.SGD" ] ]
lilleswing/deepchem
[ "355954b37d333fc3e2c3b3e28d103297eb642769" ]
[ "deepchem/feat/complex_featurizers/splif_fingerprints.py" ]
[ "\"\"\"\nSPLIF Fingerprints for molecular complexes.\n\"\"\"\nimport logging\nimport itertools\nimport numpy as np\nfrom deepchem.utils.hash_utils import hash_ecfp_pair\nfrom deepchem.utils.rdkit_utils import load_complex\nfrom deepchem.utils.rdkit_utils import compute_all_ecfp\nfrom deepchem.utils.rdkit_utils import MoleculeLoadException\nfrom deepchem.utils.rdkit_utils import compute_contact_centroid\nfrom deepchem.feat import ComplexFeaturizer\nfrom deepchem.utils.hash_utils import vectorize\nfrom deepchem.utils.voxel_utils import voxelize\nfrom deepchem.utils.voxel_utils import convert_atom_pair_to_voxel\nfrom deepchem.utils.geometry_utils import compute_pairwise_distances\nfrom deepchem.utils.geometry_utils import subtract_centroid\n\nfrom typing import Tuple, Dict, List\n\nlogger = logging.getLogger(__name__)\n\nSPLIF_CONTACT_BINS = [(0, 2.0), (2.0, 3.0), (3.0, 4.5)]\n\n\ndef compute_splif_features_in_range(frag1: Tuple,\n frag2: Tuple,\n pairwise_distances: np.ndarray,\n contact_bin: List,\n ecfp_degree: int = 2) -> Dict:\n \"\"\"Computes SPLIF features for close atoms in molecular complexes.\n\n Finds all frag1 atoms that are > contact_bin[0] and <\n contact_bin[1] away from frag2 atoms. Then, finds the ECFP\n fingerprints for the contacting atoms. Returns a dictionary\n mapping (frag1_index_i, frag2_index_j) --> (frag1_ecfp_i,\n frag2_ecfp_j)\n\n Parameters\n ----------\n frag1: Tuple\n A tuple of (coords, mol) returned by `load_molecule`.\n frag2: Tuple\n A tuple of (coords, mol) returned by `load_molecule`.\n contact_bins: np.ndarray\n Ranges of pair distances which are placed in separate bins.\n pairwise_distances: np.ndarray\n Array of pairwise fragment-fragment distances (Angstroms)\n ecfp_degree: int\n ECFP radius\n \"\"\"\n contacts = np.nonzero((pairwise_distances > contact_bin[0]) &\n (pairwise_distances < contact_bin[1]))\n frag1_atoms = set([int(c) for c in contacts[0].tolist()])\n contacts = zip(contacts[0], contacts[1])\n\n frag1_ecfp_dict = compute_all_ecfp(\n frag1[1], indices=frag1_atoms, degree=ecfp_degree)\n frag2_ecfp_dict = compute_all_ecfp(frag2[1], degree=ecfp_degree)\n splif_dict = {\n contact: (frag1_ecfp_dict[contact[0]], frag2_ecfp_dict[contact[1]])\n for contact in contacts\n }\n return splif_dict\n\n\ndef featurize_splif(frag1, frag2, contact_bins, pairwise_distances,\n ecfp_degree):\n \"\"\"Computes SPLIF featurization of fragment interactions binding pocket.\n\n For each contact range (i.e. 1 A to 2 A, 2 A to 3 A, etc.)\n compute a dictionary mapping (frag1_index_i, frag2_index_j)\n tuples --> (frag1_ecfp_i, frag2_ecfp_j) tuples. 
Return a\n list of such splif dictionaries.\n\n Parameters\n ----------\n frag1: Tuple\n A tuple of (coords, mol) returned by `load_molecule`.\n frag2: Tuple\n A tuple of (coords, mol) returned by `load_molecule`.\n contact_bins: np.ndarray\n Ranges of pair distances which are placed in separate bins.\n pairwise_distances: np.ndarray\n Array of pairwise fragment-fragment distances (Angstroms)\n ecfp_degree: int\n ECFP radius, the graph distance at which fragments are computed.\n\n Returns\n -------\n Dictionaries of SPLIF interactions suitable for `vectorize` or\n `voxelize`.\n \"\"\"\n splif_dicts = []\n for i, contact_bin in enumerate(contact_bins):\n splif_dicts.append(\n compute_splif_features_in_range(frag1, frag2, pairwise_distances,\n contact_bin, ecfp_degree))\n\n return splif_dicts\n\n\nclass SplifFingerprint(ComplexFeaturizer):\n \"\"\"Computes SPLIF Fingerprints for a macromolecular complex.\n\n SPLIF fingerprints are based on a technique introduced in the\n following paper.\n\n Da, C., and D. Kireev. \"Structural protein–ligand interaction\n fingerprints (SPLIF) for structure-based virtual screening:\n method and benchmark study.\" Journal of chemical information\n and modeling 54.9 (2014): 2555-2561.\n\n SPLIF fingerprints are a subclass of `ComplexFeaturizer`. It\n requires 3D coordinates for a molecular complex. For each ligand\n atom, it identifies close pairs of atoms from different molecules.\n These atom pairs are expanded to 2D circular fragments and a\n fingerprint for the union is turned on in the bit vector. Note that\n we slightly generalize the original paper by not requiring the\n interacting molecules to be proteins or ligands.\n\n This is conceptually pretty similar to\n `ContactCircularFingerprint` but computes ECFP fragments only\n for direct contacts instead of the entire contact region.\n\n For a macromolecular complex, returns a vector of shape\n `(len(contact_bins)*size,)`\n \"\"\"\n\n def __init__(self, contact_bins=None, radius=2, size=8):\n \"\"\"\n Parameters\n ----------\n contact_bins: list[tuple]\n List of contact bins. If not specified is set to default\n `[(0, 2.0), (2.0, 3.0), (3.0, 4.5)]`.\n radius : int, optional (default 2)\n Fingerprint radius used for circular fingerprints.\n size: int, optional (default 8)\n Length of generated bit vector.\n \"\"\"\n if contact_bins is None:\n self.contact_bins = SPLIF_CONTACT_BINS\n else:\n self.contact_bins = contact_bins\n self.size = size\n self.radius = radius\n\n def _featurize(self, mol_pdb: str, complex_pdb: str):\n \"\"\"\n Compute featurization for a molecular complex\n\n Parameters\n ----------\n mol_pdb: str\n Filename for ligand molecule\n complex_pdb: str\n Filename for protein molecule\n \"\"\"\n molecular_complex = (mol_pdb, complex_pdb)\n try:\n fragments = load_complex(molecular_complex, add_hydrogens=False)\n\n except MoleculeLoadException:\n logger.warning(\"This molecule cannot be loaded by Rdkit. 
Returning None\")\n return None\n pairwise_features = []\n # We compute pairwise contact fingerprints\n for (frag1, frag2) in itertools.combinations(fragments, 2):\n # Get coordinates\n distances = compute_pairwise_distances(frag1[0], frag2[0])\n # distances = compute_pairwise_distances(prot_xyz, lig_xyz)\n vectors = [\n vectorize(hash_ecfp_pair, feature_dict=splif_dict,\n size=self.size) for splif_dict in featurize_splif(\n frag1, frag2, self.contact_bins, distances, self.radius)\n ]\n pairwise_features += vectors\n pairwise_features = np.concatenate(pairwise_features)\n return pairwise_features\n\n\nclass SplifVoxelizer(ComplexFeaturizer):\n \"\"\"Computes SPLIF voxel grid for a macromolecular complex.\n\n SPLIF fingerprints are based on a technique introduced in the\n following paper [1]_.\n\n The SPLIF voxelizer localizes local SPLIF descriptors in\n space, by assigning features to the voxel in which they\n originated. This technique may be useful for downstream\n learning methods such as convolutional networks.\n\n Featurizes a macromolecular complex into a tensor of shape\n `(voxels_per_edge, voxels_per_edge, voxels_per_edge, size)`\n where `voxels_per_edge = int(box_width/voxel_width)`.\n\n References\n ----------\n .. [1] Da, C., and D. Kireev. \"Structural protein–ligand interaction\n fingerprints (SPLIF) for structure-based virtual screening:\n method and benchmark study.\" Journal of chemical information\n and modeling 54.9 (2014): 2555-2561.\n \"\"\"\n\n def __init__(self,\n cutoff: float = 4.5,\n contact_bins: List = None,\n radius: int = 2,\n size: int = 8,\n box_width: float = 16.0,\n voxel_width: float = 1.0):\n \"\"\"\n Parameters\n ----------\n cutoff: float (default 4.5)\n Distance cutoff in angstroms for molecules in complex.\n contact_bins: list[tuple]\n List of contact bins. If not specified is set to default\n `[(0, 2.0), (2.0, 3.0), (3.0, 4.5)]`.\n radius : int, optional (default 2)\n Fingerprint radius used for circular fingerprints.\n size: int, optional (default 8)\n Length of generated bit vector.\n box_width: float, optional (default 16.0)\n Size of a box in which voxel features are calculated. Box\n is centered on a ligand centroid.\n voxel_width: float, optional (default 1.0)\n Size of a 3D voxel in a grid.\n \"\"\"\n self.cutoff = cutoff\n if contact_bins is None:\n self.contact_bins = SPLIF_CONTACT_BINS\n else:\n self.contact_bins = contact_bins\n self.size = size\n self.radius = radius\n self.box_width = box_width\n self.voxel_width = voxel_width\n self.voxels_per_edge = int(self.box_width / self.voxel_width)\n\n def _featurize(self, mol_pdb: str, complex_pdb: str):\n \"\"\"\n Compute featurization for a molecular complex\n\n Parameters\n ----------\n mol_pdb: str\n Filename for ligand molecule\n complex_pdb: str\n Filename for protein molecule\n \"\"\"\n molecular_complex = (mol_pdb, complex_pdb)\n try:\n fragments = load_complex(molecular_complex, add_hydrogens=False)\n\n except MoleculeLoadException:\n logger.warning(\"This molecule cannot be loaded by Rdkit. 
Returning None\")\n return None\n pairwise_features = []\n # We compute pairwise contact fingerprints\n centroid = compute_contact_centroid(fragments, cutoff=self.cutoff)\n for (frag1, frag2) in itertools.combinations(fragments, 2):\n distances = compute_pairwise_distances(frag1[0], frag2[0])\n frag1_xyz = subtract_centroid(frag1[0], centroid)\n frag2_xyz = subtract_centroid(frag2[0], centroid)\n xyzs = [frag1_xyz, frag2_xyz]\n pairwise_features.append(\n np.concatenate(\n [\n voxelize(\n convert_atom_pair_to_voxel,\n hash_function=hash_ecfp_pair,\n coordinates=xyzs,\n box_width=self.box_width,\n voxel_width=self.voxel_width,\n feature_dict=splif_dict,\n nb_channel=self.size)\n for splif_dict in featurize_splif(\n frag1, frag2, self.contact_bins, distances, self.radius)\n ],\n axis=-1))\n # Features are of shape (voxels_per_edge, voxels_per_edge, voxels_per_edge, 1) so we should concatenate on the last axis.\n return np.concatenate(pairwise_features, axis=-1)\n" ]
[ [ "numpy.concatenate", "numpy.nonzero" ] ]
NUDTUGVexplorer/acados
[ "430fdb19896368ef48e76cda954d1d698ac57c7b" ]
[ "examples/acados_python/getting_started/ocp/example_gnsf_ocp.py" ]
[ "#\n# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,\n# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,\n# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,\n# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl\n#\n# This file is part of acados.\n#\n# The 2-Clause BSD License\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.;\n#\n\nimport sys\nsys.path.insert(0, '../common')\n\nimport json\nfrom acados_template import AcadosOcp, AcadosOcpSolver\nfrom export_pendulum_ode_model import export_pendulum_ode_model\nimport numpy as np\nimport scipy.linalg\nfrom utils import plot_pendulum\n\n# create ocp object to formulate the OCP\nocp = AcadosOcp()\n\n# set model\nmodel = export_pendulum_ode_model()\nocp.model = model\n\n# load gnsf model\n# NOTE: generated from Matlab, using simulation example of pendulum model with irk_gnsf\n# then >> dump_gnsf_functions(sim.model_struct)\nwith open('../common/' + model.name + '_gnsf_functions.json', 'r') as f:\n gnsf_dict = json.load(f)\nocp.gnsf_model = gnsf_dict\n\nTf = 1.0\nnx = model.x.size()[0]\nnu = model.u.size()[0]\nny = nx + nu\nny_e = nx\nN = 20\n\n# set dimensions\nocp.dims.ny = ny\nocp.dims.ny_e = ny_e\nocp.dims.nbu = nu\nocp.dims.N = N\n\n# set cost\nQ = 2*np.diag([1e3, 1e3, 1e-2, 1e-2])\nR = 2*np.diag([1e-2])\n\nocp.cost.W_e = Q\nocp.cost.W = scipy.linalg.block_diag(Q, R)\n\nocp.cost.Vx = np.zeros((ny, nx))\nocp.cost.Vx[:nx,:nx] = np.eye(nx)\n\nVu = np.zeros((ny, nu))\nVu[4,0] = 1.0\nocp.cost.Vu = Vu\n\nocp.cost.Vx_e = np.eye(nx)\n\nocp.cost.yref = np.zeros((ny, ))\nocp.cost.yref_e = np.zeros((ny_e, ))\n\n# set constraints\nFmax = 80\nocp.constraints.lbu = np.array([-Fmax])\nocp.constraints.ubu = np.array([+Fmax])\nocp.constraints.x0 = np.array([0.0, np.pi, 0.0, 0.0])\nocp.constraints.idxbu = np.array([0])\n\nocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM' # FULL_CONDENSING_QPOASES\nocp.solver_options.hessian_approx = 'GAUSS_NEWTON'\nocp.solver_options.integrator_type = 'GNSF'\nocp.solver_options.print_level = 0\n\n# set prediction horizon\nocp.solver_options.tf = Tf\nocp.solver_options.nlp_solver_type = 'SQP' # SQP_RTI\n\nocp_solver = AcadosOcpSolver(ocp, json_file = 'acados_ocp.json')\n\nsimX = np.ndarray((N+1, nx))\nsimU 
= np.ndarray((N, nu))\n\nstatus = ocp_solver.solve()\n\nif status != 0:\n raise Exception('acados returned status {}. Exiting.'.format(status))\n\n# get solution\nfor i in range(N):\n simX[i,:] = ocp_solver.get(i, \"x\")\n simU[i,:] = ocp_solver.get(i, \"u\")\nsimX[N,:] = ocp_solver.get(N, \"x\")\n\nplot_pendulum(Tf/N, Fmax, simU, simX)\n" ]
[ [ "numpy.diag", "numpy.eye", "numpy.ndarray", "numpy.array", "numpy.zeros" ] ]
haesleinhuepf/napari-tabu
[ "710b99dfbc5490b70c56651e646f43b26e6e2e11" ]
[ "napari_tabu/_tests/test_dock_widget.py" ]
[ "import numpy as np\n\nimport napari_tabu\nimport pytest\n\n\ndef test_something_with_viewer(make_napari_viewer):\n\n viewer = make_napari_viewer()\n num_dw = len(viewer.window._dock_widgets)\n from napari_tabu._dock_widget import SendBackWidget\n\n viewer.window.add_dock_widget(\n SendBackWidget(viewer, viewer)\n )\n assert len(viewer.window._dock_widgets) == num_dw + 1\n\n from napari_tabu._dock_widget import _add_layer_to_viewer\n\n image_layer = viewer.add_image(np.random.random((10, 10)))\n labels_layer = viewer.add_labels(np.random.random((10, 10)).astype(int))\n points_layer = viewer.add_points(np.random.random((2, 2)))\n shapes_layer = viewer.add_shapes(np.random.random((2, 2)))\n\n\n _add_layer_to_viewer(image_layer, viewer)\n _add_layer_to_viewer(labels_layer, viewer)\n _add_layer_to_viewer(points_layer, viewer)\n _add_layer_to_viewer(shapes_layer, viewer)\n" ]
[ [ "numpy.random.random" ] ]
mrterry/scipy
[ "f47d2f16e6debbb7753e9ee0fa0ff78d4b4a0c2b", "112d9a25fe3b898eff862e4d4596409372a9b237" ]
[ "scipy/sparse/construct.py", "scipy/linalg/tests/test_blas.py" ]
[ "\"\"\"Functions to construct sparse matrices\n\"\"\"\n\n__docformat__ = \"restructuredtext en\"\n\n__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum',\n 'hstack', 'vstack', 'bmat', 'rand']\n\n\nfrom warnings import warn\n\nimport numpy as np\n\nfrom sputils import upcast\n\nfrom csr import csr_matrix\nfrom csc import csc_matrix\nfrom bsr import bsr_matrix\nfrom coo import coo_matrix\nfrom lil import lil_matrix\nfrom dia import dia_matrix\n\ndef spdiags(data, diags, m, n, format=None):\n \"\"\"\n Return a sparse matrix from diagonals.\n\n Parameters\n ----------\n data : array_like\n matrix diagonals stored row-wise\n diags : diagonals to set\n - k = 0 the main diagonal\n - k > 0 the k-th upper diagonal\n - k < 0 the k-th lower diagonal\n m, n : int\n shape of the result\n format : format of the result (e.g. \"csr\")\n By default (format=None) an appropriate sparse matrix\n format is returned. This choice is subject to change.\n\n See Also\n --------\n dia_matrix : the sparse DIAgonal format.\n\n Examples\n --------\n >>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]])\n >>> diags = array([0,-1,2])\n >>> spdiags(data, diags, 4, 4).todense()\n matrix([[1, 0, 3, 0],\n [1, 2, 0, 4],\n [0, 2, 3, 0],\n [0, 0, 3, 4]])\n\n \"\"\"\n return dia_matrix((data, diags), shape=(m,n)).asformat(format)\n\ndef identity(n, dtype='d', format=None):\n \"\"\"Identity matrix in sparse format\n\n Returns an identity matrix with shape (n,n) using a given\n sparse format and dtype.\n\n Parameters\n ----------\n n : integer\n Shape of the identity matrix.\n dtype :\n Data type of the matrix\n format : string\n Sparse format of the result, e.g. format=\"csr\", etc.\n\n Examples\n --------\n >>> identity(3).todense()\n matrix([[ 1., 0., 0.],\n [ 0., 1., 0.],\n [ 0., 0., 1.]])\n >>> identity(3, dtype='int8', format='dia')\n <3x3 sparse matrix of type '<type 'numpy.int8'>'\n with 3 stored elements (1 diagonals) in DIAgonal format>\n\n \"\"\"\n\n if format in ['csr','csc']:\n indptr = np.arange(n+1, dtype=np.intc)\n indices = np.arange(n, dtype=np.intc)\n data = np.ones(n, dtype=dtype)\n cls = eval('%s_matrix' % format)\n return cls((data,indices,indptr),(n,n))\n elif format == 'coo':\n row = np.arange(n, dtype=np.intc)\n col = np.arange(n, dtype=np.intc)\n data = np.ones(n, dtype=dtype)\n return coo_matrix((data,(row,col)),(n,n))\n elif format == 'dia':\n data = np.ones(n, dtype=dtype)\n diags = [0]\n return dia_matrix((data,diags), shape=(n,n))\n else:\n return identity(n, dtype=dtype, format='csr').asformat(format)\n\n\ndef eye(m, n, k=0, dtype='d', format=None):\n \"\"\"eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal\n is all ones and everything else is zeros.\n \"\"\"\n m,n = int(m),int(n)\n diags = np.ones((1, max(0, min(m + k, n))), dtype=dtype)\n return spdiags(diags, k, m, n).asformat(format)\n\n\ndef kron(A, B, format=None):\n \"\"\"kronecker product of sparse matrices A and B\n\n Parameters\n ----------\n A : sparse or dense matrix\n first matrix of the product\n B : sparse or dense matrix\n second matrix of the product\n format : string\n format of the result (e.g. 
\"csr\")\n\n Returns\n -------\n kronecker product in a sparse matrix format\n\n\n Examples\n --------\n >>> A = csr_matrix(array([[0,2],[5,0]]))\n >>> B = csr_matrix(array([[1,2],[3,4]]))\n >>> kron(A,B).todense()\n matrix([[ 0, 0, 2, 4],\n [ 0, 0, 6, 8],\n [ 5, 10, 0, 0],\n [15, 20, 0, 0]])\n\n >>> kron(A,[[1,2],[3,4]]).todense()\n matrix([[ 0, 0, 2, 4],\n [ 0, 0, 6, 8],\n [ 5, 10, 0, 0],\n [15, 20, 0, 0]])\n\n \"\"\"\n B = coo_matrix(B)\n\n if (format is None or format == \"bsr\") and 2*B.nnz >= B.shape[0] * B.shape[1]:\n #B is fairly dense, use BSR\n A = csr_matrix(A,copy=True)\n\n output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])\n\n if A.nnz == 0 or B.nnz == 0:\n # kronecker product is the zero matrix\n return coo_matrix( output_shape )\n\n B = B.toarray()\n data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])\n data = data * B\n\n return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)\n else:\n #use COO\n A = coo_matrix(A)\n output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])\n\n if A.nnz == 0 or B.nnz == 0:\n # kronecker product is the zero matrix\n return coo_matrix( output_shape )\n\n # expand entries of a into blocks\n row = A.row.repeat(B.nnz)\n col = A.col.repeat(B.nnz)\n data = A.data.repeat(B.nnz)\n\n row *= B.shape[0]\n col *= B.shape[1]\n\n # increment block indices\n row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)\n row += B.row\n col += B.col\n row,col = row.reshape(-1),col.reshape(-1)\n\n # compute block entries\n data = data.reshape(-1,B.nnz) * B.data\n data = data.reshape(-1)\n\n return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)\n\ndef kronsum(A, B, format=None):\n \"\"\"kronecker sum of sparse matrices A and B\n\n Kronecker sum of two sparse matrices is a sum of two Kronecker\n products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)\n and B has shape (n,n) and I_m and I_n are identity matrices\n of shape (m,m) and (n,n) respectively.\n\n Parameters\n ----------\n A\n square matrix\n B\n square matrix\n format : string\n format of the result (e.g. \"csr\")\n\n Returns\n -------\n kronecker sum in a sparse matrix format\n\n Examples\n --------\n\n\n \"\"\"\n A = coo_matrix(A)\n B = coo_matrix(B)\n\n if A.shape[0] != A.shape[1]:\n raise ValueError('A is not square')\n\n if B.shape[0] != B.shape[1]:\n raise ValueError('B is not square')\n\n dtype = upcast(A.dtype, B.dtype)\n\n L = kron(identity(B.shape[0],dtype=dtype), A, format=format)\n R = kron(B, identity(A.shape[0],dtype=dtype), format=format)\n\n return (L+R).asformat(format) #since L + R is not always same format\n\n\ndef hstack(blocks, format=None, dtype=None):\n \"\"\"\n Stack sparse matrices horizontally (column wise)\n\n Parameters\n ----------\n blocks\n sequence of sparse matrices with compatible shapes\n format : string\n sparse format of the result (e.g. 
\"csr\")\n by default an appropriate sparse matrix format is returned.\n This choice is subject to change.\n\n See Also\n --------\n vstack : stack sparse matrices vertically (row wise)\n\n Examples\n --------\n >>> from scipy.sparse import coo_matrix, vstack\n >>> A = coo_matrix([[1,2],[3,4]])\n >>> B = coo_matrix([[5],[6]])\n >>> hstack( [A,B] ).todense()\n matrix([[1, 2, 5],\n [3, 4, 6]])\n\n \"\"\"\n return bmat([blocks], format=format, dtype=dtype)\n\ndef vstack(blocks, format=None, dtype=None):\n \"\"\"\n Stack sparse matrices vertically (row wise)\n\n Parameters\n ----------\n blocks\n sequence of sparse matrices with compatible shapes\n format : string\n sparse format of the result (e.g. \"csr\")\n by default an appropriate sparse matrix format is returned.\n This choice is subject to change.\n\n See Also\n --------\n hstack : stack sparse matrices horizontally (column wise)\n\n Examples\n --------\n >>> from scipy.sparse import coo_matrix, vstack\n >>> A = coo_matrix([[1,2],[3,4]])\n >>> B = coo_matrix([[5,6]])\n >>> vstack( [A,B] ).todense()\n matrix([[1, 2],\n [3, 4],\n [5, 6]])\n\n \"\"\"\n return bmat([ [b] for b in blocks ], format=format, dtype=dtype)\n\ndef bmat(blocks, format=None, dtype=None):\n \"\"\"\n Build a sparse matrix from sparse sub-blocks\n\n Parameters\n ----------\n blocks\n grid of sparse matrices with compatible shapes\n an entry of None implies an all-zero matrix\n format : sparse format of the result (e.g. \"csr\")\n by default an appropriate sparse matrix format is returned.\n This choice is subject to change.\n\n Examples\n --------\n >>> from scipy.sparse import coo_matrix, bmat\n >>> A = coo_matrix([[1,2],[3,4]])\n >>> B = coo_matrix([[5],[6]])\n >>> C = coo_matrix([[7]])\n >>> bmat( [[A,B],[None,C]] ).todense()\n matrix([[1, 2, 5],\n [3, 4, 6],\n [0, 0, 7]])\n\n >>> bmat( [[A,None],[None,C]] ).todense()\n matrix([[1, 2, 0],\n [3, 4, 0],\n [0, 0, 7]])\n\n \"\"\"\n\n blocks = np.asarray(blocks, dtype='object')\n\n if np.rank(blocks) != 2:\n raise ValueError('blocks must have rank 2')\n\n M,N = blocks.shape\n\n block_mask = np.zeros(blocks.shape, dtype=np.bool)\n brow_lengths = np.zeros(blocks.shape[0], dtype=np.intc)\n bcol_lengths = np.zeros(blocks.shape[1], dtype=np.intc)\n\n # convert everything to COO format\n for i in range(M):\n for j in range(N):\n if blocks[i,j] is not None:\n A = coo_matrix(blocks[i,j])\n blocks[i,j] = A\n block_mask[i,j] = True\n\n if brow_lengths[i] == 0:\n brow_lengths[i] = A.shape[0]\n else:\n if brow_lengths[i] != A.shape[0]:\n raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)\n\n if bcol_lengths[j] == 0:\n bcol_lengths[j] = A.shape[1]\n else:\n if bcol_lengths[j] != A.shape[1]:\n raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)\n\n\n # ensure that at least one value in each row and col is not None\n if brow_lengths.min() == 0:\n raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin() )\n if bcol_lengths.min() == 0:\n raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin() )\n\n nnz = sum([ A.nnz for A in blocks[block_mask] ])\n if dtype is None:\n dtype = upcast( *tuple([A.dtype for A in blocks[block_mask]]) )\n\n row_offsets = np.concatenate(([0], np.cumsum(brow_lengths)))\n col_offsets = np.concatenate(([0], np.cumsum(bcol_lengths)))\n\n data = np.empty(nnz, dtype=dtype)\n row = np.empty(nnz, dtype=np.intc)\n col = np.empty(nnz, dtype=np.intc)\n\n nnz = 0\n for i in range(M):\n for j in range(N):\n if blocks[i,j] is not None:\n A = blocks[i,j]\n 
data[nnz:nnz + A.nnz] = A.data\n row[nnz:nnz + A.nnz] = A.row\n col[nnz:nnz + A.nnz] = A.col\n\n row[nnz:nnz + A.nnz] += row_offsets[i]\n col[nnz:nnz + A.nnz] += col_offsets[j]\n\n nnz += A.nnz\n\n shape = (np.sum(brow_lengths), np.sum(bcol_lengths))\n return coo_matrix((data, (row, col)), shape=shape).asformat(format)\n\ndef rand(m, n, density=0.01, format=\"coo\", dtype=None):\n \"\"\"Generate a sparse matrix of the given shape and density with uniformely\n distributed values.\n\n Parameters\n ----------\n m, n: int\n shape of the matrix\n density: real\n density of the generated matrix: density equal to one means a full\n matrix, density of 0 means a matrix with no non-zero items.\n format: str\n sparse matrix format.\n dtype: dtype\n type of the returned matrix values.\n\n Notes\n -----\n Only float types are supported for now.\n \"\"\"\n if density < 0 or density > 1:\n raise ValueError(\"density expected to be 0 <= density <= 1\")\n if dtype and not dtype in [np.float32, np.float64, np.longdouble]:\n raise NotImplementedError(\"type %s not supported\" % dtype)\n\n mn = m * n\n\n # XXX: sparse uses intc instead of intp...\n tp = np.intp\n if mn > np.iinfo(tp).max:\n msg = \"\"\"\\\nTrying to generate a random sparse matrix such as the product of dimensions is\ngreater than %d - this is not supported on this machine\n\"\"\"\n raise ValueError(msg % np.iinfo(tp).max)\n\n # Number of non zero values\n k = long(density * m * n)\n\n # Generate a few more values than k so that we can get unique values\n # afterwards.\n # XXX: one could be smarter here\n mlow = 5\n fac = 1.02\n gk = min(k + mlow, fac * k)\n\n def _gen_unique_rand(_gk):\n id = np.random.rand(_gk)\n return np.unique(np.floor(id * mn))[:k]\n\n id = _gen_unique_rand(gk)\n while id.size < k:\n gk *= 1.05\n id = _gen_unique_rand(gk)\n\n j = np.floor(id * 1. 
/ m).astype(tp)\n i = (id - j * m).astype(tp)\n vals = np.random.rand(k).astype(dtype)\n return coo_matrix((vals, (i, j)), shape=(m, n)).asformat(format)\n", "#!/usr/bin/env python\n#\n# Created by: Pearu Peterson, April 2002\n#\n\n__usage__ = \"\"\"\nBuild linalg:\n python setup.py build\nRun tests if scipy is installed:\n python -c 'import scipy;scipy.linalg.test()'\n\"\"\"\n\nimport math\n\nimport numpy as np\nfrom numpy.testing import TestCase, run_module_suite, assert_equal, \\\n assert_almost_equal, assert_array_almost_equal\n\nfrom scipy.linalg import fblas, cblas, get_blas_funcs\n\ndef test_get_blas_funcs():\n # check that it returns Fortran code for arrays that are\n # fortran-ordered\n f1, f2, f3 = get_blas_funcs(\n ('axpy', 'axpy', 'axpy'),\n (np.empty((2,2), dtype=np.complex64, order='F'),\n np.empty((2,2), dtype=np.complex128, order='C'))\n )\n\n # get_blas_funcs will choose libraries depending on most generic\n # array\n assert_equal(f1.typecode, 'z')\n assert_equal(f1.module_name, 'cblas')\n assert_equal(f2.typecode, 'z')\n assert_equal(f2.module_name, 'cblas')\n\n # check defaults.\n f1 = get_blas_funcs('rotg')\n assert_equal(f1.typecode, 'd')\n\n # check also dtype interface\n f1 = get_blas_funcs('gemm', dtype=np.complex64)\n assert_equal(f1.typecode, 'c')\n f1 = get_blas_funcs('gemm', dtype='F')\n assert_equal(f1.typecode, 'c')\n\n # extended precision complex\n f1 = get_blas_funcs('gemm', dtype=np.longcomplex)\n assert_equal(f1.typecode, 'z')\n\ndef test_get_blas_funcs_alias():\n # check alias for get_blas_funcs\n f, g = get_blas_funcs(('nrm2', 'dot'), dtype=np.complex64)\n assert f.typecode == 'c'\n assert g.typecode == 'c'\n\n f, g, h = get_blas_funcs(('dot', 'dotc', 'dotu'), dtype=np.float64)\n assert f is g\n assert f is h\n\nclass TestCBLAS1Simple(TestCase):\n\n def test_axpy(self):\n for p in 'sd':\n f = getattr(cblas,p+'axpy',None)\n if f is None: continue\n assert_array_almost_equal(f(5,[1,2,3],[2,-1,3]),[7,9,18])\n for p in 'cz':\n f = getattr(cblas,p+'axpy',None)\n if f is None: continue\n assert_array_almost_equal(f(5,[1,2j,3],[2,-1,3]),[7,10j-1,18])\n\nclass TestFBLAS1Simple(TestCase):\n\n def test_axpy(self):\n for p in 'sd':\n f = getattr(fblas,p+'axpy',None)\n if f is None: continue\n assert_array_almost_equal(f([1,2,3],[2,-1,3],a=5),[7,9,18])\n for p in 'cz':\n f = getattr(fblas,p+'axpy',None)\n if f is None: continue\n assert_array_almost_equal(f([1,2j,3],[2,-1,3],a=5),[7,10j-1,18])\n\n def test_copy(self):\n for p in 'sd':\n f = getattr(fblas,p+'copy',None)\n if f is None: continue\n assert_array_almost_equal(f([3,4,5],[8]*3),[3,4,5])\n for p in 'cz':\n f = getattr(fblas,p+'copy',None)\n if f is None: continue\n assert_array_almost_equal(f([3,4j,5+3j],[8]*3),[3,4j,5+3j])\n\n def test_asum(self):\n for p in 'sd':\n f = getattr(fblas,p+'asum',None)\n if f is None: continue\n assert_almost_equal(f([3,-4,5]),12)\n for p in ['sc','dz']:\n f = getattr(fblas,p+'asum',None)\n if f is None: continue\n assert_almost_equal(f([3j,-4,3-4j]),14)\n\n def test_dot(self):\n for p in 'sd':\n f = getattr(fblas,p+'dot',None)\n if f is None: continue\n assert_almost_equal(f([3,-4,5],[2,5,1]),-9)\n\n def test_complex_dotu(self):\n for p in 'cz':\n f = getattr(fblas,p+'dotu',None)\n if f is None: continue\n assert_almost_equal(f([3j,-4,3-4j],[2,3,1]),-9+2j)\n\n def test_complex_dotc(self):\n for p in 'cz':\n f = getattr(fblas,p+'dotc',None)\n if f is None: continue\n assert_almost_equal(f([3j,-4,3-4j],[2,3j,1]),3-14j)\n\n def test_nrm2(self):\n for p in 'sd':\n f = 
getattr(fblas,p+'nrm2',None)\n if f is None: continue\n assert_almost_equal(f([3,-4,5]),math.sqrt(50))\n for p in ['c', 'z', 'sc','dz']:\n f = getattr(fblas,p+'nrm2',None)\n if f is None: continue\n assert_almost_equal(f([3j,-4,3-4j]),math.sqrt(50))\n\n def test_scal(self):\n for p in 'sd':\n f = getattr(fblas,p+'scal',None)\n if f is None: continue\n assert_array_almost_equal(f(2,[3,-4,5]),[6,-8,10])\n for p in 'cz':\n f = getattr(fblas,p+'scal',None)\n if f is None: continue\n assert_array_almost_equal(f(3j,[3j,-4,3-4j]),[-9,-12j,12+9j])\n for p in ['cs','zd']:\n f = getattr(fblas,p+'scal',None)\n if f is None: continue\n assert_array_almost_equal(f(3,[3j,-4,3-4j]),[9j,-12,9-12j])\n\n def test_swap(self):\n for p in 'sd':\n f = getattr(fblas,p+'swap',None)\n if f is None: continue\n x,y = [2,3,1],[-2,3,7]\n x1,y1 = f(x,y)\n assert_array_almost_equal(x1,y)\n assert_array_almost_equal(y1,x)\n for p in 'cz':\n f = getattr(fblas,p+'swap',None)\n if f is None: continue\n x,y = [2,3j,1],[-2,3,7-3j]\n x1,y1 = f(x,y)\n assert_array_almost_equal(x1,y)\n assert_array_almost_equal(y1,x)\n\n def test_amax(self):\n for p in 'sd':\n f = getattr(fblas,'i'+p+'amax')\n assert_equal(f([-2,4,3]),1)\n for p in 'cz':\n f = getattr(fblas,'i'+p+'amax')\n assert_equal(f([-5,4+3j,6]),1)\n #XXX: need tests for rot,rotm,rotg,rotmg\n\nclass TestFBLAS2Simple(TestCase):\n\n def test_gemv(self):\n for p in 'sd':\n f = getattr(fblas,p+'gemv',None)\n if f is None: continue\n assert_array_almost_equal(f(3,[[3]],[-4]),[-36])\n assert_array_almost_equal(f(3,[[3]],[-4],3,[5]),[-21])\n for p in 'cz':\n f = getattr(fblas,p+'gemv',None)\n if f is None: continue\n assert_array_almost_equal(f(3j,[[3-4j]],[-4]),[-48-36j])\n assert_array_almost_equal(f(3j,[[3-4j]],[-4],3,[5j]),[-48-21j])\n\n def test_ger(self):\n\n for p in 'sd':\n f = getattr(fblas,p+'ger',None)\n if f is None: continue\n assert_array_almost_equal(f(1,[1,\n 2],[3,4]),[[3,4],[6,8]])\n assert_array_almost_equal(f(2,[1,\n 2,\n 3],[3,4]),[[6,8],[12,16],[18,24]])\n\n assert_array_almost_equal(f(1,[1,\n 2],[3,4],\n a=[[1,2],[3,4]]\n ),[[4,6],[9,12]])\n\n for p in 'cz':\n f = getattr(fblas,p+'geru',None)\n if f is None: continue\n assert_array_almost_equal(f(1,[1j,\n 2],[3,4]),[[3j,4j],[6,8]])\n assert_array_almost_equal(f(-2,[1j,\n 2j,\n 3j],[3j,4j]),[[6,8],[12,16],[18,24]])\n\n for p in 'cz':\n for name in ('ger', 'gerc'):\n f = getattr(fblas,p+name,None)\n if f is None: continue\n assert_array_almost_equal(f(1,[1j,\n 2],[3,4]),[[3j,4j],[6,8]])\n assert_array_almost_equal(f(2,[1j,\n 2j,\n 3j],[3j,4j]),[[6,8],[12,16],[18,24]])\n\nclass TestFBLAS3Simple(TestCase):\n\n def test_gemm(self):\n for p in 'sd':\n f = getattr(fblas,p+'gemm',None)\n if f is None: continue\n assert_array_almost_equal(f(3,[3],[-4]),[[-36]])\n assert_array_almost_equal(f(3,[3],[-4],3,[5]),[-21])\n for p in 'cz':\n f = getattr(fblas,p+'gemm',None)\n if f is None: continue\n assert_array_almost_equal(f(3j,[3-4j],[-4]),[[-48-36j]])\n assert_array_almost_equal(f(3j,[3-4j],[-4],3,[5j]),[-48-21j])\n\nif __name__ == \"__main__\":\n run_module_suite()\n" ]
[ [ "numpy.rank", "numpy.asarray", "numpy.arange", "numpy.cumsum", "numpy.ones", "numpy.random.rand", "numpy.iinfo", "numpy.floor", "numpy.zeros", "numpy.sum", "numpy.empty" ], [ "numpy.testing.assert_equal", "scipy.linalg.get_blas_funcs", "numpy.testing.run_module_suite", "numpy.empty", "numpy.testing.assert_array_almost_equal" ] ]
aforren1/moderngl
[ "32fe79927e02b0fa893b3603d677bdae39771e14" ]
[ "tests/test_context.py" ]
[ "from unittest import TestCase\nimport moderngl\nimport numpy\nimport platform\n\n\nclass ContextTests(TestCase):\n\n def test_create_destroy(self):\n \"\"\"Create and destroy a context\"\"\"\n for _ in range(25):\n ctx = moderngl.create_context(standalone=True)\n ctx.release()\n\n def test_context_switch(self):\n \"\"\"Ensure context switching is working\"\"\"\n ctx1 = moderngl.create_context(standalone=True)\n ctx2 = moderngl.create_context(standalone=True)\n\n with ctx1 as ctx:\n buffer1 = ctx.buffer(reserve=1024)\n \n with ctx2 as ctx:\n buffer2 = ctx.buffer(reserve=1024)\n\n self.assertEqual(buffer1.glo, buffer2.glo)\n ctx1.release()\n ctx2.release()\n\n def test_exit(self):\n \"\"\"Ensure the previous context was activated on exit\"\"\"\n ctx1 = moderngl.create_context(standalone=True)\n ctx2 = moderngl.create_context(standalone=True)\n\n with ctx1 as ctx:\n ctx.buffer(reserve=1024)\n\n # Will error out if no context is active \"moderngl.error.Error: cannot create buffer\"\n ctx1.buffer(reserve=1024)\n\n ctx1.release()\n ctx2.release()\n\n def test_share(self):\n \"\"\"Create resources with shared context\"\"\"\n if platform.system().lower() in [\"darwin\", \"linux\"]:\n self.skipTest('Context sharing not supported on darwin')\n\n data1 = numpy.array([1, 2, 3, 4], dtype='u1')\n data2 = numpy.array([4, 3, 2, 1], dtype='u1')\n\n ctx1 = moderngl.create_context(standalone=True)\n ctx2 = moderngl.create_context(standalone=True, share=True)\n\n with ctx1 as ctx:\n b1 = ctx.buffer(data=data1)\n\n with ctx2 as ctx:\n b2 = ctx.buffer(data=data2)\n\n # Because the resources are shared the name should increment\n self.assertEqual(b1.glo, 1)\n self.assertEqual(b2.glo, 2)\n\n # Ensure we can read the same buffer data in both contexts\n with ctx1:\n self.assertEqual(b1.read(), b'\\x01\\x02\\x03\\x04')\n self.assertEqual(b2.read(), b'\\x04\\x03\\x02\\x01')\n\n with ctx2:\n self.assertEqual(b1.read(), b'\\x01\\x02\\x03\\x04')\n self.assertEqual(b2.read(), b'\\x04\\x03\\x02\\x01')\n\n ctx1.release()\n ctx2.release()\n\n def test_extensions(self):\n ctx = moderngl.create_context(standalone=True)\n # self.assertTrue(\"GL_ARB_vertex_array_object\" in ctx.extensions)\n # self.assertTrue(\"GL_ARB_transform_feedback2\" in ctx.extensions)\n # self.assertTrue(\"GL_ARB_shader_subroutine\" in ctx.extensions)\n self.assertIsInstance(ctx.extensions, set)\n self.assertTrue(len(ctx.extensions) > 0)\n ctx.release()\n\n def test_attributes(self):\n \"\"\"Ensure enums are present in the context instance\"\"\"\n ctx = moderngl.create_context(standalone=True)\n # Flags\n self.assertIsInstance(ctx.NOTHING, int)\n self.assertIsInstance(ctx.BLEND, int)\n self.assertIsInstance(ctx.DEPTH_TEST, int)\n self.assertIsInstance(ctx.CULL_FACE, int)\n self.assertIsInstance(ctx.RASTERIZER_DISCARD, int)\n self.assertIsInstance(ctx.PROGRAM_POINT_SIZE, int)\n\n # Primitive modes\n self.assertIsInstance(ctx.POINTS, int)\n self.assertIsInstance(ctx.LINES, int)\n self.assertIsInstance(ctx.LINE_LOOP, int)\n self.assertIsInstance(ctx.LINE_STRIP, int)\n self.assertIsInstance(ctx.TRIANGLES, int)\n self.assertIsInstance(ctx.TRIANGLE_STRIP, int)\n self.assertIsInstance(ctx.TRIANGLE_FAN, int)\n self.assertIsInstance(ctx.LINES_ADJACENCY, int)\n self.assertIsInstance(ctx.LINE_STRIP_ADJACENCY, int)\n self.assertIsInstance(ctx.TRIANGLES_ADJACENCY, int)\n self.assertIsInstance(ctx.TRIANGLE_STRIP_ADJACENCY, int)\n self.assertIsInstance(ctx.PATCHES, int)\n\n # Texture filters\n self.assertIsInstance(ctx.LINEAR, int)\n 
self.assertIsInstance(ctx.NEAREST, int)\n self.assertIsInstance(ctx.NEAREST_MIPMAP_NEAREST, int)\n self.assertIsInstance(ctx.LINEAR_MIPMAP_LINEAR, int)\n self.assertIsInstance(ctx.LINEAR_MIPMAP_NEAREST, int)\n self.assertIsInstance(ctx.NEAREST_MIPMAP_LINEAR, int)\n\n # Blend functions\n self.assertIsInstance(ctx.ZERO, int)\n self.assertIsInstance(ctx.ONE, int)\n self.assertIsInstance(ctx.SRC_COLOR, int)\n self.assertIsInstance(ctx.ONE_MINUS_SRC_COLOR, int)\n self.assertIsInstance(ctx.SRC_ALPHA, int)\n self.assertIsInstance(ctx.ONE_MINUS_SRC_ALPHA, int)\n self.assertIsInstance(ctx.DST_ALPHA, int)\n self.assertIsInstance(ctx.ONE_MINUS_DST_ALPHA, int)\n self.assertIsInstance(ctx.DST_COLOR, int)\n self.assertIsInstance(ctx.ONE_MINUS_DST_COLOR, int)\n\n # Blend shortcuts\n self.assertIsInstance(ctx.DEFAULT_BLENDING, tuple)\n self.assertIsInstance(ctx.ADDITIVE_BLENDING, tuple)\n self.assertIsInstance(ctx.PREMULTIPLIED_ALPHA, tuple)\n\n # Blend equations\n self.assertIsInstance(ctx.FUNC_ADD, int)\n self.assertIsInstance(ctx.FUNC_SUBTRACT, int)\n self.assertIsInstance(ctx.FUNC_REVERSE_SUBTRACT, int)\n self.assertIsInstance(ctx.MIN, int)\n self.assertIsInstance(ctx.MAX, int)\n\n # Provoking vertex\n self.assertIsInstance(ctx.FIRST_VERTEX_CONVENTION, int)\n self.assertIsInstance(ctx.LAST_VERTEX_CONVENTION, int)\n\n def test_enable_direct(self):\n ctx = moderngl.create_context(standalone=True)\n ctx.error # consume error during initialization\n # We already support this, but it's a safe value\n GL_PROGRAM_POINT_SIZE = 0x8642\n\n ctx.enable_direct(GL_PROGRAM_POINT_SIZE)\n self.assertEqual(ctx.error, \"GL_NO_ERROR\")\n\n ctx.disable_direct(GL_PROGRAM_POINT_SIZE)\n self.assertEqual(ctx.error, \"GL_NO_ERROR\")\n\n def test_info(self):\n ctx = moderngl.create_context(standalone=True)\n self.assertIsInstance(ctx.info, dict)\n self.assertTrue(len(ctx.info) > 50)\n self.assertTrue(ctx.info[\"GL_MAX_GEOMETRY_OUTPUT_VERTICES\"] >= 256)\n\n def test_polyon_offset(self):\n ctx = moderngl.create_context(standalone=True)\n ctx.polygon_offset = 0.0, 1.0\n self.assertEqual(ctx.polygon_offset, (0.0, 1.0))\n ctx.polygon_offset = 1.0, 0.0\n self.assertEqual(ctx.polygon_offset, (1.0, 0.0))\n ctx.polygon_offset = -1.0, -1.0\n self.assertEqual(ctx.polygon_offset, (-1.0, -1.0))\n" ]
[ [ "numpy.array" ] ]
Pistachio504/openpilot
[ "2bb5df8ff722918ccc6361a8f588f8447a9cce36" ]
[ "tools/sim/bridge.py" ]
[ "#!/usr/bin/env python3\nimport argparse\nimport math\nimport os\nimport signal\nimport threading\nimport time\nfrom multiprocessing import Process, Queue\nfrom typing import Any\n\nimport carla # pylint: disable=import-error\nimport numpy as np\nimport pyopencl as cl\nimport pyopencl.array as cl_array\n\nimport cereal.messaging as messaging\nfrom cereal import log\nfrom cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error\nfrom common.basedir import BASEDIR\nfrom common.numpy_fast import clip\nfrom common.params import Params\nfrom common.realtime import DT_DMON, Ratekeeper\nfrom selfdrive.car.honda.values import CruiseButtons\nfrom selfdrive.test.helpers import set_params_enabled\nfrom tools.sim.lib.can import can_function\n\nW, H = 1928, 1208\nREPEAT_COUNTER = 5\nPRINT_DECIMATION = 100\nSTEER_RATIO = 15.\n\npm = messaging.PubMaster(['roadCameraState', 'wideRoadCameraState', 'sensorEvents', 'can', \"gpsLocationExternal\"])\nsm = messaging.SubMaster(['carControl', 'controlsState'])\n\ndef parse_args(add_args=None):\n parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')\n parser.add_argument('--joystick', action='store_true')\n parser.add_argument('--high_quality', action='store_true')\n parser.add_argument('--dual_camera', action='store_true')\n parser.add_argument('--town', type=str, default='Town04_Opt')\n parser.add_argument('--spawn_point', dest='num_selected_spawn_point', type=int, default=16)\n\n return parser.parse_args(add_args)\n\n\nclass VehicleState:\n def __init__(self):\n self.speed = 0.0\n self.angle = 0.0\n self.bearing_deg = 0.0\n self.vel = carla.Vector3D()\n self.cruise_button = 0\n self.is_engaged = False\n self.ignition = True\n\n\ndef steer_rate_limit(old, new):\n # Rate limiting to 0.5 degrees per step\n limit = 0.5\n if new > old + limit:\n return old + limit\n elif new < old - limit:\n return old - limit\n else:\n return new\n\n\nclass Camerad:\n def __init__(self):\n self.frame_road_id = 0\n self.frame_wide_id = 0\n self.vipc_server = VisionIpcServer(\"camerad\")\n\n # TODO: remove RGB buffers once the last RGB vipc subscriber is removed\n self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_RGB_ROAD, 4, True, W, H)\n self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 5, False, W, H)\n\n self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_RGB_WIDE_ROAD, 4, True, W, H)\n self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_WIDE_ROAD, 5, False, W, H)\n self.vipc_server.start_listener()\n\n # set up for pyopencl rgb to yuv conversion\n self.ctx = cl.create_some_context()\n self.queue = cl.CommandQueue(self.ctx)\n cl_arg = f\" -DHEIGHT={H} -DWIDTH={W} -DRGB_STRIDE={W * 3} -DUV_WIDTH={W // 2} -DUV_HEIGHT={H // 2} -DRGB_SIZE={W * H} -DCL_DEBUG \"\n\n # TODO: move rgb_to_yuv.cl to local dir once the frame stream camera is removed\n kernel_fn = os.path.join(BASEDIR, \"selfdrive\", \"camerad\", \"transforms\", \"rgb_to_yuv.cl\")\n with open(kernel_fn) as f:\n prg = cl.Program(self.ctx, f.read()).build(cl_arg)\n self.krnl = prg.rgb_to_yuv\n self.Wdiv4 = W // 4 if (W % 4 == 0) else (W + (4 - W % 4)) // 4\n self.Hdiv4 = H // 4 if (H % 4 == 0) else (H + (4 - H % 4)) // 4\n\n def cam_callback_road(self, image):\n self._cam_callback(image, self.frame_road_id, 'roadCameraState',\n VisionStreamType.VISION_STREAM_RGB_ROAD, VisionStreamType.VISION_STREAM_ROAD)\n self.frame_road_id += 1\n\n def cam_callback_wide_road(self, image):\n 
self._cam_callback(image, self.frame_wide_id, 'wideRoadCameraState',\n VisionStreamType.VISION_STREAM_RGB_WIDE_ROAD, VisionStreamType.VISION_STREAM_WIDE_ROAD)\n self.frame_wide_id += 1\n\n def _cam_callback(self, image, frame_id, pub_type, rgb_type, yuv_type):\n img = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\n img = np.reshape(img, (H, W, 4))\n img = img[:, :, [0, 1, 2]].copy()\n\n # convert RGB frame to YUV\n rgb = np.reshape(img, (H, W * 3))\n rgb_cl = cl_array.to_device(self.queue, rgb)\n yuv_cl = cl_array.empty_like(rgb_cl)\n self.krnl(self.queue, (np.int32(self.Wdiv4), np.int32(self.Hdiv4)), None, rgb_cl.data, yuv_cl.data).wait()\n yuv = np.resize(yuv_cl.get(), rgb.size // 2)\n eof = int(frame_id * 0.05 * 1e9)\n\n # TODO: remove RGB send once the last RGB vipc subscriber is removed\n self.vipc_server.send(rgb_type, img.tobytes(), frame_id, eof, eof)\n self.vipc_server.send(yuv_type, yuv.data.tobytes(), frame_id, eof, eof)\n\n dat = messaging.new_message(pub_type)\n msg = {\n \"frameId\": frame_id,\n \"transform\": [1.0, 0.0, 0.0,\n 0.0, 1.0, 0.0,\n 0.0, 0.0, 1.0]\n }\n setattr(dat, pub_type, msg)\n pm.send(pub_type, dat)\n\ndef imu_callback(imu, vehicle_state):\n vehicle_state.bearing_deg = math.degrees(imu.compass)\n dat = messaging.new_message('sensorEvents', 2)\n dat.sensorEvents[0].sensor = 4\n dat.sensorEvents[0].type = 0x10\n dat.sensorEvents[0].init('acceleration')\n dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]\n # copied these numbers from locationd\n dat.sensorEvents[1].sensor = 5\n dat.sensorEvents[1].type = 0x10\n dat.sensorEvents[1].init('gyroUncalibrated')\n dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]\n pm.send('sensorEvents', dat)\n\n\ndef panda_state_function(vs: VehicleState, exit_event: threading.Event):\n pm = messaging.PubMaster(['pandaStates'])\n while not exit_event.is_set():\n dat = messaging.new_message('pandaStates', 1)\n dat.valid = True\n dat.pandaStates[0] = {\n 'ignitionLine': vs.ignition,\n 'pandaType': \"blackPanda\",\n 'controlsAllowed': True,\n 'safetyModel': 'hondaNidec'\n }\n pm.send('pandaStates', dat)\n time.sleep(0.5)\n\n\ndef peripheral_state_function(exit_event: threading.Event):\n pm = messaging.PubMaster(['peripheralState'])\n while not exit_event.is_set():\n dat = messaging.new_message('peripheralState')\n dat.valid = True\n # fake peripheral state data\n dat.peripheralState = {\n 'pandaType': log.PandaState.PandaType.blackPanda,\n 'voltage': 12000,\n 'current': 5678,\n 'fanSpeedRpm': 1000\n }\n pm.send('peripheralState', dat)\n time.sleep(0.5)\n\n\ndef gps_callback(gps, vehicle_state):\n dat = messaging.new_message('gpsLocationExternal')\n\n # transform vel from carla to NED\n # north is -Y in CARLA\n velNED = [\n -vehicle_state.vel.y, # north/south component of NED is negative when moving south\n vehicle_state.vel.x, # positive when moving east, which is x in carla\n vehicle_state.vel.z,\n ]\n\n dat.gpsLocationExternal = {\n \"timestamp\": int(time.time() * 1000),\n \"flags\": 1, # valid fix\n \"accuracy\": 1.0,\n \"verticalAccuracy\": 1.0,\n \"speedAccuracy\": 0.1,\n \"bearingAccuracyDeg\": 0.1,\n \"vNED\": velNED,\n \"bearingDeg\": vehicle_state.bearing_deg,\n \"latitude\": gps.latitude,\n \"longitude\": gps.longitude,\n \"altitude\": gps.altitude,\n \"speed\": vehicle_state.speed,\n \"source\": log.GpsLocationData.SensorSource.ublox,\n }\n\n pm.send('gpsLocationExternal', dat)\n\n\ndef fake_driver_monitoring(exit_event: 
threading.Event):\n pm = messaging.PubMaster(['driverState', 'driverMonitoringState'])\n while not exit_event.is_set():\n # dmonitoringmodeld output\n dat = messaging.new_message('driverState')\n dat.driverState.faceProb = 1.0\n pm.send('driverState', dat)\n\n # dmonitoringd output\n dat = messaging.new_message('driverMonitoringState')\n dat.driverMonitoringState = {\n \"faceDetected\": True,\n \"isDistracted\": False,\n \"awarenessStatus\": 1.,\n }\n pm.send('driverMonitoringState', dat)\n\n time.sleep(DT_DMON)\n\n\ndef can_function_runner(vs: VehicleState, exit_event: threading.Event):\n i = 0\n while not exit_event.is_set():\n can_function(pm, vs.speed, vs.angle, i, vs.cruise_button, vs.is_engaged)\n time.sleep(0.01)\n i += 1\n\n\ndef connect_carla_client():\n client = carla.Client(\"127.0.0.1\", 2000)\n client.set_timeout(5)\n return client\n\n\nclass CarlaBridge:\n\n def __init__(self, arguments):\n set_params_enabled()\n\n msg = messaging.new_message('liveCalibration')\n msg.liveCalibration.validBlocks = 20\n msg.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]\n Params().put(\"CalibrationParams\", msg.to_bytes())\n Params().put_bool(\"WideCameraOnly\", not arguments.dual_camera)\n\n self._args = arguments\n self._carla_objects = []\n self._camerad = None\n self._exit_event = threading.Event()\n self._threads = []\n self._keep_alive = True\n self.started = False\n signal.signal(signal.SIGTERM, self._on_shutdown)\n self._exit = threading.Event()\n\n def _on_shutdown(self, signal, frame):\n self._keep_alive = False\n\n def bridge_keep_alive(self, q: Queue, retries: int):\n try:\n while self._keep_alive:\n try:\n self._run(q)\n break\n except RuntimeError as e:\n self.close()\n if retries == 0:\n raise\n\n # Reset for another try\n self._carla_objects = []\n self._threads = []\n self._exit_event = threading.Event()\n\n retries -= 1\n if retries <= -1:\n print(f\"Restarting bridge. Error: {e} \")\n else:\n print(f\"Restarting bridge. Retries left {retries}. 
Error: {e} \")\n finally:\n # Clean up resources in the opposite order they were created.\n self.close()\n\n def _run(self, q: Queue):\n client = connect_carla_client()\n world = client.load_world(self._args.town)\n\n settings = world.get_settings()\n settings.synchronous_mode = True # Enables synchronous mode\n settings.fixed_delta_seconds = 0.05\n world.apply_settings(settings)\n\n world.set_weather(carla.WeatherParameters.ClearSunset)\n\n if not self._args.high_quality:\n world.unload_map_layer(carla.MapLayer.Foliage)\n world.unload_map_layer(carla.MapLayer.Buildings)\n world.unload_map_layer(carla.MapLayer.ParkedVehicles)\n world.unload_map_layer(carla.MapLayer.Props)\n world.unload_map_layer(carla.MapLayer.StreetLights)\n world.unload_map_layer(carla.MapLayer.Particles)\n\n blueprint_library = world.get_blueprint_library()\n\n world_map = world.get_map()\n\n vehicle_bp = blueprint_library.filter('vehicle.tesla.*')[1]\n spawn_points = world_map.get_spawn_points()\n assert len(spawn_points) > self._args.num_selected_spawn_point, f'''No spawn point {self._args.num_selected_spawn_point}, try a value between 0 and\n {len(spawn_points)} for this town.'''\n spawn_point = spawn_points[self._args.num_selected_spawn_point]\n vehicle = world.spawn_actor(vehicle_bp, spawn_point)\n self._carla_objects.append(vehicle)\n max_steer_angle = vehicle.get_physics_control().wheels[0].max_steer_angle\n\n # make tires less slippery\n # wheel_control = carla.WheelPhysicsControl(tire_friction=5)\n physics_control = vehicle.get_physics_control()\n physics_control.mass = 2326\n # physics_control.wheels = [wheel_control]*4\n physics_control.torque_curve = [[20.0, 500.0], [5000.0, 500.0]]\n physics_control.gear_switch_time = 0.0\n vehicle.apply_physics_control(physics_control)\n\n transform = carla.Transform(carla.Location(x=0.8, z=1.13))\n\n def create_camera(fov, callback):\n blueprint = blueprint_library.find('sensor.camera.rgb')\n blueprint.set_attribute('image_size_x', str(W))\n blueprint.set_attribute('image_size_y', str(H))\n blueprint.set_attribute('fov', str(fov))\n if not self._args.high_quality:\n blueprint.set_attribute('enable_postprocess_effects', 'False')\n camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)\n camera.listen(callback)\n return camera\n\n self._camerad = Camerad()\n\n if self._args.dual_camera:\n road_camera = create_camera(fov=40, callback=self._camerad.cam_callback_road)\n self._carla_objects.append(road_camera)\n\n road_wide_camera = create_camera(fov=120, callback=self._camerad.cam_callback_wide_road) # fov bigger than 120 shows unwanted artifacts\n self._carla_objects.append(road_wide_camera)\n\n vehicle_state = VehicleState()\n\n # reenable IMU\n imu_bp = blueprint_library.find('sensor.other.imu')\n imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)\n imu.listen(lambda imu: imu_callback(imu, vehicle_state))\n\n gps_bp = blueprint_library.find('sensor.other.gnss')\n gps = world.spawn_actor(gps_bp, transform, attach_to=vehicle)\n gps.listen(lambda gps: gps_callback(gps, vehicle_state))\n\n self._carla_objects.extend([imu, gps])\n # launch fake car threads\n self._threads.append(threading.Thread(target=panda_state_function, args=(vehicle_state, self._exit_event,)))\n self._threads.append(threading.Thread(target=peripheral_state_function, args=(self._exit_event,)))\n self._threads.append(threading.Thread(target=fake_driver_monitoring, args=(self._exit_event,)))\n self._threads.append(threading.Thread(target=can_function_runner, args=(vehicle_state, 
self._exit_event,)))\n for t in self._threads:\n t.start()\n\n # init\n throttle_ease_out_counter = REPEAT_COUNTER\n brake_ease_out_counter = REPEAT_COUNTER\n steer_ease_out_counter = REPEAT_COUNTER\n\n vc = carla.VehicleControl(throttle=0, steer=0, brake=0, reverse=False)\n\n is_openpilot_engaged = False\n throttle_out = steer_out = brake_out = 0.\n throttle_op = steer_op = brake_op = 0.\n throttle_manual = steer_manual = brake_manual = 0.\n\n old_steer = old_brake = old_throttle = 0.\n throttle_manual_multiplier = 0.7 # keyboard signal is always 1\n brake_manual_multiplier = 0.7 # keyboard signal is always 1\n steer_manual_multiplier = 45 * STEER_RATIO # keyboard signal is always 1\n\n # Simulation tends to be slow in the initial steps. This prevents lagging later\n for _ in range(20):\n world.tick()\n\n # loop\n rk = Ratekeeper(100, print_delay_threshold=0.05)\n\n while self._keep_alive:\n # 1. Read the throttle, steer and brake from op or manual controls\n # 2. Set instructions in Carla\n # 3. Send current carstate to op via can\n\n cruise_button = 0\n throttle_out = steer_out = brake_out = 0.0\n throttle_op = steer_op = brake_op = 0.0\n throttle_manual = steer_manual = brake_manual = 0.0\n\n # --------------Step 1-------------------------------\n if not q.empty():\n message = q.get()\n m = message.split('_')\n if m[0] == \"steer\":\n steer_manual = float(m[1])\n is_openpilot_engaged = False\n elif m[0] == \"throttle\":\n throttle_manual = float(m[1])\n is_openpilot_engaged = False\n elif m[0] == \"brake\":\n brake_manual = float(m[1])\n is_openpilot_engaged = False\n elif m[0] == \"reverse\":\n cruise_button = CruiseButtons.CANCEL\n is_openpilot_engaged = False\n elif m[0] == \"cruise\":\n if m[1] == \"down\":\n cruise_button = CruiseButtons.DECEL_SET\n is_openpilot_engaged = True\n elif m[1] == \"up\":\n cruise_button = CruiseButtons.RES_ACCEL\n is_openpilot_engaged = True\n elif m[1] == \"cancel\":\n cruise_button = CruiseButtons.CANCEL\n is_openpilot_engaged = False\n elif m[0] == \"ignition\":\n vehicle_state.ignition = not vehicle_state.ignition\n elif m[0] == \"quit\":\n break\n\n throttle_out = throttle_manual * throttle_manual_multiplier\n steer_out = steer_manual * steer_manual_multiplier\n brake_out = brake_manual * brake_manual_multiplier\n\n old_steer = steer_out\n old_throttle = throttle_out\n old_brake = brake_out\n\n if is_openpilot_engaged:\n sm.update(0)\n\n # TODO gas and brake is deprecated\n throttle_op = clip(sm['carControl'].actuators.accel / 1.6, 0.0, 1.0)\n brake_op = clip(-sm['carControl'].actuators.accel / 4.0, 0.0, 1.0)\n steer_op = sm['carControl'].actuators.steeringAngleDeg\n\n throttle_out = throttle_op\n steer_out = steer_op\n brake_out = brake_op\n\n steer_out = steer_rate_limit(old_steer, steer_out)\n old_steer = steer_out\n\n else:\n if throttle_out == 0 and old_throttle > 0:\n if throttle_ease_out_counter > 0:\n throttle_out = old_throttle\n throttle_ease_out_counter += -1\n else:\n throttle_ease_out_counter = REPEAT_COUNTER\n old_throttle = 0\n\n if brake_out == 0 and old_brake > 0:\n if brake_ease_out_counter > 0:\n brake_out = old_brake\n brake_ease_out_counter += -1\n else:\n brake_ease_out_counter = REPEAT_COUNTER\n old_brake = 0\n\n if steer_out == 0 and old_steer != 0:\n if steer_ease_out_counter > 0:\n steer_out = old_steer\n steer_ease_out_counter += -1\n else:\n steer_ease_out_counter = REPEAT_COUNTER\n old_steer = 0\n\n # --------------Step 2-------------------------------\n steer_carla = steer_out / (max_steer_angle * STEER_RATIO * 
-1)\n\n steer_carla = np.clip(steer_carla, -1, 1)\n steer_out = steer_carla * (max_steer_angle * STEER_RATIO * -1)\n old_steer = steer_carla * (max_steer_angle * STEER_RATIO * -1)\n\n vc.throttle = throttle_out / 0.6\n vc.steer = steer_carla\n vc.brake = brake_out\n vehicle.apply_control(vc)\n\n # --------------Step 3-------------------------------\n vel = vehicle.get_velocity()\n speed = math.sqrt(vel.x ** 2 + vel.y ** 2 + vel.z ** 2) # in m/s\n vehicle_state.speed = speed\n vehicle_state.vel = vel\n vehicle_state.angle = steer_out\n vehicle_state.cruise_button = cruise_button\n vehicle_state.is_engaged = is_openpilot_engaged\n\n if rk.frame % PRINT_DECIMATION == 0:\n print(\"frame: \", \"engaged:\", is_openpilot_engaged, \"; throttle: \", round(vc.throttle, 3), \"; steer(c/deg): \",\n round(vc.steer, 3), round(steer_out, 3), \"; brake: \", round(vc.brake, 3))\n\n if rk.frame % 5 == 0:\n world.tick()\n rk.keep_time()\n self.started = True\n\n def close(self):\n self.started = False\n self._exit_event.set()\n\n for s in self._carla_objects:\n try:\n s.destroy()\n except Exception as e:\n print(\"Failed to destroy carla object\", e)\n for t in reversed(self._threads):\n t.join()\n\n def run(self, queue, retries=-1):\n bridge_p = Process(target=self.bridge_keep_alive, args=(queue, retries), daemon=True)\n bridge_p.start()\n return bridge_p\n\n\nif __name__ == \"__main__\":\n q: Any = Queue()\n args = parse_args()\n\n try:\n carla_bridge = CarlaBridge(args)\n p = carla_bridge.run(q)\n\n if args.joystick:\n # start input poll for joystick\n from tools.sim.lib.manual_ctrl import wheel_poll_thread\n\n wheel_poll_thread(q)\n else:\n # start input poll for keyboard\n from tools.sim.lib.keyboard_ctrl import keyboard_poll_thread\n\n keyboard_poll_thread(q)\n p.join()\n\n finally:\n # Try cleaning up the wide camera param\n # in case users want to use replay after\n Params().delete(\"WideCameraOnly\")\n" ]
[ [ "numpy.reshape", "numpy.int32", "numpy.dtype", "numpy.clip" ] ]
Linxxx/text-classification-cnn-rnn
[ "2e5d50dd44488068716066b97f5795f85b02e204" ]
[ "run_cnn.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nfrom datetime import timedelta\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn import metrics\n\nfrom cnn_model import TCNNConfig, TextCNN\nfrom data.cnews_loader import read_vocab, read_category, batch_iter, process_file, build_vocab\n\nbase_dir = 'data'\ntrain_dir = os.path.join(base_dir, 'train.txt')\ntest_dir = os.path.join(base_dir, 'test.txt')\nval_dir = os.path.join(base_dir, 'test.txt')\nvocab_dir = os.path.join(base_dir, 'vocab.txt')\n\nsave_dir = 'checkpoints/textcnn'\nsave_path = os.path.join(save_dir, 'best_validation') # 最佳验证结果保存路径\n\n\ndef get_time_dif(start_time):\n \"\"\"获取已使用时间\"\"\"\n end_time = time.time()\n time_dif = end_time - start_time\n return timedelta(seconds=int(round(time_dif)))\n\n\ndef feed_data(x_batch, y_batch, keep_prob):\n feed_dict = {\n model.input_x: x_batch,\n model.input_y: y_batch,\n model.keep_prob: keep_prob\n }\n return feed_dict\n\n\ndef evaluate(sess, x_, y_):\n \"\"\"评估在某一数据上的准确率和损失\"\"\"\n data_len = len(x_)\n batch_eval = batch_iter(x_, y_, 128)\n total_loss = 0.0\n total_acc = 0.0\n for x_batch, y_batch in batch_eval:\n batch_len = len(x_batch)\n feed_dict = feed_data(x_batch, y_batch, 1.0)\n loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)\n total_loss += loss * batch_len\n total_acc += acc * batch_len\n\n return total_loss / data_len, total_acc / data_len\n\n\ndef train():\n print(\"Configuring TensorBoard and Saver...\")\n # 配置 Tensorboard,重新训练时,请将tensorboard文件夹删除,不然图会覆盖\n tensorboard_dir = 'tensorboard/textcnn'\n if not os.path.exists(tensorboard_dir):\n os.makedirs(tensorboard_dir)\n\n tf.summary.scalar(\"loss\", model.loss)\n tf.summary.scalar(\"accuracy\", model.acc)\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(tensorboard_dir)\n\n # 配置 Saver\n saver = tf.train.Saver()\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n print(\"Loading training and validation data...\")\n # 载入训练集与验证集\n start_time = time.time()\n x_train, y_train = process_file(train_dir, word_to_id, cat_to_id, config.seq_length)\n x_val, y_val = process_file(val_dir, word_to_id, cat_to_id, config.seq_length)\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n # 创建session\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n writer.add_graph(session.graph)\n\n print('Training and evaluating...')\n start_time = time.time()\n total_batch = 0 # 总批次\n best_acc_val = 0.0 # 最佳验证集准确率\n last_improved = 0 # 记录上一次提升批次\n require_improvement = 1000 # 如果超过1000轮未提升,提前结束训练\n\n flag = False\n for epoch in range(config.num_epochs):\n print('Epoch:', epoch + 1)\n batch_train = batch_iter(x_train, y_train, config.batch_size)\n for x_batch, y_batch in batch_train:\n feed_dict = feed_data(x_batch, y_batch, config.dropout_keep_prob)\n\n if total_batch % config.save_per_batch == 0:\n # 每多少轮次将训练结果写入tensorboard scalar\n s = session.run(merged_summary, feed_dict=feed_dict)\n writer.add_summary(s, total_batch)\n\n if total_batch % config.print_per_batch == 0:\n # 每多少轮次输出在训练集和验证集上的性能\n feed_dict[model.keep_prob] = 1.0\n loss_train, acc_train = session.run([model.loss, model.acc], feed_dict=feed_dict)\n loss_val, acc_val = evaluate(session, x_val, y_val) # todo\n\n if acc_val > best_acc_val:\n # 保存最好结果\n best_acc_val = acc_val\n last_improved = total_batch\n saver.save(sess=session, save_path=save_path)\n improved_str = '*'\n else:\n improved_str = ''\n\n 
time_dif = get_time_dif(start_time)\n msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \\\n + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'\n print(msg.format(total_batch, loss_train, acc_train, loss_val, acc_val, time_dif, improved_str))\n\n session.run(model.optim, feed_dict=feed_dict) # 运行优化\n total_batch += 1\n\n if total_batch - last_improved > require_improvement:\n # 验证集正确率长期不提升,提前结束训练\n print(\"No optimization for a long time, auto-stopping...\")\n flag = True\n break # 跳出循环\n if flag: # 同上\n break\n\n\ndef modeltest():\n print(\"Loading test data...\")\n start_time = time.time()\n x_test, y_test = process_file(test_dir, word_to_id, cat_to_id, config.seq_length)\n\n session = tf.Session()\n session.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess=session, save_path=save_path) # 读取保存的模型\n\n print('Testing...')\n loss_test, acc_test = evaluate(session, x_test, y_test)\n msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'\n print(msg.format(loss_test, acc_test))\n\n batch_size = 128\n data_len = len(x_test)\n num_batch = int((data_len - 1) / batch_size) + 1\n\n y_test_cls = np.argmax(y_test, 1)\n y_pred_cls = np.zeros(shape=len(x_test), dtype=np.int32) # 保存预测结果\n for i in range(num_batch): # 逐批次处理\n start_id = i * batch_size\n end_id = min((i + 1) * batch_size, data_len)\n feed_dict = {\n model.input_x: x_test[start_id:end_id],\n model.keep_prob: 1.0\n }\n y_pred_cls[start_id:end_id] = session.run(model.y_pred_cls, feed_dict=feed_dict)\n\n # 评估\n print(\"Precision, Recall and F1-Score...\")\n print(metrics.classification_report(y_test_cls, y_pred_cls, target_names=categories))\n\n # 混淆矩阵\n print(\"Confusion Matrix...\")\n cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)\n print(cm)\n\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n\nif __name__ == '__main__':\n # if len(sys.argv) != 2 or sys.argv[1] not in ['train', 'test']:\n # raise ValueError(\"\"\"usage: python run_cnn.py [train / test]\"\"\")\n\n print('Configuring CNN model...')\n config = TCNNConfig()\n if not os.path.exists(vocab_dir): # 如果不存在词汇表,重建\n build_vocab(train_dir, vocab_dir, config.vocab_size)\n categories, cat_to_id = read_category()\n words, word_to_id = read_vocab(vocab_dir)\n config.vocab_size = len(words)\n model = TextCNN(config)\n train()\n modeltest()\n\n # if sys.argv[1] == 'train':\n # train()\n # else:\n # test()\n" ]
[ [ "tensorflow.summary.FileWriter", "sklearn.metrics.confusion_matrix", "sklearn.metrics.classification_report", "tensorflow.global_variables_initializer", "numpy.argmax", "tensorflow.summary.merge_all", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.summary.scalar" ] ]
mohyunho/ENAS_CWRU
[ "1bf3bd76a5d80eea39305ce67f8f8ac85eb85a26" ]
[ "fd_network.py" ]
[ "import time\nimport json\nimport logging as log\nimport sys\n\nimport os\nimport math\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport random\nimport importlib\nfrom scipy.stats import randint, expon, uniform\n\nimport sklearn as sk\nfrom sklearn import svm\nfrom sklearn.utils import shuffle\nfrom sklearn import metrics\nfrom sklearn import preprocessing\nfrom sklearn import pipeline\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\n\nfrom math import sqrt\n# import keras\nimport tensorflow as tf\n\nprint(tf.__version__)\n\n# import keras.backend as K\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras import backend\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.models import Sequential, load_model, Model\nfrom tensorflow.keras.layers import Input, Dense, Flatten, Dropout, Embedding\nfrom tensorflow.keras.layers import BatchNormalization, Activation, LSTM, TimeDistributed\nfrom tensorflow.keras.layers import Conv1D\nfrom tensorflow.keras.layers import MaxPooling1D\nfrom tensorflow.keras.layers import concatenate\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler\n\nnp.random.seed(0)\ntf.random.set_seed(0)\n\n\ndef scheduler(epoch, lr):\n if epoch == 200:\n return lr * 0.1\n\n else:\n return lr\n\n# def scheduler(epoch, lr):\n# return lr\n\n\n\ndef gen_net(vec_len, num_hidden1, num_hidden2):\n '''\n TODO: Generate and evaluate any CNN instead of MLPs\n :param vec_len:\n :param num_hidden1:\n :param num_hidden2:\n :return:\n '''\n\n model = Sequential()\n model.add(Dense(num_hidden1, activation='relu', input_shape=(vec_len,)))\n model.add(Dense(num_hidden2, activation='relu'))\n model.add(Dense(10, activation='softmax'))\n # model.add(Dense(10, activation='sigmoid'))\n\n return model\n\n\nclass network_fit(object):\n '''\n class for network\n '''\n\n def __init__(self, train_samples, label_array_train, test_samples, label_array_test,\n model_path, n_hidden1=100, n_hidden2=10, verbose=2):\n '''\n Constructor\n Generate a NN and train\n @param none\n '''\n # self.__logger = logging.getLogger('data preparation for using it as the network input')\n self.train_samples = train_samples\n self.label_array_train = label_array_train\n self.test_samples = test_samples\n self.label_array_test = label_array_test\n self.n_hidden1 = n_hidden1\n self.n_hidden2 = n_hidden2\n self.model_path = model_path\n self.verbose = verbose\n\n self.mlps = gen_net(self.train_samples.shape[1], self.n_hidden1, self.n_hidden2)\n\n def train_net(self, epochs=500, batch_size=500, lr=1e-05, plotting=True):\n '''\n specify the optimizers and train the network\n :param epochs:\n :param batch_size:\n :param lr:\n :return:\n '''\n print(\"Initializing network...\")\n # compile the model\n rp = optimizers.RMSprop(learning_rate=lr, rho=0.9, centered=True)\n adm = optimizers.Adam(learning_rate=lr, epsilon=1)\n sgd_m = optimizers.SGD(learning_rate=lr)\n adam = optimizers.Adam(lr=0.0001)\n\n lr_scheduler = LearningRateScheduler(scheduler)\n\n keras_rmse = tf.keras.metrics.RootMeanSquaredError()\n self.mlps.compile(loss='categorical_crossentropy', optimizer=adam, metrics=[\"accuracy\"])\n # print(self.mlps.summary())\n # print (\"self.train_samples.shape\", self.train_samples.shape)\n # print (\"self.label_array_train.shape\", self.label_array_train.shape)\n\n # Train the model\n history = self.mlps.fit(self.train_samples, self.label_array_train, epochs=epochs, 
batch_size=batch_size,\n validation_split=0.3, verbose=0,\n callbacks=[lr_scheduler,\n EarlyStopping(monitor='val_loss', min_delta=0, patience=100,\n verbose=0, mode='min'),\n ModelCheckpoint(self.model_path, monitor='val_loss',\n save_best_only=True, mode='min',\n verbose=0)])\n\n # print(history.history.keys())\n val_rmse_k = history.history['val_loss']\n val_rmse_min = min(val_rmse_k)\n min_val_rmse_idx = val_rmse_k.index(min(val_rmse_k))\n stop_epoch = min_val_rmse_idx + 1\n val_loss_min = round(val_rmse_min, 4)\n print(\"val_loss_min: \", val_loss_min)\n\n val_acc_k = history.history['val_accuracy']\n val_acc_max = val_acc_k[min_val_rmse_idx]\n val_acc_max = round(val_acc_max, 4)\n print(\"val_acc_max: \", val_acc_max)\n\n fitness_net = (val_loss_min,)\n\n trained_net = self.mlps\n\n ## Plot training & validation loss about epochs\n # if plotting == True:\n # # summarize history for Loss\n # fig_acc = plt.figure(figsize=(10, 10))\n # plt.plot(history.history['loss'])\n # plt.plot(history.history['val_loss'])\n # plt.title('model loss')\n # plt.ylabel('loss')\n # # plt.ylim(0, 2000)\n # plt.xlabel('epoch')\n # plt.legend(['train', 'test'], loc='upper left')\n # plt.show()\n\n fitness_net\n\n return trained_net, fitness_net\n\n def test_net(self, trained_net=None, best_model=True, plotting=True):\n '''\n Evalute the trained network on test set\n :param trained_net:\n :param best_model:\n :param plotting:\n :return:\n '''\n # Load the trained model\n if best_model:\n estimator = load_model(self.model_path)\n else:\n estimator = load_model(trained_net)\n\n # predict the RUL\n output = estimator.predict(self.test_samples)\n y_true_test = self.label_array_test # ground truth of test samples\n\n output_classes = np.argmax(output, axis=1)\n\n print (\"output_classes\", output_classes)\n print(\"y_true_test\", y_true_test)\n print(\"output_classes.shape\", output_classes.shape)\n print (\"y_true_test.shape\", y_true_test.shape)\n\n y_pred_test = output_classes\n\n pd.set_option('display.max_rows', 1000)\n test_print = pd.DataFrame()\n test_print['y_pred'] = y_pred_test.flatten()\n test_print['y_truth'] = y_true_test.flatten()\n\n y_predicted = test_print['y_pred']\n y_actual = test_print['y_truth']\n acc = accuracy_score(y_actual, y_predicted)\n\n\n\n\n return acc" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.keras.callbacks.ModelCheckpoint", "numpy.random.seed", "tensorflow.keras.layers.Dense", "tensorflow.keras.optimizers.RMSprop", "tensorflow.keras.callbacks.LearningRateScheduler", "sklearn.metrics.accuracy_score", "pandas.DataFrame", "tensorflow.keras.optimizers.Adam", "numpy.argmax", "tensorflow.keras.metrics.RootMeanSquaredError", "pandas.set_option", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.models.Sequential", "tensorflow.random.set_seed", "tensorflow.keras.optimizers.SGD" ] ]
dhaw92/playground
[ "07c8f52eace37f13b6e411cb2301e6de5a5ba47b" ]
[ "pommerman/forward_model.py" ]
[ "from collections import defaultdict\n\nimport numpy as np\n\nfrom . import constants\nfrom . import characters\nfrom . import utility\n\n\nclass ForwardModel(object):\n \"\"\"Class for helping with the [forward] modeling of the game state.\"\"\"\n\n def run(self, num_times, board, agents, bombs, items, flames, is_partially_observable, agent_view_size, action_space, training_agent=None, is_communicative=False):\n \"\"\"Run the forward model.\n\n Args:\n num_times: The number of times to run it for. This is a maximum and it will stop early if we reach a done.\n board: The board state to run it from.\n agents: The agents to use to run it.\n bombs: The starting bombs.\n items: The starting items.\n flames: The starting flames.\n is_partially_observable: Whether the board is partially observable or not. Only applies to TeamRadio.\n agent_view_size: If it's partially observable, then the size of the square that the agent can view.\n action_space: The actions that each agent can take.\n training_agent: The training agent to pass to done.\n is_communicative: Whether the action depends on communication observations as well.\n\n Returns:\n steps: The list of step results, which are each a dict of \"obs\", \"next_obs\", \"reward\", \"action\".\n board: Updated board.\n agents: Updated agents, same models though.\n bombs: Updated bombs.\n items: Updated items.\n flames: Updated flames.\n done: Whether we completed the game in these steps.\n info: The result of the game if it's completed.\n \"\"\"\n steps = []\n for _ in num_times:\n obs = self.get_observations(\n board, agents, bombs, is_partially_observable, agent_view_size)\n actions = self.act(agents, obs, action_space,\n is_communicative=is_communicative)\n board, agents, bombs, items, flames = self.step(\n actions, board, agents, bombs, items, flames)\n next_obs = self.get_observations(\n board, agents, bombs, is_partially_observable, agent_view_size)\n reward = self.get_rewards(agents, game_type, step_count, max_steps)\n done = self.get_done(agents, game_type, step_count, max_steps,\n training_agent)\n info = self.get_info(done, rewards, game_type, agents)\n\n steps.append({\n \"obs\": obs,\n \"next_obs\": next_obs,\n \"reward\": reward,\n \"actions\": actions,\n })\n if done:\n # Callback to let the agents know that the game has ended.\n for agent in agents:\n agent.episode_end(reward[agent.agent_id])\n break\n return steps, board, agents, bombs, items, flames, done, info\n\n @staticmethod\n def act(agents, obs, action_space, is_communicative=False):\n \"\"\"Returns actions for each agent in this list.\n\n Args:\n agents: A list of agent objects.\n obs: A list of matching observations per agent.\n action_space: The action space for the environment using this model.\n is_communicative: Whether the action depends on communication observations as well.\n\n Returns a list of actions.\n \"\"\"\n def act_ex_communication(agent):\n if agent.is_alive:\n return agent.act(obs[agent.agent_id], action_space=action_space)\n else:\n return constants.Action.Stop.value\n\n def act_with_communication(agent):\n if agent.is_alive:\n action = agent.act(obs[agent.agent_id], action_space=action_space)\n if type(action) == int:\n action = [action] + [0, 0]\n assert(type(action) == list)\n return action\n else:\n return [constants.Action.Stop.value, 0, 0]\n\n ret = []\n for agent in agents:\n if is_communicative:\n ret.append(act_with_communication(agent))\n else:\n ret.append(act_ex_communication(agent))\n return ret\n\n @staticmethod\n def step(actions, 
curr_board, curr_agents, curr_bombs, curr_items,\n curr_flames):\n board_size = len(curr_board)\n\n # Tick the flames. Replace any dead ones with passages. If there is an item there, then reveal that item.\n flames = []\n for flame in curr_flames:\n position = flame.position\n if flame.is_dead():\n item_value = curr_items.get(position)\n if item_value:\n del curr_items[position]\n else:\n item_value = constants.Item.Passage.value\n curr_board[position] = item_value\n else:\n flame.tick()\n flames.append(flame)\n curr_flames = flames\n\n # Step the living agents.\n # If two agents try to go to the same spot, they should bounce back to their previous spots.\n # This is a little complicated because what if there are three agents all in a row.\n # If the one in the middle tries to go to the left and bounces with the one on the left,\n # and then the one on the right tried to go to the middle one's position, she should also bounce.\n # A way of doing this is to gather all the new positions before taking any actions.\n # Then, if there are disputes, correct those disputes iteratively.\n def make_counter(next_positions):\n counter = defaultdict(list)\n for num, next_position in enumerate(next_positions):\n if next_position is not None:\n counter[next_position].append(num)\n return counter\n\n def has_position_conflict(counter):\n return any([len(agent_ids) > 1 for next_position, agent_ids in counter.items() if next_position])\n\n curr_positions = [agent.position for agent in curr_agents]\n next_positions = [agent.position for agent in curr_agents]\n for agent, action in zip(curr_agents, actions):\n if agent.is_alive:\n position = agent.position\n\n if action == constants.Action.Stop.value:\n agent.stop()\n elif action == constants.Action.Bomb.value:\n bomb = agent.maybe_lay_bomb()\n if bomb:\n curr_bombs.append(bomb)\n elif utility.is_valid_direction(curr_board, position, action):\n next_position = agent.get_next_position(action)\n\n # This might be a bomb position. 
Only move in that case if the agent can kick.\n if not utility.position_is_bomb(curr_board, next_position):\n next_positions[agent.agent_id] = next_position\n elif not agent.can_kick:\n agent.stop()\n else:\n after_next_position = utility.get_next_position(next_position, constants.Action(action))\n if not utility.position_on_board(curr_board, after_next_position) or not utility.position_is_passage(curr_board, after_next_position):\n agent.stop()\n else:\n next_positions[agent.agent_id] = next_position\n else:\n # The agent made an invalid direction.\n agent.stop()\n else:\n next_positions[agent.agent_id] = None\n\n counter = make_counter(next_positions)\n while has_position_conflict(counter):\n for next_position, agent_ids in counter.items():\n if next_position and len(agent_ids) > 1:\n for agent_id in agent_ids:\n next_positions[agent_id] = curr_positions[agent_id]\n counter = make_counter(next_positions)\n\n for agent, curr_position, next_position, direction in zip(curr_agents, curr_positions, next_positions, actions):\n if not agent.is_alive:\n continue\n\n if curr_position != next_position:\n agent.move(direction)\n if agent.can_kick:\n bombs = [bomb for bomb in curr_bombs if bomb.position == agent.position]\n if bombs:\n bombs[0].moving_direction = constants.Action(direction)\n\n if utility.position_is_powerup(curr_board, agent.position):\n agent.pick_up(constants.Item(curr_board[agent.position]))\n curr_board[agent.position] = constants.Item.Passage.value\n\n # Explode bombs.\n next_bombs = []\n exploded_map = np.zeros_like(curr_board)\n for bomb in curr_bombs:\n bomb.tick()\n if bomb.is_moving():\n invalid_values = list(range(len(constants.Item)+1))[1:]\n if utility.is_valid_direction(curr_board, bomb.position, bomb.moving_direction.value, invalid_values=invalid_values):\n curr_board[bomb.position] = constants.Item.Passage.value\n bomb.move()\n else:\n bomb.stop()\n\n if bomb.exploded():\n bomb.bomber.incr_ammo()\n for _, indices in bomb.explode().items():\n for r, c in indices:\n if not all([r >= 0, c >= 0, r < board_size, c < board_size]):\n break\n if curr_board[r][c] == constants.Item.Rigid.value:\n break\n exploded_map[r][c] = 1\n if curr_board[r][c] == constants.Item.Wood.value:\n break\n else:\n next_bombs.append(bomb)\n\n # Remove bombs that were in the blast radius.\n curr_bombs = []\n for bomb in next_bombs:\n if bomb.in_range(exploded_map):\n bomb.bomber.incr_ammo()\n else:\n curr_bombs.append(bomb)\n\n # Kill these agents.\n for agent in curr_agents:\n if agent.in_range(exploded_map):\n agent.die()\n exploded_map = np.array(exploded_map)\n\n # Update the board\n for bomb in curr_bombs:\n curr_board[bomb.position] = constants.Item.Bomb.value\n\n for agent in curr_agents:\n position = np.where(curr_board == utility.agent_value(agent.agent_id))\n curr_board[position] = constants.Item.Passage.value\n if agent.is_alive:\n curr_board[agent.position] = utility.agent_value(agent.agent_id)\n\n flame_positions = np.where(exploded_map == 1)\n for row, col in zip(flame_positions[0], flame_positions[1]):\n curr_flames.append(characters.Flame((row, col)))\n for flame in curr_flames:\n curr_board[flame.position] = constants.Item.Flames.value\n\n return curr_board, curr_agents, curr_bombs, curr_items, curr_flames\n\n def get_observations(self, curr_board, agents, bombs, is_partially_observable, agent_view_size):\n \"\"\"Gets the observations as an np.array of the visible squares.\n\n The agent gets to choose whether it wants to keep the fogged part in memory.\n \"\"\"\n board_size = 
len(curr_board)\n\n def make_bomb_maps(position):\n blast_strengths = np.zeros((board_size, board_size))\n life = np.zeros((board_size, board_size))\n\n for bomb in bombs:\n x, y = bomb.position\n if not is_partially_observable or in_view_range(position, x, y):\n blast_strengths[(x, y)] = bomb.blast_strength\n life[(x, y)] = bomb.life\n return blast_strengths, life\n\n def in_view_range(position, vrow, vcol):\n row, col = position\n return all([\n row >= vrow - agent_view_size, row < vrow + agent_view_size,\n col >= vcol - agent_view_size, col < vcol + agent_view_size])\n\n attrs = ['position', 'blast_strength', 'can_kick', 'teammate', 'ammo',\n 'enemies']\n\n observations = []\n for agent in agents:\n agent_obs = {}\n board = curr_board\n if is_partially_observable:\n board = board.copy()\n for row in range(board_size):\n for col in range(board_size):\n if not in_view_range(agent.position, row, col):\n board[row, col] = constants.Item.Fog.value\n agent_obs['board'] = board\n\n bomb_blast_strengths, bomb_life = make_bomb_maps(agent.position)\n agent_obs['bomb_blast_strength'] = bomb_blast_strengths\n agent_obs['bomb_life'] = bomb_life\n\n for attr in attrs:\n assert hasattr(agent, attr)\n agent_obs[attr] = getattr(agent, attr)\n observations.append(agent_obs)\n return observations\n\n @staticmethod\n def get_done(agents, step_count, max_steps, game_type, training_agent):\n alive = [agent for agent in agents if agent.is_alive]\n alive_ids = sorted([agent.agent_id for agent in alive])\n if step_count >= max_steps:\n return True\n elif game_type == constants.GameType.FFA:\n if training_agent is not None and training_agent not in alive_ids:\n return True\n return len(alive) <= 1\n elif any([\n len(alive_ids) <= 1,\n alive_ids == [0, 2],\n alive_ids == [1, 3],\n ]):\n return True\n return False\n\n @staticmethod\n def get_info(done, rewards, game_type, agents):\n if game_type == constants.GameType.FFA:\n alive = [agent for agent in agents if agent.is_alive]\n if done and len(alive) > 1:\n return {\n 'result': constants.Result.Tie,\n }\n elif done:\n return {\n 'result': constants.Result.Win,\n 'winners': [num for num, reward in enumerate(rewards) \\\n if reward == 1]\n }\n else:\n return {\n 'result': constants.Result.Incomplete,\n }\n elif done:\n # We are playing a team game.\n if rewards == [-1]*4:\n return {\n 'result': constants.Result.Tie,\n }\n else:\n return {\n 'result': constants.Result.Win,\n 'winners': [num for num, reward in enumerate(rewards) \\\n if reward == 1],\n }\n else:\n return {\n 'result': constants.Result.Incomplete,\n }\n\n @staticmethod\n def get_rewards(agents, game_type, step_count, max_steps):\n def any_lst_equal(lst, values):\n return any([lst == v for v in values])\n\n alive_agents = [num for num, agent in enumerate(agents) \\\n if agent.is_alive]\n if game_type == constants.GameType.FFA:\n if len(alive_agents) == 1:\n # An agent won. Give them +1, others -1.\n return [2*int(agent.is_alive) - 1 for agent in agents]\n elif step_count >= max_steps:\n # Game is over from time. Everyone gets -1.\n return [-1]*4\n else:\n # Game running: 0 for alive, -1 for dead.\n return [int(agent.is_alive) - 1 for agent in agents]\n else:\n # We are playing a team game.\n if any_lst_equal(alive_agents, [[0, 2], [0], [2]]):\n # Team [0, 2] wins.\n return [1, -1, 1, -1]\n elif any_lst_equal(alive_agents, [[1, 3], [1], [3]]):\n # Team [1, 3] wins.\n return [-1, 1, -1, 1]\n elif step_count >= max_steps:\n # Game is over by max_steps. 
All agents tie.\n return [-1]*4\n else:\n # No team has yet won or lost.\n return [0]*4\n" ]
[ [ "numpy.array", "numpy.zeros_like", "numpy.where", "numpy.zeros" ] ]
thanh125643/LitterMask
[ "2545173bc5b046331a38da513780292a09f6a566" ]
[ "module/mask.py" ]
[ "import os\n\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nimport time\nimport numpy as np\nimport json\nimport csv\nimport random\nimport colorsys\nfrom imgaug import augmenters as iaa\nfrom tqdm import tqdm\n\nfrom module.detector.dataset import Taco\nfrom module.detector import model as modellib\nfrom module.detector.model import MaskRCNN\nfrom module.detector.config import Config\nfrom module.detector import visualize\nfrom module.detector import utils\nimport cv2\n\n\nfrom pycocotools.cocoeval import COCOeval\nfrom pycocotools import mask as maskUtils\n\n\nclass Mask:\n def __init__(\n self,\n PathToROOT=\"./detector/models\",\n classMap=\"detector/taco_config/map_10.csv\",\n modelName=\"mask_rcnn_taco_0100\",\n pathToDataset=\"data\",\n splitnumber=0,\n ):\n self.ROOT_DIR = PathToROOT\n # Path to trained weights file\n self.COCO_MODEL_PATH = os.path.join(self.ROOT_DIR, \"mask_rcnn_coco.h5\")\n # Directory to save logs and model checkpoints\n self.DEFAULT_LOGS_DIR = os.path.join(self.ROOT_DIR, \"logs\")\n self.pathCSV = classMap\n self.pathDataset = pathToDataset\n self.modelName = modelName\n self.splitnumber = splitnumber\n self.getclass()\n self.prepareDataset()\n self.model = MaskRCNN(\n mode=\"inference\", config=self.config, model_dir=self.DEFAULT_LOGS_DIR\n )\n self.modelLoad()\n\n def getclass(self):\n self.class_map = {}\n self.map_to_one_class = {}\n with open(self.pathCSV) as csvfile:\n reader = csv.reader(csvfile)\n self.class_map = {row[0]: row[1] for row in reader}\n self.map_to_one_class = {c: \"Litter\" for c in self.class_map}\n\n def prepareDataset(self):\n self.dataset = Taco()\n self.taco = self.dataset.load_taco(\n self.pathDataset,\n self.splitnumber,\n \"test\",\n class_map=self.class_map,\n return_taco=True,\n )\n self.dataset.prepare()\n nr_classes = self.dataset.num_classes\n\n class TacoTestConfig(Config):\n NAME = \"taco\"\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n DETECTION_MIN_CONFIDENCE = 10\n NUM_CLASSES = nr_classes\n USE_OBJECT_ZOOM = False\n\n self.config = TacoTestConfig()\n self.config.display()\n\n def modelLoad(self):\n _, model_path = self.model.get_last_checkpoint(self.modelName)\n self.model.load_weights(model_path, model_path, by_name=True)\n self.model.keras_model._make_predict_function()\n\n def resizeImg(self, img, maxDim, minDim):\n h, w = img.shape[:2]\n orgin = (w, h)\n scale = 1\n scale = max(1, minDim / min(h, w))\n\n image_max = max(h, w)\n if round(image_max * scale) > maxDim:\n scale = maxDim / image_max\n img = cv2.resize(img, (round(w * scale), round(h * scale)))\n\n h, w = img.shape[:2]\n delta_w = maxDim - w\n delta_h = maxDim - h\n top, bottom = delta_h // 2, delta_h - (delta_h // 2)\n left, right = delta_w // 2, delta_w - (delta_w // 2)\n img = cv2.copyMakeBorder(\n img, top, bottom, left, right, cv2.BORDER_CONSTANT, None, [0, 0, 0]\n )\n return img, orgin, (top, bottom, left, right), scale\n\n def randomColor(self, nIds):\n brightness = 0.7\n hsv = [(i / nIds, 1, brightness) for i in range(nIds)]\n colors = list(map(lambda c: np.multiply(colorsys.hsv_to_rgb(*c), 255), hsv))\n random.shuffle(colors)\n return colors\n\n def revertMask(self, rdata, orgin, pad, scale):\n if rdata[\"class_ids\"].shape[0] == 0:\n return rdata\n h, w = rdata[\"masks\"].shape[:2]\n maskfinal = []\n for i in range(rdata[\"masks\"].shape[2]):\n mask = rdata[\"masks\"][pad[0] : h - pad[1], pad[2] : w - pad[3], i]\n mask = cv2.resize(mask.astype(np.uint8), orgin)\n maskfinal.append(mask)\n if 
len(maskfinal) > 1:\n maskfinal = np.dstack(tuple(maskfinal))\n else:\n maskfinal = np.reshape(\n maskfinal[0], (maskfinal[0].shape[0], maskfinal[0].shape[1], 1)\n )\n rdata[\"masks\"] = maskfinal\n for i in range(len(rdata[\"rois\"])):\n y1, x1, y2, x2 = rdata[\"rois\"][i]\n rdata[\"rois\"][i] = (y1 - pad[0], x1 - pad[2], y2 - pad[0], x2 - pad[2])\n rdata[\"rois\"][i] = np.divide(rdata[\"rois\"][i], scale)\n return rdata\n\n def maskIMG(self, img, rdata, className):\n ncolor = self.randomColor(len(rdata[\"class_ids\"]))\n for i in range(len(ncolor)):\n y1, x1, y2, x2 = rdata[\"rois\"][i]\n color = ncolor[i].tolist()\n img = cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)\n\n score = rdata[\"scores\"][i]\n label = className[rdata[\"class_ids\"][i]]\n caption = \"{} {:.3f}\".format(label, score)\n img = cv2.putText(\n img,\n caption,\n (x1, y1 - 8),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (255, 255, 255),\n 3,\n cv2.LINE_AA,\n )\n\n img = visualize.apply_mask(img, rdata[\"masks\"][:, :, i], color)\n\n return img\n\n def loadPicGT(self, input, output, ids):\n img = cv2.imread(input, cv2.IMREAD_COLOR)\n anns = self.taco.loadAnns(self.taco.getAnnIds([ids]))\n rois = []\n class_ids = []\n segmentation = []\n for i in anns:\n rois.append(i[\"bbox\"])\n class_ids.append(i[\"category_id\"])\n temp = np.array(\n [\n np.array([[x, y]], dtype=int)\n for x, y in zip(\n i[\"segmentation\"][0][::2], i[\"segmentation\"][0][1::2]\n )\n ]\n )\n segmentation.append(temp)\n overlay = img.copy()\n ncolor = self.randomColor(len(class_ids))\n for i in range(len(ncolor)):\n x, y, w, h = rois[i]\n color = ncolor[i].tolist()\n img = cv2.rectangle(\n img, (int(x), int(y)), (int(x + w), int(y + h)), color, 2\n )\n\n label = self.dataset.class_names[class_ids[i]]\n caption = \"{}\".format(label)\n img = cv2.putText(\n img,\n caption,\n (int(x), int(y) - 8),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1,\n (255, 255, 255),\n 3,\n cv2.LINE_AA,\n )\n overlay = cv2.drawContours(overlay, [segmentation[i]], -1, color, -1)\n cv2.addWeighted(img, 0.7, overlay, 0.3, 0, img)\n cv2.imwrite(output, img)\n\n def detectIMG(self, pathOfImage, OutputPath):\n img = cv2.imread(pathOfImage, cv2.IMREAD_COLOR)\n imgresult = img.copy()\n img, orgin, pad, scale = self.resizeImg(img, 1024, 800)\n r = self.model.detect([img], verbose=0)[0]\n if r[\"class_ids\"].shape[0] > 0:\n r_fuse = utils.fuse_instances(r)\n else:\n r_fuse = r\n rdata = self.revertMask(r_fuse, orgin, pad, scale)\n img = self.maskIMG(imgresult, rdata, self.dataset.class_names)\n cv2.imwrite(OutputPath, img)\n return rdata\n\n def detectBulkIMG(self, InputPath, OutputPath):\n if not os.path.exists(InputPath):\n print(\"no path: \" + InputPath)\n if not os.path.exists(OutputPath):\n os.mkdir(OutputPath)\n res = []\n with tqdm(os.listdir(InputPath)) as tq:\n for i in tq:\n if (os.path.splitext(i)[1]).lower() not in [\".jpg\", \".png\"]:\n continue\n pathImage = os.path.join(InputPath, i)\n OutputImage = os.path.join(OutputPath, i)\n tq.set_description(pathImage)\n img = cv2.imread(pathImage, cv2.IMREAD_COLOR)\n imgresult = img.copy()\n img, orgin, pad, scale = self.resizeImg(img, 1024, 800)\n r = self.model.detect([img], verbose=0)[0]\n if r[\"class_ids\"].shape[0] > 0:\n r_fuse = utils.fuse_instances(r)\n else:\n r_fuse = r\n rdata = self.revertMask(r_fuse, orgin, pad, scale)\n img = self.maskIMG(imgresult, rdata, self.dataset.class_names)\n cv2.imwrite(OutputImage, img)\n res.append(rdata)\n return res\n\n def checkeval(self, outFolder, jsonOutput):\n jsonOut = []\n imgData = 
self.taco.imgs\n if not os.path.exists(outFolder):\n os.mkdir(outFolder)\n pathPredict = os.path.join(outFolder, \"Predicet\")\n pathGroundTrust = os.path.join(outFolder, \"GT\")\n if not os.path.exists(pathPredict):\n os.mkdir(pathPredict)\n if not os.path.exists(pathGroundTrust):\n os.mkdir(pathGroundTrust)\n with tqdm(imgData.keys()) as pbar:\n for i in pbar:\n inputImg = os.path.join(self.pathDataset, imgData[i][\"file_name\"])\n pbar.set_description(inputImg)\n outputImg = os.path.join(\n pathPredict,\n imgData[i][\"file_name\"].split(\"/\")[0]\n + \"_\"\n + os.path.basename(imgData[i][\"file_name\"]),\n )\n outputGT = os.path.join(\n pathGroundTrust,\n imgData[i][\"file_name\"].split(\"/\")[0]\n + \"_\"\n + os.path.basename(imgData[i][\"file_name\"]),\n )\n self.loadPicGT(inputImg, outputGT, i)\n r = self.detectIMG(inputImg, outputImg)\n rclass = r[\"class_ids\"].tolist()\n\n for a in range(len(r[\"class_ids\"])):\n rle = maskUtils.encode(np.asfortranarray(r[\"masks\"][:, :, a]))\n rle[\"counts\"] = str(rle[\"counts\"], \"utf-8\")\n jsonOut.append(\n {\n \"image_id\": i,\n \"category_id\": rclass[a],\n \"segmentation\": rle,\n \"score\": float(r[\"scores\"][a]),\n }\n )\n with open(jsonOutput, \"w\") as f:\n json.dump(jsonOut, f)\n Pre = self.taco.loadRes(jsonOutput)\n result = COCOeval(self.taco, Pre)\n result.evaluate()\n result.accumulate()\n result.summarize()\n" ]
[ [ "numpy.reshape", "numpy.asfortranarray", "numpy.array", "numpy.divide" ] ]
autolordz/docx-content-modify
[ "88efb96e450b835f00ca8ac78dcc266cd5eec0fc" ]
[ "dcm_df_transform.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 11 17:26:56 2019\n\n@author: autol\n\"\"\"\nimport re\nimport pandas as pd\n#%%\nimport dcm_util as ut\nfrom dcm_globalvar import *\nlocals().update(var.to_dict()) # 设置读取的全局变量\n\n#%%\n\ndef df_transform_stream(df):\n\n df_x=pd.DataFrame();\n\n df = ut.titles_trans_columns(df,titles_cn);df # 中译英方便后面处理\n if flag_check_postal:\n df.apply(lambda x:df_check_format(x), axis=1)\n\n if 0<len(df)<10:\n print_log('>>> 将要打印【%s条】=> %s '%(len(df),\n df['number'].to_list()))\n\n if len(df) and flag_to_postal:\n# try:\n print_log('\\n>>> 开始生成新数据 data_main_temp... ')\n '''获取 datetime|number'''\n number = df[titles_en[:2]]\n number = number.reset_index()\n number.columns.values[0] = 'level_0'\n # user\n '''获取所有用户名包括曾用名'''\n user = df['uname']\n user = user[user != '']\n # user = user.str.strip().str.split(r'[,,。]',expand=True).stack() # divide user\n #user = user.str.strip().str.split(r'[:]',expand=True)# divide character\n user = user.str.strip().str.split(r'[\\/]',expand=True).stack().reset_index(drop=True) # divide user by slash, old is [,,。]\n user = user.str.strip().apply(lambda x: re.sub(r'\\[.*\\]','',x))\n user = user.rename('uname').to_frame()\n print(\"..check.user.....\",user)\n # user = user.str.strip().str.split(r'[、]',expand=True).stack().to_frame(name = 'uname')\n # user = user.reset_index().drop(['level_1','level_2'],axis=1)\n # agent and address\n agent_adr = df[['aname','address']]\n opt = agent_adr.any()\n agent = df['aname']\n adr = df['address']\n\n if all(opt):\n print_log('>>> 有【诉讼代理人】和【地址】...正在处理...')\n adr = make_adr(adr,fix_aname=user['uname'].tolist())\n agent = make_agent(agent,fix_aname=adr['clean_aname'].tolist()) #获取代理人\n usr_agent = merge_user(user,agent)\n df_x = reclean_data(merge_usr_agent_adr(usr_agent,adr))\n df_x = sort_data(df_x,number)\n elif opt.address:\n print_log('>>> 只有【地址】...正在处理...')\n print(\"------user--111---\",user.columns)\n print(\"------user--222---\",user)\n adr = make_adr(adr,fix_aname=user['uname'].tolist())\n adr['uname'] = adr['clean_aname']\n adr = merge_user(user,adr)\n adr = adr.assign(aname='')\n df_x = reclean_data(adr)\n df_x = sort_data(df_x,number)\n elif opt.aname:\n print_log('>>> 只有【诉讼代理人】...正在处理...')\n agent = make_agent(agent)\n agent = merge_user(user,agent)\n agent = agent.assign(address='')\n df_x = reclean_data(agent)\n df_x = sort_data(df_x,number)\n else:\n print_log('>>> 缺失【诉讼代理人】和【地址】...正在处理...')\n agent_adr.index.name = 'level_0'\n agent_adr.reset_index(inplace=True)\n df_x = pd.merge(user,agent_adr,how='left',on=['level_0']).fillna('')\n df_x = sort_data(df_x,number)\n\n if len(df_x):\n data_tmp = os.path.splitext(data_xlsx)[0]+\"_tmp.xlsx\"\n df_save = df_x.copy()\n df_save.columns = ut.titles_switch(df_save.columns.tolist())\n df_save = ut.save_adjust_xlsx(df_save,data_tmp,width=40)\n# print_log('----->'%ret)\n# except Exception as e:\n# input_exit('>>> 错误 \\'%s\\' 生成数据失败,请检查源 \\'%s\\' 文件...退出...'%(e,data_xlsx))\n return df_x\n\n\n#%% df tramsfrom functions\ndef clean_rows_aname(x,names):\n '''Clean agent name for agent to match address's agent name'''\n if names:\n for name in names:\n if not ut.check_cn_str(name):continue # 非中文名跳过\n if name in x:\n x = name;break\n x = re.sub(r'_.*','',x)\n x = re.sub(path_names_clean,'',x)\n return x\n\ndef clean_rows_adr(adr):\n '''clean adr format'''\n y = ut.split_list(r'[,,]',adr)\n if y:\n y = list(map(lambda x: x if re.search(r'\\/地址[::]',x) else adr_tag + x,y))\n adr = ','.join(list(filter(None, y)))\n return adr\n\n#%%\ndef 
make_adr(adr,fix_aname=[]): #fix_aname = clean_aname\n '''\n clean_aname:合并标识,此处如果没律师,则代理人就是自己\n fix_aname:修正名字错误\n Returns:\n level_0 address clean_aname\n 0 44 XX市XX镇XXX村 张三\n 1 44 XXX市XX区XXX B律师\n '''\n adr = adr[adr != '']\n adr = adr.str.strip().str.split(r'[,,。]',expand=True).stack()\n adr = adr.str.strip().apply(lambda x:clean_rows_adr(x))\n adr = adr.str.strip().str.split(r'\\/地址[::]',expand=True).fillna('')\n adr.columns = ['aname','address']\n adr['clean_aname'] = adr['aname'].str.strip().apply(lambda x:clean_rows_aname(x,fix_aname)) # clean adr\n adr = adr.reset_index().drop(['level_1','aname'],axis=1)\n return adr\n\ndef make_agent(agent,fix_aname=[]):\n '''\n agent = '张三(曾用名张五)/律师张CC_123123_李DD_123123,李四/律师张AA_123123_李BB_123123'\n fix_aname:修正名字错误,假如律师(aname)有多个,则选择第一个律师作为合并标识(clean_aname),注意没有律师的合并就是自己(uname)做代理人\n Returns:\n level_0 uname aname clean_aname\n 0 44 张三 A律师_123123 A律师\n 1 44 李四\n 2 44 王五 B律师_123123、C律师_123123 B律师\n '''\n \n# df = ut.titles_trans_columns(df,titles_cn);df # 中译英方便后面处理\n# agent = df['aname']\n agent = agent[agent != '']\n print('...111....agent.... ',agent)\n agent = agent.str.strip().str.split(r'[,,、。]',expand=True).stack() #Series\n# agent.str.strip().str.split(r'[,,。]',expand=True).stack() \n# agent.str.strip().str.split(r'\\/',expand=True).fillna('')\n agent = agent.str.strip().str.split(r'\\/',expand=True).fillna('') #DataFrame\n print('...222....agent.... ',agent)\n \n agent.columns = ['uname','aname']\n agent['clean_aname'] = agent['aname'].str.strip().apply(lambda x: clean_rows_aname(x,fix_aname))\n dd_l = agent['uname'].str.strip().str.split(r'、',expand=True).stack().to_frame(name = 'uname').reset_index()\n dd_r = agent[agent.columns.difference(['uname'])].reset_index()\n agent = pd.merge(dd_l,dd_r,how='outer',on=['level_0','level_1']).drop(['level_1','level_2'],axis=1).fillna('')\n return agent\n\ndef merge_user(user,agent):\n '''合并后以uname为主,clean_aname是律师标识\n Returns:\n level_0 uname aname clean_aname\n 0 44 张三 A律师_123213123 A律师\n 2 44 王五 B律师_123123132123、C律师_123123 B律师\n '''\n# return pd.merge(user,agent,how='left',on=['level_0','uname']).fillna('')\n return pd.merge(user,agent,how='left',on=['uname']).fillna('')\n\ndef merge_usr_agent_adr(agent,adr):\n ''' clean_aname 去除nan,保留曾用名'''\n\n agent['clean_aname'].replace('',float('nan'),inplace=True)\n agent['clean_aname'] = agent['clean_aname'].fillna(agent['uname']).replace(path_names_clean,'')\n adr['clean_aname'] = adr['clean_aname'].apply(lambda x: clean_rows_aname(x,agent['clean_aname'].tolist()))\n tb = pd.merge(agent,adr,how='outer',on=['level_0','clean_aname']).fillna('')\n tb.dropna(how='all',subset=['uname', 'aname'],inplace=True)\n return tb\n\ndef reclean_data(tb):\n tg = tb.groupby(['level_0','clean_aname','aname','address'])['uname'].apply(lambda x: '、'.join(x.astype(str))).reset_index()\n glist = tg['uname'].str.split(r'、',expand=True).stack().values.tolist()\n rest = tb[tb['uname'].isin(glist) == False]\n x = pd.concat([rest,tg],axis=0,sort=True)\n return x\n\ndef sort_data(x,number):\n\n x = x[['level_0','uname','aname','address']]\n# .sort_values(by=['level_0'])\n\n x['level_0'] = x['level_0'].apply(int);\n number['level_0']=number['level_0'].apply(int);\n x = pd.merge(number,x,how='right',on=['level_0']).drop(['level_0'],axis=1).fillna('')\n return x\n\n#%%\n\ndef df_check_format(x):\n '''check data address and agent format with check flag'''\n if x['aname']!='' and not re.search(r'[\\/_]',x['aname']):\n ut.print_log('>>> 记录\\'%s\\'---- 【诉讼代理人】格式 \\'%s\\' 
不正确,如无请留空,请自行修改...'%(x['number'],x['aname']))\n if x['address']!='' and not re.search(r'\\/地址[::]',x['address']):\n ut.print_log('>>> 记录\\'%s\\'---- 【地址】格式 \\'%s\\' 不正确,如无请留空,请自行修改...'%(x['number'],x['address']))\n return x\n\ndef main():\n print('1232')\n \nif __name__ == '__main__':\n main()" ]
[ [ "pandas.concat", "pandas.merge", "pandas.DataFrame" ] ]
JM-IP/HNC
[ "d3a2e77d1e922d2c712dd0ae82cebc0dea525f6a" ]
[ "HNC_github/model/mobilenetv2.py" ]
[ "'''MobileNetV2 in PyTorch.\n\nSee the paper \"Inverted Residuals and Linear Bottlenecks:\nMobile Networks for Classification, Detection and Segmentation\" for more details.\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom model import common\nfrom IPython import embed\n# from torchvision.models import resnet\ndef make_model(args, parent=False):\n return MobileNetV2(args[0])\n\n\nclass ConvBNReLU(nn.Sequential):\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):\n padding = (kernel_size - 1) // 2\n super(ConvBNReLU, self).__init__(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.ReLU6(inplace=True)\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = self.stride == 1 and inp == oup\n\n layers = []\n if expand_ratio != 1:\n # pw\n layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))\n layers.extend([\n # dw\n ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n ])\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass Block(nn.Module):\n '''expand + depthwise + pointwise'''\n def __init__(self, in_planes, out_planes, expansion, stride):\n super(Block, self).__init__()\n self.stride = stride\n\n planes = expansion * in_planes\n\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn3 = nn.BatchNorm2d(out_planes)\n\n self.shortcut = nn.Sequential()\n if stride == 1 and in_planes != out_planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(out_planes),\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out = out + self.shortcut(x) if self.stride==1 else out\n return out\n\n\n# (expansion, out_planes, num_blocks, stride)\ncfg = [(1, 16, 1, 1),\n (6, 24, 2, 1), # NOTE: change stride 2 -> 1 for CIFAR10\n (6, 32, 3, 2),\n (6, 64, 4, 2),\n (6, 96, 3, 1),\n (6, 160, 3, 2),\n (6, 320, 1, 1)]\n\ncfg_imagenet = [(1, 16, 1, 1),\n (6, 24, 2, 2),\n (6, 32, 3, 2),\n (6, 64, 4, 2),\n (6, 96, 3, 1),\n (6, 160, 3, 2),\n (6, 320, 1, 1)]\n\n\nclass MobileNetV2(nn.Module):\n\n\n def __init__(self, args, conv3x3=common.default_conv, conv1x1=common.default_conv):\n super(MobileNetV2, self).__init__()\n self.width_mult = args.width_mult\n\n # num_classes = int(args.data_train[5:]) if args.data_train.find('CIFAR') >= 0 else 1000\n if args.data_train.find('CIFAR') >= 0:\n num_classes = int(args.data_train[5:])\n elif args.data_train.find('Tiny') >= 0:\n num_classes = 200\n else:\n num_classes = 1000\n\n if args.data_train == 'ImageNet':\n self.cfg = cfg_imagenet\n else:\n self.cfg = cfg\n\n # NOTE: change conv1 stride 2 -> 1 for CIFAR10\n stride = 1 if 
args.data_train.find('CIFAR') >= 0 else 2\n\n features = [ConvBNReLU(3, int(32 * self.width_mult), kernel_size=3, stride=stride)]\n features.extend(self._make_layers(in_planes=int(32 * self.width_mult)))\n features.append(ConvBNReLU(int(320 * self.width_mult), int(1280 * self.width_mult), kernel_size=1, stride=1))\n self.features = nn.Sequential(*features)\n # self.conv1 = nn.Conv2d(3, int(32 * self.width_mult), kernel_size=3, stride=1, padding=1, bias=False)\n # self.bn1 = nn.BatchNorm2d((32 *self.width_mult))\n # self.layers = self._make_layers(in_planes=int(32 * self.width_mult))\n # self.conv2 = nn.Conv2d(int(320 * self.width_mult), (1280 * self.width_mult), kernel_size=1, stride=1, padding=0, bias=False)\n # self.bn2 = nn.BatchNorm2d(int(1280 * self.width_mult))\n self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(int(1280 * self.width_mult), num_classes))\n\n def _make_layers(self, in_planes):\n layers = []\n for expansion, out_planes, num_blocks, stride in self.cfg:\n out_planes = int(out_planes * self.width_mult)\n strides = [stride] + [1]*(num_blocks-1)\n for stride in strides:\n layers.append(InvertedResidual(in_planes, out_planes, stride, expansion))\n in_planes = out_planes\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.features(x)\n # NOTE: change pooling kernel_size 7 -> 4 for CIFAR10\n # out = F.avg_pool2d(out, 4)\n # out = out.view(out.size(0), -1)\n out = out.mean([2, 3])\n out = self.classifier(out)\n return out\n\n\ndef test():\n net = MobileNetV2()\n x = torch.randn(2,3,32,32)\n y = net(x)\n print(y.size())\n\n# test()\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.ReLU6", "torch.randn", "torch.nn.Conv2d", "torch.nn.BatchNorm2d" ] ]
borellim/aiida_core
[ "934b4ccdc73a993f2a6656caf516500470e3da08" ]
[ "aiida/orm/nodes/data/array/trajectory.py" ]
[ "# -*- coding: utf-8 -*-\n###########################################################################\n# Copyright (c), The AiiDA team. All rights reserved. #\n# This file is part of the AiiDA code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.aiida.net #\n###########################################################################\n\"\"\"\nAiiDA class to deal with crystal structure trajectories.\n\"\"\"\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport collections\nimport six\nfrom six.moves import range, zip\n\nfrom .array import ArrayData\n\n\nclass TrajectoryData(ArrayData):\n \"\"\"\n Stores a trajectory (a sequence of crystal structures with timestamps, and\n possibly with velocities).\n \"\"\"\n\n def __init__(self, structurelist=None, **kwargs):\n super(TrajectoryData, self).__init__(**kwargs)\n if structurelist is not None:\n self.set_structurelist(structurelist)\n\n def _internal_validate(self, stepids, cells, symbols, positions, times, velocities): # pylint: disable=too-many-arguments,too-many-locals,no-self-use,too-many-branches\n \"\"\"\n Internal function to validate the type and shape of the arrays. See\n the documentation of py:meth:`.set_trajectory` for a description of the\n valid shape and type of the parameters.\n \"\"\"\n import numpy\n\n if not isinstance(symbols, collections.Iterable):\n raise TypeError(\"TrajectoryData.symbols must be of type list\")\n if any([not isinstance(i, six.string_types) for i in symbols]):\n raise TypeError(\"TrajectoryData.symbols must be a 1d list of strings\")\n if not isinstance(positions, numpy.ndarray) or positions.dtype != float:\n raise TypeError(\"TrajectoryData.positions must be a numpy array of floats\")\n if stepids is not None:\n if not isinstance(stepids, numpy.ndarray) or stepids.dtype != int:\n raise TypeError(\"TrajectoryData.stepids must be a numpy array of integers\")\n if cells is not None:\n if not isinstance(cells, numpy.ndarray) or cells.dtype != float:\n raise TypeError(\"TrajectoryData.cells must be a numpy array of floats\")\n if times is not None:\n if not isinstance(times, numpy.ndarray) or times.dtype != float:\n raise TypeError(\"TrajectoryData.times must be a numpy array of floats\")\n if velocities is not None:\n if not isinstance(velocities, numpy.ndarray) or velocities.dtype != float:\n raise TypeError(\"TrajectoryData.velocities must be a numpy array of floats, or None\")\n if stepids is not None:\n numsteps = stepids.size\n if stepids.shape != (numsteps,):\n raise ValueError(\"TrajectoryData.stepids must be a 1d array\")\n else:\n numsteps = positions.shape[0]\n if cells is not None:\n if cells.shape != (numsteps, 3, 3):\n raise ValueError(\"TrajectoryData.cells must have shape (s,3,3), with s=number of steps\")\n numatoms = len(symbols)\n if positions.shape != (numsteps, numatoms, 3):\n raise ValueError(\"TrajectoryData.positions must have shape (s,n,3), \"\n \"with s=number of steps and n=number of symbols\")\n if times is not None:\n if times.shape != (numsteps,):\n raise ValueError(\"TrajectoryData.times must have shape (s,), with s=number of steps\")\n if velocities is not None:\n if velocities.shape != (numsteps, numatoms, 3):\n raise ValueError(\"TrajectoryData.velocities, if not None, must \"\n \"have shape (s,n,3), \"\n \"with s=number of steps and n=number of 
symbols\")\n\n def set_trajectory(self, symbols, positions, stepids=None, cells=None, times=None, velocities=None): # pylint: disable=too-many-arguments\n r\"\"\"\n Store the whole trajectory, after checking that types and dimensions\n are correct.\n\n Parameters ``stepids``, ``cells`` and ``velocities`` are optional\n variables. If nothing is passed for ``cells`` or ``velocities``\n nothing will be stored. However, if no input is given for ``stepids``\n a consecutive sequence [0,1,2,...,len(positions)-1] will be assumed.\n\n\n :param symbols: string list with dimension ``n``, where ``n`` is the\n number of atoms (i.e., sites) in the structure.\n The same list is used for each step. Normally, the string\n should be a valid chemical symbol, but actually any unique\n string works and can be used as the name of the atomic kind\n (see also the :py:meth:`.get_step_structure()` method).\n :param positions: float array with dimension :math:`s \\times n \\times 3`,\n where ``s`` is the\n length of the ``stepids`` array and ``n`` is the length\n of the ``symbols`` array. Units are angstrom.\n In particular,\n ``positions[i,j,k]`` is the ``k``-th component of the\n ``j``-th atom (or site) in the structure at the time step\n with index ``i`` (identified\n by step number ``step[i]`` and with timestamp ``times[i]``).\n :param stepids: integer array with dimension ``s``, where ``s`` is the\n number of steps. Typically represents an internal counter\n within the code. For instance, if you want to store a\n trajectory with one step every 10, starting from step 65,\n the array will be ``[65,75,85,...]``.\n No checks are done on duplicate elements\n or on the ordering, but anyway this array should be\n sorted in ascending order, without duplicate elements.\n (If not specified, stepids will be set to ``numpy.arange(s)``\n by default) It is internally stored as an array named 'steps'.\n :param cells: if specified float array with dimension\n :math:`s \\times 3 \\times 3`, where ``s`` is the\n length of the ``stepids`` array. Units are angstrom.\n In particular, ``cells[i,j,k]`` is the ``k``-th component\n of the ``j``-th cell vector at the time step with index\n ``i`` (identified by step number ``stepid[i]`` and with\n timestamp ``times[i]``).\n :param times: if specified, float array with dimension ``s``, where\n ``s`` is the length of the ``stepids`` array. Contains the\n timestamp of each step in picoseconds (ps).\n :param velocities: if specified, must be a float array with the same\n dimensions of the ``positions`` array.\n The array contains the velocities in the atoms.\n\n .. 
todo :: Choose suitable units for velocities\n \"\"\"\n\n import numpy\n\n self._internal_validate(stepids, cells, symbols, positions, times, velocities)\n # set symbols as attribute for easier querying\n self.set_attribute('symbols', list(symbols))\n self.set_array('positions', positions)\n if stepids is not None: # use input stepids\n self.set_array('steps', stepids)\n else: # use consecutive sequence if not given\n self.set_array('steps', numpy.arange(positions.shape[0]))\n if cells is not None:\n self.set_array('cells', cells)\n else:\n # Delete cells array, if it was present\n try:\n self.delete_array('cells')\n except KeyError:\n pass\n if times is not None:\n self.set_array('times', times)\n else:\n # Delete times array, if it was present\n try:\n self.delete_array('times')\n except KeyError:\n pass\n if velocities is not None:\n self.set_array('velocities', velocities)\n else:\n # Delete velocities array, if it was present\n try:\n self.delete_array('velocities')\n except KeyError:\n pass\n\n def set_structurelist(self, structurelist):\n \"\"\"\n Create trajectory from the list of\n :py:class:`aiida.orm.nodes.data.structure.StructureData` instances.\n\n :param structurelist: a list of\n :py:class:`aiida.orm.nodes.data.structure.StructureData` instances.\n\n :raises ValueError: if symbol lists of supplied structures are\n different\n \"\"\"\n import numpy\n\n stepids = numpy.arange(len(structurelist))\n cells = numpy.array([x.cell for x in structurelist])\n symbols_first = [str(s.kind_name) for s in structurelist[0].sites]\n for symbols_now in [[str(s.kind_name) for s in structurelist[i].sites] for i in stepids]:\n if symbols_first != symbols_now:\n raise ValueError(\"Symbol lists have to be the same for all of the supplied structures\")\n symbols = list(symbols_first)\n positions = numpy.array([[list(s.position) for s in x.sites] for x in structurelist])\n self.set_trajectory(stepids=stepids, cells=cells, symbols=symbols, positions=positions)\n\n def _validate(self):\n \"\"\"\n Verify that the required arrays are present and that their type and\n dimension are correct.\n \"\"\"\n # check dimensions, types\n from aiida.common.exceptions import ValidationError\n\n try:\n self._internal_validate(self.get_stepids(), self.get_cells(), self.symbols, self.get_positions(),\n self.get_times(), self.get_velocities())\n # Should catch TypeErrors, ValueErrors, and KeyErrors for missing arrays\n except Exception as exception:\n raise ValidationError(\"The TrajectoryData did not validate. \"\n \"Error: {} with message {}\".format(type(exception).__name__, exception))\n\n @property\n def numsteps(self):\n \"\"\"\n Return the number of stored steps, or zero if nothing has been stored yet.\n \"\"\"\n try:\n return self.get_shape('steps')[0]\n except (AttributeError, KeyError, IndexError):\n return 0\n\n @property\n def numsites(self):\n \"\"\"\n Return the number of stored sites, or zero if nothing has been stored yet.\n \"\"\"\n try:\n return len(self.symbols)\n except (AttributeError, KeyError, IndexError):\n return 0\n\n def get_stepids(self):\n \"\"\"\n Return the array of steps, if it has already been set.\n\n .. 
versionadded:: 0.7\n Renamed from get_steps\n\n :raises KeyError: if the trajectory has not been set yet.\n \"\"\"\n return self.get_array('steps')\n\n def get_times(self):\n \"\"\"\n Return the array of times (in ps), if it has already been set.\n\n :raises KeyError: if the trajectory has not been set yet.\n \"\"\"\n try:\n return self.get_array('times')\n except (AttributeError, KeyError):\n return None\n\n def get_cells(self):\n \"\"\"\n Return the array of cells, if it has already been set.\n\n :raises KeyError: if the trajectory has not been set yet.\n \"\"\"\n try:\n return self.get_array('cells')\n except (AttributeError, KeyError):\n return None\n\n @property\n def symbols(self):\n \"\"\"\n Return the array of symbols, if it has already been set.\n\n :raises KeyError: if the trajectory has not been set yet.\n \"\"\"\n return self.get_attribute('symbols')\n\n def get_positions(self):\n \"\"\"\n Return the array of positions, if it has already been set.\n\n :raises KeyError: if the trajectory has not been set yet.\n \"\"\"\n return self.get_array('positions')\n\n def get_velocities(self):\n \"\"\"\n Return the array of velocities, if it has already been set.\n\n .. note :: This function (differently from all other ``get_*``\n functions, will not raise an exception if the velocities are not\n set, but rather return ``None`` (both if no trajectory was not set yet,\n and if it the trajectory was set but no velocities were specified).\n \"\"\"\n try:\n return self.get_array('velocities')\n except (AttributeError, KeyError):\n return None\n\n def get_index_from_stepid(self, stepid):\n \"\"\"\n Given a value for the stepid (i.e., a value among those of the ``steps``\n array), return the array index of that stepid, that can be used in other\n methods such as :py:meth:`.get_step_data` or\n :py:meth:`.get_step_structure`.\n\n .. versionadded:: 0.7\n Renamed from get_step_index\n\n .. note:: Note that this function returns the first index found\n (i.e. if multiple steps are present with the same value,\n only the index of the first one is returned).\n\n :raises ValueError: if no step with the given value is found.\n \"\"\"\n import numpy\n\n try:\n return numpy.where(self.get_stepids() == stepid)[0][0]\n except IndexError:\n raise ValueError(\"{} not among the stepids\".format(stepid))\n\n def get_step_data(self, index):\n r\"\"\"\n Return a tuple with all information concerning\n the stepid with given index (0 is the first step, 1 the second step\n and so on). 
If you know only the step value, use the\n :py:meth:`.get_index_from_stepid` method to get the\n corresponding index.\n\n If no velocities were specified, None is returned as the last element.\n\n :return: A tuple in the format\n ``(stepid, time, cell, symbols, positions, velocities)``,\n where ``stepid`` is an integer, ``time`` is a float, ``cell`` is a\n :math:`3 \\times 3` matrix, ``symbols`` is an array of length ``n``,\n positions is a :math:`n \\times 3` array, and velocities is either\n ``None`` or a :math:`n \\times 3` array\n\n :param index: The index of the step that you want to retrieve, from\n 0 to ``self.numsteps - 1``.\n :raises IndexError: if you require an index beyond the limits.\n :raises KeyError: if you did not store the trajectory yet.\n \"\"\"\n if index >= self.numsteps:\n raise IndexError(\"You have only {} steps, but you are looking beyond\"\n \" (index={})\".format(self.numsteps, index))\n\n vel = self.get_velocities()\n if vel is not None:\n vel = vel[index, :, :]\n time = self.get_times()\n if time is not None:\n time = time[index]\n cells = self.get_cells()\n if cells is not None:\n cell = cells[index, :, :]\n return (self.get_stepids()[index], time, cell, self.symbols, self.get_positions()[index, :, :], vel)\n\n def get_step_structure(self, index, custom_kinds=None):\n \"\"\"\n Return an AiiDA :py:class:`aiida.orm.nodes.data.structure.StructureData` node\n (not stored yet!) with the coordinates of the given step, identified by\n its index. If you know only the step value, use the\n :py:meth:`.get_index_from_stepid` method to get the corresponding index.\n\n .. note:: The periodic boundary conditions are always set to True.\n\n .. versionadded:: 0.7\n Renamed from step_to_structure\n\n :param index: The index of the step that you want to retrieve, from\n 0 to ``self.numsteps- 1``.\n :param custom_kinds: (Optional) If passed must be a list of\n :py:class:`aiida.orm.nodes.data.structure.Kind` objects. There must be one\n kind object for each different string in the ``symbols`` array, with\n ``kind.name`` set to this string.\n If this parameter is omitted, the automatic kind generation of AiiDA\n :py:class:`aiida.orm.nodes.data.structure.StructureData` nodes is used,\n meaning that the strings in the ``symbols`` array must be valid\n chemical symbols.\n \"\"\"\n from aiida.orm.nodes.data.structure import StructureData, Kind, Site\n\n # ignore step, time, and velocities\n _, _, cell, symbols, positions, _ = self.get_step_data(index)\n\n if custom_kinds is not None:\n kind_names = []\n for k in custom_kinds:\n if not isinstance(k, Kind):\n raise TypeError(\"Each element of the custom_kinds list must \"\n \"be a aiida.orm.nodes.data.structure.Kind object\")\n kind_names.append(k.name)\n if len(kind_names) != len(set(kind_names)):\n raise ValueError(\"Multiple kinds with the same name passed as custom_kinds\")\n if set(kind_names) != set(symbols):\n raise ValueError(\"If you pass custom_kinds, you have to \"\n \"pass one Kind object for each symbol \"\n \"that is present in the trajectory. 
You \"\n \"passed {}, but the symbols are {}\".format(sorted(kind_names), sorted(symbols)))\n\n struc = StructureData(cell=cell)\n if custom_kinds is not None:\n for _k in custom_kinds:\n struc.append_kind(_k)\n for _s, _p in zip(symbols, positions):\n struc.append_site(Site(kind_name=_s, position=_p))\n else:\n for _s, _p in zip(symbols, positions):\n # Automatic species generation\n struc.append_atom(symbols=_s, position=_p)\n\n return struc\n\n def _prepare_xsf(self, index=None, main_file_name=\"\"): # pylint: disable=unused-argument\n \"\"\"\n Write the given trajectory to a string of format XSF (for XCrySDen).\n \"\"\"\n from aiida.common.constants import elements\n _atomic_numbers = {data['symbol']: num for num, data in elements.items()}\n\n indices = list(range(self.numsteps))\n if index is not None:\n indices = [index]\n return_string = \"ANIMSTEPS {}\\nCRYSTAL\\n\".format(len(indices))\n # Do the checks once and for all here:\n structure = self.get_step_structure(index=0)\n if structure.is_alloy or structure.has_vacancies:\n raise NotImplementedError(\"XSF for alloys or systems with vacancies not implemented.\")\n cells = self.get_cells()\n if cells is None:\n raise ValueError(\"No cell parameters have been supplied for TrajectoryData\")\n positions = self.get_positions()\n symbols = self.symbols\n atomic_numbers_list = [_atomic_numbers[s] for s in symbols]\n nat = len(symbols)\n\n for idx in indices:\n return_string += \"PRIMVEC {}\\n\".format(idx + 1)\n for cell_vector in cells[idx]:\n return_string += ' '.join([\"{:18.5f}\".format(i) for i in cell_vector])\n return_string += \"\\n\"\n return_string += \"PRIMCOORD {}\\n\".format(idx + 1)\n return_string += \"{} 1\\n\".format(nat)\n for atn, pos in zip(atomic_numbers_list, positions[idx]):\n try:\n return_string += \"{} {:18.10f} {:18.10f} {:18.10f}\\n\".format(atn, pos[0], pos[1], pos[2])\n except:\n print(atn, pos)\n raise\n return return_string.encode('utf-8'), {}\n\n def _prepare_cif(self, trajectory_index=None, main_file_name=\"\"): # pylint: disable=unused-argument\n \"\"\"\n Write the given trajectory to a string of format CIF.\n \"\"\"\n from aiida.orm.nodes.data.cif \\\n import ase_loops, cif_from_ase, pycifrw_from_cif\n from aiida.common.utils import Capturing\n\n cif = \"\"\n indices = list(range(self.numsteps))\n if trajectory_index is not None:\n indices = [trajectory_index]\n for idx in indices:\n structure = self.get_step_structure(idx)\n ciffile = pycifrw_from_cif(cif_from_ase(structure.get_ase()), ase_loops)\n with Capturing():\n cif = cif + ciffile.WriteOut()\n return cif.encode('utf-8'), {}\n\n def _get_aiida_structure(self, store=False, **kwargs):\n \"\"\"\n Creates :py:class:`aiida.orm.nodes.data.structure.StructureData`.\n\n :param converter: specify the converter. Default 'ase'.\n :param store: If True, intermediate calculation gets stored in the\n AiiDA database for record. Default False.\n :return: :py:class:`aiida.orm.nodes.data.structure.StructureData` node.\n \"\"\"\n import warnings\n from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin\n warnings.warn( # pylint: disable=no-member\n 'This method has been deprecated and will be renamed to get_structure() in AiiDA v1.0', DeprecationWarning)\n return self.get_structure(store=store, **kwargs)\n\n def get_structure(self, store=False, **kwargs):\n \"\"\"\n Creates :py:class:`aiida.orm.nodes.data.structure.StructureData`.\n\n .. 
versionadded:: 1.0\n Renamed from _get_aiida_structure\n\n :param converter: specify the converter. Default 'ase'.\n :param store: If True, intermediate calculation gets stored in the\n AiiDA database for record. Default False.\n :return: :py:class:`aiida.orm.nodes.data.structure.StructureData` node.\n \"\"\"\n from aiida.orm.nodes.data.dict import Dict\n from aiida.tools.data.array.trajectory import _get_aiida_structure_inline\n\n param = Dict(dict=kwargs)\n\n ret_dict = _get_aiida_structure_inline(trajectory=self, parameters=param, metadata={'store_provenance': store}) # pylint: disable=unexpected-keyword-arg\n return ret_dict['structure']\n\n def _get_cif(self, index=None, **kwargs):\n \"\"\"\n Creates :py:class:`aiida.orm.nodes.data.cif.CifData`\n \"\"\"\n import warnings\n from aiida.common.warnings import AiidaDeprecationWarning as DeprecationWarning # pylint: disable=redefined-builtin\n warnings.warn( # pylint: disable=no-member\n 'This method has been deprecated and will be renamed to get_cif() in AiiDA v1.0', DeprecationWarning)\n return self.get_cif(index=index, **kwargs)\n\n def get_cif(self, index=None, **kwargs):\n \"\"\"\n Creates :py:class:`aiida.orm.nodes.data.cif.CifData`\n\n .. versionadded:: 1.0\n Renamed from _get_cif\n \"\"\"\n struct = self.get_structure(index=index, **kwargs)\n cif = struct.get_cif(**kwargs)\n return cif\n\n def _parse_xyz_pos(self, inputstring):\n \"\"\"\n Load positions from a XYZ file.\n\n .. note:: The steps and symbols must be set manually before calling this\n import function as a consistency measure. Even though the symbols\n and steps could be extracted from the XYZ file, the data present in\n the XYZ file may or may not be correct and the same logic would have\n to be present in the XYZ-velocities function. It was therefore\n decided not to implement it at all but require it to be set\n explicitly.\n\n Usage::\n\n from aiida.orm.nodes.data.array.trajectory import TrajectoryData\n\n t = TrajectoryData()\n # get sites and number of timesteps\n t.set_array('steps', arange(ntimesteps))\n t.set_array('symbols', array([site.kind for site in s.sites]))\n t.importfile('some-calc/AIIDA-PROJECT-pos-1.xyz', 'xyz_pos')\n \"\"\"\n\n from aiida.common.exceptions import ValidationError\n from aiida.tools.data.structure import xyz_parser_iterator\n from numpy import array\n\n numsteps = self.numsteps\n if numsteps == 0:\n raise ValidationError(\"steps must be set before importing positional data\")\n\n numsites = self.numsites\n if numsites == 0:\n raise ValidationError(\"symbols must be set before importing positional data\")\n\n positions = array(\n [[list(position) for _, position in atoms] for _, _, atoms in xyz_parser_iterator(inputstring)])\n\n if positions.shape != (numsteps, numsites, 3):\n raise ValueError(\"TrajectoryData.positions must have shape (s,n,3), \"\n \"with s=number of steps={} and \"\n \"n=number of symbols={}\".format(numsteps, numsites))\n\n self.set_array('positions', positions)\n\n def _parse_xyz_vel(self, inputstring):\n \"\"\"\n Load velocities from a XYZ file.\n\n .. note:: The steps and symbols must be set manually before calling this\n import function as a consistency measure. 
See also comment for\n :py:meth:`._parse_xyz_pos`\n \"\"\"\n\n from aiida.common.exceptions import ValidationError\n from aiida.tools.data.structure import xyz_parser_iterator\n from numpy import array\n\n numsteps = self.numsteps\n if numsteps == 0:\n raise ValidationError(\"steps must be set before importing positional data\")\n\n numsites = self.numsites\n if numsites == 0:\n raise ValidationError(\"symbols must be set before importing positional data\")\n\n velocities = array(\n [[list(velocity) for _, velocity in atoms] for _, _, atoms in xyz_parser_iterator(inputstring)])\n\n if velocities.shape != (numsteps, numsites, 3):\n raise ValueError(\"TrajectoryData.positions must have shape (s,n,3), \"\n \"with s=number of steps={} and \"\n \"n=number of symbols={}\".format(numsteps, numsites))\n\n self.set_array('velocities', velocities)\n\n def show_mpl_pos(self, **kwargs): # pylint: disable=too-many-locals\n \"\"\"\n Shows the positions as a function of time, separate for XYZ coordinates\n\n :param int stepsize: The stepsize for the trajectory, set higher than 1 to\n reduce number of points\n :param int mintime: Time to start from\n :param int maxtime: Maximum time\n :param list elements:\n A list of atomic symbols that should be displayed.\n If not specified, all atoms are displayed.\n :param list indices:\n A list of indices of that atoms that can be displayed.\n If not specified, all atoms of the correct species are displayed.\n :param bool dont_block: If True, interpreter is not blocked when figure is displayed.\n \"\"\"\n from ase.data import atomic_numbers\n from aiida.common.exceptions import InputValidationError\n\n # Reading the arrays I need:\n positions = self.get_positions()\n times = self.get_times()\n symbols = self.symbols\n\n # Try to get the units.\n try:\n positions_unit = self.get_attribute('units|positions')\n except KeyError:\n positions_unit = 'A'\n try:\n times_unit = self.get_attribute('units|times')\n except KeyError:\n times_unit = 'ps'\n\n # Getting the keyword input\n stepsize = kwargs.pop('stepsize', 1)\n maxtime = kwargs.pop('maxtime', times[-1])\n mintime = kwargs.pop('mintime', times[0])\n element_list = kwargs.pop('elements', None)\n index_list = kwargs.pop('indices', None)\n dont_block = kwargs.pop('dont_block', False)\n label = kwargs.pop('label', None) or self.label or self.__repr__()\n # Choosing the color scheme\n\n colors = kwargs.pop('colors', 'jmol')\n if colors == 'jmol':\n from ase.data.colors import jmol_colors as colors\n elif colors == 'cpk':\n from ase.data.colors import cpk_colors as colors\n else:\n raise InputValidationError(\"Unknown color spec {}\".format(colors))\n if kwargs:\n raise InputValidationError(\"Unrecognized keyword {}\".format(kwargs.keys()))\n\n if element_list is None:\n # If not all elements are allowed\n allowed_elements = set(symbols)\n else:\n # A subset of elements are allowed\n allowed_elements = set(element_list)\n color_dict = {s: colors[atomic_numbers[s]] for s in set(symbols)}\n # Here I am trying to find out the atoms to show\n if index_list is None:\n # If not index_list was provided, I will see if an element_list\n # was given to me\n indices_to_show = [i for i, sym in enumerate(symbols) if sym in allowed_elements]\n else:\n indices_to_show = index_list\n # I refrain from checking if indices are ok, will crash if not...\n\n # The color_list is a list of colors (RGB) that I will\n # pass, so the different species give different colors in the plot\n color_list = [color_dict[s] for s in symbols]\n\n # Reducing 
array size based on stepsize variable\n _times = times[::stepsize]\n _positions = positions[::stepsize]\n\n # Calling\n plot_positions_XYZ(\n _times,\n _positions,\n indices_to_show,\n color_list,\n label,\n positions_unit,\n times_unit,\n dont_block,\n mintime,\n maxtime,\n )\n\n def show_mpl_heatmap(self, **kwargs): # pylint: disable=invalid-name,too-many-arguments,too-many-locals,too-many-statements,too-many-branches\n \"\"\"\n Show a heatmap of the trajectory with matplotlib.\n \"\"\"\n import numpy as np\n from scipy import stats\n try:\n from mayavi import mlab\n except ImportError:\n raise ImportError(\"Unable to import the mayavi package, that is required to\"\n \"use the plotting feature you requested. \"\n \"Please install it first and then call this command again \"\n \"(note that the installation of mayavi is quite complicated \"\n \"and requires that you already installed the python numpy \"\n \"package, as well as the vtk package\")\n from ase.data.colors import jmol_colors\n from ase.data import atomic_numbers\n\n # pylint: disable=invalid-name\n\n def collapse_into_unit_cell(point, cell):\n \"\"\"\n Applies linear transformation to coordinate system based on crystal\n lattice, vectors. The inverse of that inverse transformation matrix with the\n point given results in the point being given as a multiples of lattice vectors\n Than take the integer of the rows to find how many times you have to shift\n the point back\"\"\"\n invcell = np.matrix(cell).T.I\n # point in crystal coordinates\n points_in_crystal = np.dot(invcell, point).tolist()[0]\n #point collapsed into unit cell\n points_in_unit_cell = [i % 1 for i in points_in_crystal]\n return np.dot(cell.T, points_in_unit_cell).tolist()\n\n elements = kwargs.pop('elements', None)\n mintime = kwargs.pop('mintime', None)\n maxtime = kwargs.pop('maxtime', None)\n stepsize = kwargs.pop('stepsize', None) or 1\n contours = np.array(kwargs.pop('contours', None) or (0.1, 0.5))\n sampling_stepsize = int(kwargs.pop('sampling_stepsize', None) or 0)\n\n times = self.get_times()\n if mintime is None:\n minindex = 0\n else:\n minindex = np.argmax(times > mintime)\n if maxtime is None:\n maxindex = len(times)\n else:\n maxindex = np.argmin(times < maxtime)\n positions = self.get_positions()[minindex:maxindex:stepsize]\n\n try:\n if self.get_attribute('units|positions') in ('bohr', 'atomic'):\n from aiida.common.constants import bohr_to_ang\n positions *= bohr_to_ang\n except KeyError:\n pass\n\n symbols = self.symbols\n if elements is None:\n elements = set(symbols)\n\n cells = self.get_cells()\n if cells is None:\n raise ValueError(\"No cell parameters have been supplied for TrajectoryData\")\n else:\n cell = np.array(cells[0])\n storage_dict = {s: {} for s in elements}\n for ele in elements:\n storage_dict[ele] = [np.array([]), np.array([]), np.array([])]\n for iat, ele in enumerate(symbols):\n if ele in elements:\n for idim in range(3):\n storage_dict[ele][idim] = np.concatenate((storage_dict[ele][idim],\n positions[:, iat, idim].flatten()))\n\n for ele in elements:\n storage_dict[ele] = np.array(storage_dict[ele]).T\n storage_dict[ele] = np.array([collapse_into_unit_cell(pos, cell) for pos in storage_dict[ele]]).T\n\n white = (1, 1, 1)\n mlab.figure(bgcolor=white, size=(1080, 720))\n\n for i1, a in enumerate(cell):\n i2 = (i1 + 1) % 3\n i3 = (i1 + 2) % 3\n for b in [np.zeros(3), cell[i2]]:\n for c in [np.zeros(3), cell[i3]]:\n p1 = b + c\n p2 = p1 + a\n mlab.plot3d([p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]], tube_radius=0.1)\n\n for 
ele, data in storage_dict.items():\n kde = stats.gaussian_kde(data, bw_method=0.15)\n\n _x = data[0, :]\n _y = data[1, :]\n _z = data[2, :]\n xmin, ymin, zmin = _x.min(), _y.min(), _z.min()\n xmax, ymax, zmax = _x.max(), _y.max(), _z.max()\n\n _xi, _yi, _zi = np.mgrid[xmin:xmax:60j, ymin:ymax:30j, zmin:zmax:30j] # pylint: disable=invalid-slice-index\n coords = np.vstack([item.ravel() for item in [_xi, _yi, _zi]])\n density = kde(coords).reshape(_xi.shape)\n\n # Plot scatter with mayavi\n #~ figure = mlab.figure('DensityPlot')\n grid = mlab.pipeline.scalar_field(_xi, _yi, _zi, density)\n #~ min = density.min()\n maxdens = density.max()\n #~ mlab.pipeline.volume(grid, vmin=min, vmax=min + .5*(max-min))\n surf = mlab.pipeline.iso_surface(grid, opacity=0.5, colormap='cool', contours=(maxdens * contours).tolist())\n lut = surf.module_manager.scalar_lut_manager.lut.table.to_array()\n\n # The lut is a 255x4 array, with the columns representing RGBA\n # (red, green, blue, alpha) coded with integers going from 0 to 255.\n\n # We modify the alpha channel to add a transparency gradient\n lut[:, -1] = np.linspace(100, 255, 256)\n lut[:, 0:3] = 255 * jmol_colors[atomic_numbers[ele]]\n # and finally we put this LUT back in the surface object. We could have\n # added any 255*4 array rather than modifying an existing LUT.\n surf.module_manager.scalar_lut_manager.lut.table = lut\n\n if sampling_stepsize > 0:\n mlab.points3d(\n _x[::sampling_stepsize],\n _y[::sampling_stepsize],\n _z[::sampling_stepsize],\n color=tuple(jmol_colors[atomic_numbers[ele]].tolist()),\n scale_mode='none',\n scale_factor=0.3,\n opacity=0.3)\n\n mlab.view(azimuth=155, elevation=70, distance='auto')\n mlab.show()\n\n\ndef plot_positions_XYZ( # pylint: disable=too-many-arguments,too-many-locals,invalid-name\n times,\n positions,\n indices_to_show,\n color_list,\n label,\n positions_unit='A',\n times_unit='ps',\n dont_block=False,\n mintime=None,\n maxtime=None,\n label_sparsity=10):\n \"\"\"\n Plot with matplotlib the positions of the coordinates of the atoms\n over time for a trajectory\n\n :param times: array of times\n :param positions: array of positions\n :param indices_to_show: list of indices of to show (0, 1, 2 for X, Y, Z)\n :param color_list: list of valid color specifications for matplotlib\n :param label: label for this plot to put in the title\n :param positions_unit: label for the units of positions (for the x label)\n :param times_unit: label for the units of times (for the y label)\n :param dont_block: passed to plt.show() as ``block=not dont_block``\n :param mintime: if specified, cut the time axis at the specified min value\n :param maxtime: if specified, cut the time axis at the specified max value\n :param label_sparsity: how often to put a label with the pair (t, coord)\n \"\"\"\n from matplotlib import pyplot as plt\n from matplotlib.gridspec import GridSpec\n import numpy as np\n\n tlim = [times[0], times[-1]]\n index_range = [0, len(times)]\n if mintime is not None:\n tlim[0] = mintime\n index_range[0] = np.argmax(times > mintime)\n if maxtime is not None:\n tlim[1] = maxtime\n index_range[1] = np.argmin(times < maxtime)\n\n trajectories = zip(*positions.tolist()) # only used in enumerate() below\n fig = plt.figure(figsize=(12, 7))\n\n plt.suptitle(r'Trajectory of {}'.format(label), fontsize=16)\n nr_of_axes = 3\n gridspec = GridSpec(nr_of_axes, 1, hspace=0.0)\n\n ax1 = fig.add_subplot(gridspec[0])\n plt.ylabel(r'X Position $\\left[{}\\right]$'.format(positions_unit))\n plt.xticks([])\n plt.xlim(*tlim)\n ax2 = 
fig.add_subplot(gridspec[1])\n plt.ylabel(r'Y Position $\\left[{}\\right]$'.format(positions_unit))\n plt.xticks([])\n plt.xlim(*tlim)\n ax3 = fig.add_subplot(gridspec[2])\n plt.ylabel(r'Z Position $\\left[{}\\right]$'.format(positions_unit))\n plt.xlabel('Time [{}]'.format(times_unit))\n plt.xlim(*tlim)\n sparse_indices = np.linspace(*index_range, num=label_sparsity, dtype=int)\n\n for index, traj in enumerate(trajectories):\n if index not in indices_to_show:\n continue\n color = color_list[index]\n _x, _y, _z = list(zip(*traj))\n ax1.plot(times, _x, color=color)\n ax2.plot(times, _y, color=color)\n ax3.plot(times, _z, color=color)\n for i in sparse_indices:\n ax1.text(times[i], _x[i], str(index), color=color, fontsize=5)\n ax2.text(times[i], _x[i], str(index), color=color, fontsize=5)\n ax3.text(times[i], _x[i], str(index), color=color, fontsize=5)\n for axes in ax1, ax2, ax3:\n yticks = axes.yaxis.get_major_ticks()\n yticks[0].label1.set_visible(False)\n\n plt.show(block=not dont_block)\n" ]
[ [ "numpy.array", "numpy.matrix", "numpy.dot", "numpy.linspace", "numpy.arange", "matplotlib.pyplot.xlim", "numpy.argmax", "numpy.argmin", "matplotlib.gridspec.GridSpec", "scipy.stats.gaussian_kde", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
clovaai/embedding-expansion
[ "1aa68cbf4ca8f020084ea9784300093d5c381878" ]
[ "runner/evaluator.py" ]
[ "'''\nCopyright (c) 2020-present NAVER Corp.\nMIT license\n'''\n# encoding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom tqdm import tqdm\n\nimport mxnet as mx\nimport numpy as np\n\n\nclass Evaluator(object):\n def __init__(self, model, test_loader, ctx):\n self.model = model\n self.test_loader = test_loader\n self.ctx = ctx\n\n def _eval_step(self, inputs):\n images, instance_ids, category_ids, view_ids = inputs\n data = mx.gluon.utils.split_and_load(images, self.ctx, even_split=False)\n instance_ids = instance_ids.asnumpy()\n view_ids = view_ids.asnumpy()\n feats = []\n for d in data:\n feats.append(self.model(d))\n feats = mx.nd.concatenate(feats, axis=0)\n return feats, instance_ids, view_ids\n\n \n def get_distmat(self):\n print('Extracting eval features...')\n features, labels = [], []\n for batch_idx, inputs in tqdm(enumerate(self.test_loader), total=len(self.test_loader)):\n feature, instance_ids, view_ids = self._eval_step(inputs)\n features.append(feature.asnumpy())\n labels.extend(instance_ids)\n features = np.concatenate(features)\n labels = np.asarray(labels)\n \n m = features.shape[0]\n squared_sum_features = np.sum(features ** 2.0, axis=1, keepdims=True)\n distmat = squared_sum_features + squared_sum_features.transpose() - (2.0 * np.dot(features, features.transpose()))\n\n return distmat, labels\n\n\n def get_metric_at_ranks(self, distmat, labels, ranks):\n np.fill_diagonal(distmat, 100000.0)\n\n recall_at_ranks = []\n\n recall_dict = {k: 0 for k in ranks}\n\n max_k = np.max(ranks)\n\n # do partition\n arange_idx = np.arange(len(distmat))[:,None]\n part_idx = np.argpartition(distmat, max_k, axis=1)[:,:max_k]\n part_mat = distmat[arange_idx, part_idx]\n\n # do sort\n sorted_idx = np.argsort(part_mat, axis=1)#[::-1]\n top_k_idx = part_idx[arange_idx, sorted_idx]\n\n for top_k, gt in zip(top_k_idx, labels):\n top_k_labels = labels[top_k]\n for r in ranks:\n if gt in top_k_labels[:r]:\n recall_dict[r] += 1\n\n for r in ranks:\n recall_at_ranks.append(recall_dict[r] / len(distmat))\n\n return recall_at_ranks\n" ]
[ [ "numpy.asarray", "numpy.concatenate", "numpy.max", "numpy.fill_diagonal", "numpy.argpartition", "numpy.argsort", "numpy.sum" ] ]
brunodferrari/bdp
[ "d320add1e451c85b6777ae34901bbd6fd3797114" ]
[ "dbdp_instances/GraficosTG/GeraModelos_lastCopia.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 7 13:38:07 2021\n\n@author: bferrari\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom itertools import combinations\n\nfrom xgboost import XGBClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import cross_val_score, cross_val_predict, GridSearchCV, ParameterGrid, train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix, recall_score, f1_score, ConfusionMatrixDisplay, plot_confusion_matrix\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.preprocessing import StandardScaler\n\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.preprocessing import label_binarize\n\n\ndados_org = pd.read_excel('final_results.xlsx')\ndados_scatter = pd.concat([dados_org.iloc[:,1:11]],axis=1)\n\nX = dados_scatter.drop(['V1', 'V2', 'deg_min'],axis=1)\n\ncrossing_mh = ['Crossing_vns', 'Crossing_ts', 'Crossing_gs_vns']\nbest_mh = dados_org[crossing_mh].idxmin(axis=1).fillna('Crossing_gs_vns')\ndummy_mh = pd.get_dummies(best_mh)\n\nbest_mh_labeled = best_mh.replace({'Crossing_vns': 1,\n 'Crossing_ts': 1, \n 'Crossing_gs_vns': 3})\n\n###################### SVM ##########################\npoly_kernel_svm_clf = Pipeline([ \n (\"scaler\", StandardScaler()), \n (\"model\", SVC(probability=True,random_state=42)) \n ]) \n\ntuned_parameters = [{'model__kernel': ['rbf'], 'model__gamma': [1e4, 1e3, 1e2, 1e1, 1, 1e-3, 1e-4],\n 'model__C': [1e3, 1e2, 1e1, 1, 1e-3, 1e-4]},\n {'model__kernel': ['poly'], 'model__degree': [1,2], 'model__C': [1e3, 1e2, 1e1, 1, 1e-3, 1e-4]}]\n\nsvc_grid = GridSearchCV(poly_kernel_svm_clf, tuned_parameters, n_jobs=-1, verbose=1)\n\nbest_svm = svc_grid.fit(X, best_mh_labeled)\n\nacc_svm = cross_val_score(best_svm.best_estimator_, X, best_mh_labeled, n_jobs=5, verbose=1, scoring='accuracy')\nprint('Accuracy')\nprint(round(acc_svm.mean(), 2))\nprint(round(acc_svm.std(), 2))\nroc_svm = cross_val_score(best_svm.best_estimator_, X, best_mh_labeled, n_jobs=5, verbose=1, scoring='roc_auc_ovr')\nprint('ROC')\nprint(round(roc_svm.mean(), 2))\nprint(round(roc_svm.std(), 2))\nsvm_hat = cross_val_predict(best_svm.best_estimator_, X, best_mh_labeled, n_jobs=-1, verbose=1)\n\n###################### AD ##########################\nXtr, Xval, Ytr, Yval = train_test_split(X, best_mh_labeled)\nfrom sklearn.tree import plot_tree\n\ntuned_parameters = [{'model__max_features': [None, 'sqrt'], 'model__max_depth': [2,3,5,8,13,21,34],\n 'model__min_samples_leaf':[3,5,8,13,21,34]}]\n\nad = Pipeline([ \n (\"model\", DecisionTreeClassifier(max_depth=2, random_state=42))\n ]) \n\nad.fit(Xtr,Ytr)\nprint(ad.score(Xval,Yval))\nimport pylab as pl\npl.figure(figsize=(20,16))\nplot_tree(ad.named_steps['model'], feature_names=X.columns)\n\n\nad_grid = GridSearchCV(ad, tuned_parameters, n_jobs=5, verbose=1)\nbest_ad = ad_grid.fit(X, best_mh_labeled)\nacc_ad = cross_val_score(best_ad.best_estimator_, X, best_mh_labeled, n_jobs=5, verbose=1, scoring='accuracy')\nprint('Accuracy')\nprint(round(acc_ad.mean(), 2))\nprint(round(acc_ad.std(), 2))\nprint('ROC')\nroc_ad = cross_val_score(best_ad.best_estimator_, X, best_mh_labeled, n_jobs=5, verbose=1, scoring='roc_auc_ovr')\nprint(round(roc_ad.mean(), 
2))\nprint(round(roc_ad.std(), 2))\nad_hat = cross_val_predict(best_ad.best_estimator_, X, best_mh_labeled, n_jobs=-1, verbose=1)\n\n\n###################### RF ##########################\nXtr, Xval, Ytr, Yval = train_test_split(X, best_mh_labeled)\n\ntuned_parameters = [{'model__max_features': [None, 'sqrt'], \n 'model__min_samples_leaf': [1,2,3,5,8,13,21,34],\n 'model__n_estimators': [10, 100, 1000]}]\n\nrf = Pipeline([ \n (\"model\", RandomForestClassifier(random_state=42,n_jobs=-1))\n ]) \n\nrf.fit(Xtr,Ytr)\nprint(rf.score(Xval,Yval))\n\n\n\nrf_grid = GridSearchCV(rf, tuned_parameters, n_jobs=5, verbose=1)\nbest_rf = rf_grid.fit(X, best_mh_labeled)\nacc_rf = cross_val_score(best_rf.best_estimator_, X, best_mh_labeled, n_jobs=5, verbose=1, scoring='accuracy')\nprint('Accuracy')\nprint(round(acc_rf.mean(), 2))\nprint(round(acc_rf.std(), 2))\nprint('ROC')\nroc_rf = cross_val_score(best_rf.best_estimator_, X, best_mh_labeled, n_jobs=5, verbose=1, scoring='roc_auc_ovr')\nprint(round(roc_rf.mean(), 2))\nprint(round(roc_rf.std(), 2))\nrf_hat = cross_val_predict(best_rf.best_estimator_, X, best_mh_labeled, n_jobs=-1, verbose=1)\n\n\n###################### KNN ##########################\nXtr, Xval, Ytr, Yval = train_test_split(X, best_mh_labeled)\n\ntuned_parameters = [{'scaler': ['passthrough', StandardScaler()],\n 'model__n_neighbors': [3,5,7,13,15,21], \n 'model__p': [1,2,3,5]}]\n \n\nknn = Pipeline([ \n (\"scaler\", StandardScaler()), \n (\"model\", KNeighborsClassifier(n_neighbors=15, p=5, n_jobs=-1))\n ]) \n\nknn.fit(Xtr,Ytr)\nprint(knn.score(Xval,Yval))\n\nknn_grid = GridSearchCV(knn, tuned_parameters, n_jobs=5, verbose=1)\nbest_knn = knn_grid.fit(X, best_mh_labeled)\n\nacc_knn = cross_val_score(best_knn.best_estimator_, X, best_mh_labeled, n_jobs=5, verbose=1, scoring='accuracy')\nprint('Accuracy')\nprint(round(acc_knn.mean(), 2))\nprint(round(acc_knn.std(), 2))\nprint('ROC')\nroc_knn = cross_val_score(best_knn.best_estimator_, X, best_mh_labeled, n_jobs=5, verbose=1, scoring='roc_auc_ovr')\nprint(round(roc_knn.mean(), 2))\nprint(round(roc_knn.std(), 2))\nknn_hat = cross_val_predict(best_knn.best_estimator_, X, best_mh_labeled, n_jobs=-1, verbose=1)\n\n###################### CM ##########################\n#fig, ax = plt.subplots(2, 2, figsize=(12,12))\n#pos = [(i,j) for i in range(2) for j in range(2)]\nfor var in globals():\n #print(var)\n if str(var).find(\"_hat\") > 0:\n #pos_ = pos.pop(0)\n ConfusionMatrixDisplay(confusion_matrix(best_mh_labeled, globals()[var]), ['VND/TABU', 'GRASP']).plot()\n pl.title(var.split(\"_\")[0].upper())\n pl.xlabel('Valor Previsto')\n pl.ylabel('Valor Atribuído') \n " ]
[ [ "pandas.concat", "pandas.read_excel", "sklearn.model_selection.cross_val_score", "sklearn.model_selection.GridSearchCV", "sklearn.model_selection.cross_val_predict", "sklearn.ensemble.RandomForestClassifier", "sklearn.model_selection.train_test_split", "sklearn.neighbors.KNeighborsClassifier", "sklearn.tree.DecisionTreeClassifier", "sklearn.tree.plot_tree", "sklearn.svm.SVC", "sklearn.preprocessing.StandardScaler", "pandas.get_dummies" ] ]
loveher147/zvt
[ "e16d8e20daa5d0c069294f063d8f5a021bb8e8bf" ]
[ "zvt/recorders/em/em_stock_actor_summary_recorder.py" ]
[ "# -*- coding: utf-8 -*-\nfrom typing import List\n\nimport pandas as pd\n\nfrom zvt.api.utils import to_report_period_type, value_to_pct\nfrom zvt.contract import ActorType\nfrom zvt.contract.api import df_to_db\nfrom zvt.contract.recorder import TimestampsDataRecorder\nfrom zvt.domain import Stock\nfrom zvt.domain.actor.stock_actor import StockActorSummary\nfrom zvt.recorders.em.common import get_ii_holder_report_dates, actor_type_to_org_type, get_ii_summary\nfrom zvt.utils import to_pd_timestamp, to_time_str\n\n\n# [{'CHANGE_RATIO': -1.045966694333,\n# 'IS_COMPLETE': '1',\n# 'ORG_TYPE': '07',\n# 'REPORT_DATE': '2021-03-31 00:00:00',\n# 'SECUCODE': '000338.SZ',\n# 'SECURITY_CODE': '000338',\n# 'TOTAL_FREE_SHARES': 2598718411,\n# 'TOTAL_MARKET_CAP': 49999342227.64,\n# 'TOTAL_ORG_NUM': 5,\n# 'TOTAL_SHARES_RATIO': 29.51742666}]\n\nclass EMStockActorSummaryRecorder(TimestampsDataRecorder):\n entity_provider = 'joinquant'\n entity_schema = Stock\n\n provider = 'em'\n data_schema = StockActorSummary\n\n def init_timestamps(self, entity_item) -> List[pd.Timestamp]:\n result = get_ii_holder_report_dates(code=entity_item.code)\n if result:\n return [to_pd_timestamp(item['REPORT_DATE']) for item in result]\n\n def record(self, entity, start, end, size, timestamps):\n for timestamp in timestamps:\n the_date = to_time_str(timestamp)\n self.logger.info(f'to {entity.code} {the_date}')\n for actor_type in ActorType:\n if actor_type == ActorType.private_equity or actor_type == ActorType.individual:\n continue\n result = get_ii_summary(code=entity.code, report_date=the_date,\n org_type=actor_type_to_org_type(actor_type))\n if result:\n summary_list = [{'id': f'{entity.entity_id}_{the_date}_{actor_type.value}',\n 'entity_id': entity.entity_id,\n 'timestamp': timestamp,\n 'code': entity.code,\n 'name': entity.name,\n\n 'actor_type': actor_type.value,\n 'actor_count': item['TOTAL_ORG_NUM'],\n\n 'report_date': timestamp,\n 'report_period': to_report_period_type(timestamp),\n\n 'change_ratio': value_to_pct(item['CHANGE_RATIO'], default=1),\n 'is_complete': item['IS_COMPLETE'],\n 'holding_numbers': item['TOTAL_FREE_SHARES'],\n 'holding_ratio': value_to_pct(item['TOTAL_SHARES_RATIO'], default=0),\n 'holding_values': item['TOTAL_MARKET_CAP']\n } for item in result]\n df = pd.DataFrame.from_records(summary_list)\n df_to_db(data_schema=self.data_schema, df=df, provider=self.provider,\n force_update=True, drop_duplicates=True)\n\n\nif __name__ == '__main__':\n EMStockActorSummaryRecorder(codes=['000338']).run()\n# the __all__ is generated\n__all__ = ['EMStockActorSummaryRecorder']\n" ]
[ [ "pandas.DataFrame.from_records" ] ]
arturohernandez10/pose-interpreter-networks
[ "b8cfa19bed62bdd9179f8c4a01675cd6644e8f99" ]
[ "segmentation/utils.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import colors\n\n\ndef accuracy(output, target):\n _, pred = output.max(1)\n pred = pred.view(-1)\n target = target.view(-1)\n correct = pred.eq(target)\n score = correct.float().sum(0).mul(100.0 / correct.size(0))\n return score.item()\n\n\ndef fast_hist(pred, label, n):\n k = (label >= 0) & (label < n)\n return np.bincount(\n n * label[k].astype(int) + pred[k], minlength=n ** 2).reshape(n, n)\n\n\ndef per_class_iou(hist):\n return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n\n\nobject_colors = [\n 'k', # 0 background\n 'm', # 1 oil_bottle\n 'w', # 2 fluid_bottle\n 'c', # 3 oilfilter\n 'g', # 4 funnel\n 'b', # 5 engine\n 'r', # 6 blue_funnel\n 'orange', # 7 tissue_box\n 'brown', # 8 drill\n 'lime', # 9 cracker_box\n 'yellow' # 10 spam\n]\ncmap = colors.ListedColormap(object_colors)\n\n\ndef visualize(ax, image, label):\n ax.imshow(image)\n ax.imshow(label, cmap=cmap, alpha=0.5, vmin=0, vmax=len(object_colors) - 1)\n\n\ndef render_batch(visualize_fn, input, target, output):\n batch_size = input.shape[0]\n fig, axes = plt.subplots(nrows=batch_size, ncols=3, figsize=(12, 4*batch_size))\n plt.subplots_adjust(left=0.05, bottom=0, right=0.95, top=1, hspace=0)\n for i in range(batch_size):\n ax = axes if batch_size == 1 else axes[i] # otherwise won't work if nrows is 1\n ax[0].imshow(input[i])\n visualize_fn(ax[1], input[i], target[i])\n visualize_fn(ax[2], input[i], output[i])\n fig.canvas.draw()\n data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') / 255.\n data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n plt.close(fig)\n return data\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n" ]
[ [ "numpy.diag", "matplotlib.pyplot.subplots", "matplotlib.colors.ListedColormap", "matplotlib.pyplot.close", "matplotlib.pyplot.subplots_adjust" ] ]
HaoZeke/aiida-core
[ "84c2098318bf234641219e55795726f99dc25a16", "84c2098318bf234641219e55795726f99dc25a16" ]
[ "tests/tools/importexport/test_complex.py", "aiida/orm/nodes/data/array/bands.py" ]
[ "# -*- coding: utf-8 -*-\n###########################################################################\n# Copyright (c), The AiiDA team. All rights reserved. #\n# This file is part of the AiiDA code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.aiida.net #\n###########################################################################\n\"\"\"Complex tests for the export and import routines\"\"\"\n# pylint: disable=too-many-locals\n\nimport os\n\nfrom aiida import orm\nfrom aiida.backends.testbase import AiidaTestCase\nfrom aiida.common.links import LinkType\nfrom aiida.tools.importexport import import_data, export\n\nfrom tests.utils.configuration import with_temp_dir\n\n\nclass TestComplex(AiidaTestCase):\n \"\"\"Test complex ex-/import cases\"\"\"\n\n def setUp(self):\n self.reset_database()\n\n def tearDown(self):\n self.reset_database()\n\n @with_temp_dir\n def test_complex_graph_import_export(self, temp_dir):\n \"\"\"\n This test checks that a small and bit complex graph can be correctly\n exported and imported.\n\n It will create the graph, store it to the database, export it to a file\n and import it. In the end it will check if the initial nodes are present\n at the imported graph.\n \"\"\"\n from aiida.common.exceptions import NotExistent\n\n calc1 = orm.CalcJobNode()\n calc1.computer = self.computer\n calc1.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})\n calc1.label = 'calc1'\n calc1.store()\n\n pd1 = orm.Dict()\n pd1.label = 'pd1'\n pd1.store()\n\n pd2 = orm.Dict()\n pd2.label = 'pd2'\n pd2.store()\n\n rd1 = orm.RemoteData()\n rd1.label = 'rd1'\n rd1.set_remote_path('/x/y.py')\n rd1.computer = self.computer\n rd1.store()\n rd1.add_incoming(calc1, link_type=LinkType.CREATE, link_label='link')\n\n calc2 = orm.CalcJobNode()\n calc2.computer = self.computer\n calc2.set_option('resources', {'num_machines': 1, 'num_mpiprocs_per_machine': 1})\n calc2.label = 'calc2'\n calc2.add_incoming(pd1, link_type=LinkType.INPUT_CALC, link_label='link1')\n calc2.add_incoming(pd2, link_type=LinkType.INPUT_CALC, link_label='link2')\n calc2.add_incoming(rd1, link_type=LinkType.INPUT_CALC, link_label='link3')\n calc2.store()\n\n fd1 = orm.FolderData()\n fd1.label = 'fd1'\n fd1.store()\n fd1.add_incoming(calc2, link_type=LinkType.CREATE, link_label='link')\n\n calc1.seal()\n calc2.seal()\n\n node_uuids_labels = {\n calc1.uuid: calc1.label,\n pd1.uuid: pd1.label,\n pd2.uuid: pd2.label,\n rd1.uuid: rd1.label,\n calc2.uuid: calc2.label,\n fd1.uuid: fd1.label\n }\n\n filename = os.path.join(temp_dir, 'export.aiida')\n export([fd1], filename=filename, silent=True)\n\n self.clean_db()\n self.create_user()\n\n import_data(filename, silent=True, ignore_unknown_nodes=True)\n\n for uuid, label in node_uuids_labels.items():\n try:\n orm.load_node(uuid)\n except NotExistent:\n self.fail('Node with UUID {} and label {} was not found.'.format(uuid, label))\n\n @with_temp_dir\n def test_reexport(self, temp_dir):\n \"\"\"\n Export something, import and reexport and check if everything is valid.\n The export is rather easy::\n\n ___ ___ ___\n | | INP | | CREATE | |\n | p | --> | c | -----> | a |\n |___| |___| |___|\n\n \"\"\"\n import numpy as np\n import string\n import random\n from datetime import datetime\n\n from aiida.common.hashing import make_hash\n\n def get_hash_from_db_content(grouplabel):\n \"\"\"Helper function to get 
hash\"\"\"\n builder = orm.QueryBuilder()\n builder.append(orm.Dict, tag='param', project='*')\n builder.append(orm.CalculationNode, tag='calc', project='*', edge_tag='p2c', edge_project=('label', 'type'))\n builder.append(orm.ArrayData, tag='array', project='*', edge_tag='c2a', edge_project=('label', 'type'))\n builder.append(orm.Group, filters={'label': grouplabel}, project='*', tag='group', with_node='array')\n # I want the query to contain something!\n self.assertTrue(builder.count() > 0)\n # The hash is given from the preservable entries in an export-import cycle,\n # uuids, attributes, labels, descriptions, arrays, link-labels, link-types:\n hash_ = make_hash([(\n item['param']['*'].attributes,\n item['param']['*'].uuid,\n item['param']['*'].label,\n item['param']['*'].description,\n item['calc']['*'].uuid,\n item['calc']['*'].attributes,\n item['array']['*'].attributes,\n [item['array']['*'].get_array(name).tolist() for name in item['array']['*'].get_arraynames()],\n item['array']['*'].uuid,\n item['group']['*'].uuid,\n item['group']['*'].label,\n item['p2c']['label'],\n item['p2c']['type'],\n item['c2a']['label'],\n item['c2a']['type'],\n item['group']['*'].label,\n ) for item in builder.dict()])\n return hash_\n\n # Creating a folder for the import/export files\n chars = string.ascii_uppercase + string.digits\n size = 10\n grouplabel = 'test-group'\n\n nparr = np.random.random((4, 3, 2)) # pylint: disable=no-member\n trial_dict = {}\n # give some integers:\n trial_dict.update({str(k): np.random.randint(100) for k in range(10)})\n # give some floats:\n trial_dict.update({str(k): np.random.random() for k in range(10, 20)}) # pylint: disable=no-member\n # give some booleans:\n trial_dict.update({str(k): bool(np.random.randint(1)) for k in range(20, 30)})\n # give some text:\n trial_dict.update({str(k): ''.join(random.choice(chars) for _ in range(size)) for k in range(20, 30)})\n\n param = orm.Dict(dict=trial_dict)\n param.label = str(datetime.now())\n param.description = 'd_' + str(datetime.now())\n param.store()\n calc = orm.CalculationNode()\n # setting also trial dict as attributes, but randomizing the keys)\n for key, value in trial_dict.items():\n calc.set_attribute(str(int(key) + np.random.randint(10)), value)\n array = orm.ArrayData()\n array.set_array('array', nparr)\n array.store()\n # LINKS\n # the calculation has input the parameters-instance\n calc.add_incoming(param, link_type=LinkType.INPUT_CALC, link_label='input_parameters')\n calc.store()\n # I want the array to be an output of the calculation\n array.add_incoming(calc, link_type=LinkType.CREATE, link_label='output_array')\n group = orm.Group(label='test-group')\n group.store()\n group.add_nodes(array)\n\n calc.seal()\n\n hash_from_dbcontent = get_hash_from_db_content(grouplabel)\n\n # I export and reimport 3 times in a row:\n for i in range(3):\n # Always new filename:\n filename = os.path.join(temp_dir, 'export-{}.aiida'.format(i))\n # Loading the group from the string\n group = orm.Group.get(label=grouplabel)\n # exporting based on all members of the group\n # this also checks if group memberships are preserved!\n export([group] + list(group.nodes), filename=filename, silent=True)\n # cleaning the DB!\n self.clean_db()\n self.create_user()\n # reimporting the data from the file\n import_data(filename, silent=True, ignore_unknown_nodes=True)\n # creating the hash from db content\n new_hash = get_hash_from_db_content(grouplabel)\n # I check for equality against the first hash created, which implies that hashes\n # 
are equal in all iterations of this process\n self.assertEqual(hash_from_dbcontent, new_hash)\n", "# -*- coding: utf-8 -*-\n###########################################################################\n# Copyright (c), The AiiDA team. All rights reserved. #\n# This file is part of the AiiDA code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.aiida.net #\n###########################################################################\n# pylint: disable=too-many-lines\n\"\"\"\nThis module defines the classes related to band structures or dispersions\nin a Brillouin zone, and how to operate on them.\n\"\"\"\nfrom string import Template\n\nimport numpy\n\nfrom aiida.common.exceptions import ValidationError\nfrom aiida.common.utils import prettify_labels, join_labels\nfrom .kpoints import KpointsData\n\n\ndef prepare_header_comment(uuid, plot_info, comment_char='#'):\n \"\"\"Prepare the header.\"\"\"\n from aiida import get_file_header\n\n filetext = []\n filetext += get_file_header(comment_char='').splitlines()\n filetext.append('')\n filetext.append('Dumped from BandsData UUID={}'.format(uuid))\n filetext.append('\\tpoints\\tbands')\n filetext.append('\\t{}\\t{}'.format(*plot_info['y'].shape))\n filetext.append('')\n filetext.append('\\tlabel\\tpoint')\n for label in plot_info['raw_labels']:\n filetext.append('\\t{}\\t{:.8f}'.format(label[1], label[0]))\n\n return '\\n'.join('{} {}'.format(comment_char, line) for line in filetext)\n\n\ndef find_bandgap(bandsdata, number_electrons=None, fermi_energy=None):\n \"\"\"\n Tries to guess whether the bandsdata represent an insulator.\n This method is meant to be used only for electronic bands (not phonons)\n By default, it will try to use the occupations to guess the number of\n electrons and find the Fermi Energy, otherwise, it can be provided\n explicitely.\n Also, there is an implicit assumption that the kpoints grid is\n \"sufficiently\" dense, so that the bandsdata are not missing the\n intersection between valence and conduction band if present.\n Use this function with care!\n\n :param number_electrons: (optional, float) number of electrons in the unit cell\n :param fermi_energy: (optional, float) value of the fermi energy.\n\n :note: By default, the algorithm uses the occupations array\n to guess the number of electrons and the occupied bands. This is to be\n used with care, because the occupations could be smeared so at a\n non-zero temperature, with the unwanted effect that the conduction bands\n might be occupied in an insulator.\n Prefer to pass the number_of_electrons explicitly\n\n :note: Only one between number_electrons and fermi_energy can be specified at the\n same time.\n\n :return: (is_insulator, gap), where is_insulator is a boolean, and gap a\n float. The gap is None in case of a metal, zero when the homo is\n equal to the lumo (e.g. 
in semi-metals).\n \"\"\"\n\n # pylint: disable=too-many-return-statements,too-many-branches,too-many-statements,no-else-return\n\n def nint(num):\n \"\"\"\n Stable rounding function\n \"\"\"\n if num > 0:\n return int(num + .5)\n return int(num - .5)\n\n if fermi_energy and number_electrons:\n raise ValueError('Specify either the number of electrons or the Fermi energy, but not both')\n\n try:\n stored_bands = bandsdata.get_bands()\n except KeyError:\n raise KeyError('Cannot do much of a band analysis without bands')\n\n if len(stored_bands.shape) == 3:\n # I write the algorithm for the generic case of having both the spin up and spin down array\n # put all spins on one band per kpoint\n bands = numpy.concatenate(stored_bands, axis=1)\n else:\n bands = stored_bands\n\n # analysis on occupations:\n if fermi_energy is None:\n\n num_kpoints = len(bands)\n\n if number_electrons is None:\n try:\n _, stored_occupations = bandsdata.get_bands(also_occupations=True)\n except KeyError:\n raise KeyError(\"Cannot determine metallicity if I don't have either fermi energy, or occupations\")\n\n # put the occupations in the same order of bands, also in case of multiple bands\n if len(stored_occupations.shape) == 3:\n # I write the algorithm for the generic case of having both the\n # spin up and spin down array\n\n # put all spins on one band per kpoint\n occupations = numpy.concatenate(stored_occupations, axis=1)\n else:\n occupations = stored_occupations\n\n # now sort the bands by energy\n # Note: I am sort of assuming that I have an electronic ground state\n\n # sort the bands by energy, and reorder the occupations accordingly\n # since after joining the two spins, I might have unsorted stuff\n bands, occupations = [\n numpy.array(y) for y in zip(\n *[\n list(zip(*j)) for j in [\n sorted(zip(i[0].tolist(), i[1].tolist()), key=lambda x: x[0])\n for i in zip(bands, occupations)\n ]\n ]\n )\n ]\n number_electrons = int(round(sum([sum(i) for i in occupations]) / num_kpoints))\n\n homo_indexes = [numpy.where(numpy.array([nint(_) for _ in x]) > 0)[0][-1] for x in occupations]\n if len(set(homo_indexes)) > 1: # there must be intersections of valence and conduction bands\n return False, None\n\n homo = [_[0][_[1]] for _ in zip(bands, homo_indexes)]\n try:\n lumo = [_[0][_[1] + 1] for _ in zip(bands, homo_indexes)]\n except IndexError:\n raise ValueError(\n 'To understand if it is a metal or insulator, '\n 'need more bands than n_band=number_electrons'\n )\n\n else:\n bands = numpy.sort(bands)\n number_electrons = int(number_electrons)\n\n # find the zero-temperature occupation per band (1 for spin-polarized\n # calculation, 2 otherwise)\n number_electrons_per_band = 4 - len(stored_bands.shape) # 1 or 2\n # gather the energies of the homo band, for every kpoint\n homo = [i[number_electrons // number_electrons_per_band - 1] for i in bands] # take the nth level\n try:\n # gather the energies of the lumo band, for every kpoint\n lumo = [i[number_electrons // number_electrons_per_band] for i in bands] # take the n+1th level\n except IndexError:\n raise ValueError(\n 'To understand if it is a metal or insulator, '\n 'need more bands than n_band=number_electrons'\n )\n\n if number_electrons % 2 == 1 and len(stored_bands.shape) == 2:\n # if #electrons is odd and we have a non spin polarized calculation\n # it must be a metal and I don't need further checks\n return False, None\n\n # if the nth band crosses the (n+1)th, it is an insulator\n gap = min(lumo) - max(homo)\n if gap == 0.:\n return False, 0.\n\n if gap < 
0.:\n return False, None\n\n return True, gap\n\n # analysis on the fermi energy\n else:\n # reorganize the bands, rather than per kpoint, per energy level\n\n # I need the bands sorted by energy\n bands.sort()\n\n levels = bands.transpose()\n max_mins = [(max(i), min(i)) for i in levels]\n\n if fermi_energy > bands.max():\n raise ValueError(\"The Fermi energy is above all band energies, don't know what to do\")\n if fermi_energy < bands.min():\n raise ValueError(\"The Fermi energy is below all band energies, don't know what to do.\")\n\n # one band is crossed by the fermi energy\n if any(i[1] < fermi_energy and fermi_energy < i[0] for i in max_mins): # pylint: disable=chained-comparison\n return False, None\n\n # case of semimetals, fermi energy at the crossing of two bands\n # this will only work if the dirac point is computed!\n if (any(i[0] == fermi_energy for i in max_mins) and any(i[1] == fermi_energy for i in max_mins)):\n return False, 0.\n\n # insulating case, take the max of the band maxima below the fermi energy\n homo = max([i[0] for i in max_mins if i[0] < fermi_energy])\n # take the min of the band minima above the fermi energy\n lumo = min([i[1] for i in max_mins if i[1] > fermi_energy])\n gap = lumo - homo\n if gap <= 0.:\n raise Exception('Something wrong has been implemented. Revise the code!')\n return True, gap\n\n\nclass BandsData(KpointsData):\n \"\"\"\n Class to handle bands data\n \"\"\"\n\n def set_kpointsdata(self, kpointsdata):\n \"\"\"\n Load the kpoints from a kpoint object.\n :param kpointsdata: an instance of KpointsData class\n \"\"\"\n if not isinstance(kpointsdata, KpointsData):\n raise ValueError('kpointsdata must be of the KpointsData class')\n try:\n self.cell = kpointsdata.cell\n except AttributeError:\n pass\n try:\n self.pbc = kpointsdata.pbc\n except AttributeError:\n pass\n try:\n the_kpoints = kpointsdata.get_kpoints()\n except AttributeError:\n the_kpoints = None\n try:\n the_weights = kpointsdata.get_kpoints(also_weights=True)[1]\n except AttributeError:\n the_weights = None\n self.set_kpoints(the_kpoints, weights=the_weights)\n try:\n self.labels = kpointsdata.labels\n except (AttributeError, TypeError):\n self.labels = []\n\n def _validate_bands_occupations(self, bands, occupations=None, labels=None):\n \"\"\"\n Validate the list of bands and of occupations before storage.\n Kpoints must be set in advance.\n Bands and occupations must be convertible into arrays of\n Nkpoints x Nbands floats or Nspins x Nkpoints x Nbands; Nkpoints must\n correspond to the number of kpoints.\n \"\"\"\n # pylint: disable=too-many-branches\n try:\n kpoints = self.get_kpoints()\n except AttributeError:\n raise AttributeError('Must first set the kpoints, then the bands')\n\n the_bands = numpy.array(bands)\n\n if len(the_bands.shape) not in [2, 3]:\n raise ValueError(\n 'Bands must be an array of dimension 2'\n '([N_kpoints, N_bands]) or of dimension 3 '\n ' ([N_arrays, N_kpoints, N_bands]), found instead {}'.format(len(the_bands.shape))\n )\n\n list_of_arrays_to_be_checked = []\n\n # check that the shape of everything is consistent with the kpoints\n num_kpoints_from_bands = the_bands.shape[0] if len(the_bands.shape) == 2 else the_bands.shape[1]\n if num_kpoints_from_bands != len(kpoints):\n raise ValueError('There must be energy values for every kpoint')\n\n if occupations is not None:\n the_occupations = numpy.array(occupations)\n if the_occupations.shape != the_bands.shape:\n raise ValueError(\n 'Shape of occupations {} different from shape'\n 'shape of bands 
{}'.format(the_occupations.shape, the_bands.shape)\n )\n\n if not the_bands.dtype.type == numpy.float64:\n list_of_arrays_to_be_checked.append([the_occupations, 'occupations'])\n else:\n the_occupations = None\n # list_of_arrays_to_be_checked = [ [the_bands,'bands'] ]\n\n # check that there every element is a float\n if not the_bands.dtype.type == numpy.float64:\n list_of_arrays_to_be_checked.append([the_bands, 'bands'])\n\n for x, msg in list_of_arrays_to_be_checked:\n try:\n [float(_) for _ in x.flatten() if _ is not None]\n except (TypeError, ValueError):\n raise ValueError('The {} array can only contain float or None values'.format(msg))\n\n # check the labels\n if labels is not None:\n if isinstance(labels, str):\n the_labels = [str(labels)]\n elif isinstance(labels, (tuple, list)) and all([isinstance(_, str) for _ in labels]):\n the_labels = [str(_) for _ in labels]\n else:\n raise ValidationError(\n 'Band labels have an unrecognized type ({})'\n 'but should be a string or a list of strings'.format(labels.__class__)\n )\n\n if len(the_bands.shape) == 2 and len(the_labels) != 1:\n raise ValidationError('More array labels than the number of arrays')\n elif len(the_bands.shape) == 3 and len(the_labels) != the_bands.shape[0]:\n raise ValidationError('More array labels than the number of arrays')\n else:\n the_labels = None\n\n return the_bands, the_occupations, the_labels\n\n def set_bands(self, bands, units=None, occupations=None, labels=None):\n \"\"\"\n Set an array of band energies of dimension (nkpoints x nbands).\n Kpoints must be set in advance. Can contain floats or None.\n :param bands: a list of nkpoints lists of nbands bands, or a 2D array\n of shape (nkpoints x nbands), with band energies for each kpoint\n :param units: optional, energy units\n :param occupations: optional, a 2D list or array of floats of same\n shape as bands, with the occupation associated to each band\n \"\"\"\n # checks bands and occupations\n the_bands, the_occupations, the_labels = self._validate_bands_occupations(bands, occupations, labels)\n # set bands and their units\n self.set_array('bands', the_bands)\n self.units = units\n\n if the_labels is not None:\n self.set_attribute('array_labels', the_labels)\n\n if the_occupations is not None:\n # set occupations\n self.set_array('occupations', the_occupations)\n\n @property\n def array_labels(self):\n \"\"\"\n Get the labels associated with the band arrays\n \"\"\"\n return self.get_attribute('array_labels', None)\n\n @property\n def units(self):\n \"\"\"\n Units in which the data in bands were stored. A string\n \"\"\"\n # return copy.deepcopy(self._pbc)\n return self.get_attribute('units')\n\n @units.setter\n def units(self, value):\n \"\"\"\n Set the value of pbc, i.e. 
a tuple of three booleans, indicating if the\n cell is periodic in the 1,2,3 crystal direction\n \"\"\"\n the_str = str(value)\n self.set_attribute('units', the_str)\n\n def _set_pbc(self, value):\n \"\"\"\n validate the pbc, then store them\n \"\"\"\n from aiida.common.exceptions import ModificationNotAllowed\n from aiida.orm.nodes.data.structure import get_valid_pbc\n\n if self.is_stored:\n raise ModificationNotAllowed('The KpointsData object cannot be modified, it has already been stored')\n the_pbc = get_valid_pbc(value)\n self.set_attribute('pbc1', the_pbc[0])\n self.set_attribute('pbc2', the_pbc[1])\n self.set_attribute('pbc3', the_pbc[2])\n\n def get_bands(self, also_occupations=False, also_labels=False):\n \"\"\"\n Returns an array (nkpoints x num_bands or nspins x nkpoints x num_bands)\n of energies.\n :param also_occupations: if True, returns also the occupations array.\n Default = False\n \"\"\"\n try:\n bands = numpy.array(self.get_array('bands'))\n except KeyError:\n raise AttributeError('No stored bands has been found')\n\n to_return = [bands]\n\n if also_occupations:\n try:\n occupations = numpy.array(self.get_array('occupations'))\n except KeyError:\n raise AttributeError('No occupations were set')\n to_return.append(occupations)\n\n if also_labels:\n to_return.append(self.array_labels)\n\n if len(to_return) == 1:\n return bands\n\n return to_return\n\n def _get_bandplot_data(self, cartesian, prettify_format=None, join_symbol=None, get_segments=False, y_origin=0.):\n \"\"\"\n Get data to plot a band structure\n\n :param cartesian: if True, distances (for the x-axis) are computed in\n cartesian coordinates, otherwise they are computed in reciprocal\n coordinates. cartesian=True will fail if no cell has been set.\n :param prettify_format: by default, strings are not prettified. If you want\n to prettify them, pass a valid prettify_format string (see valid options\n in the docstring of :py:func:prettify_labels).\n :param join_symbols: by default, strings are not joined. 
If you pass a string,\n this is used to join strings that are much closer than a given threshold.\n The most typical string is the pipe symbol: ``|``.\n :param get_segments: if True, also computes the band split into segments\n :param y_origin: if present, shift bands so to set the value specified at ``y=0``\n :return: a plot_info dictiorary, whose keys are ``x`` (array of distances\n for the x axis of the plot); ``y`` (array of bands), ``labels`` (list\n of tuples in the format (float x value of the label, label string),\n ``band_type_idx`` (array containing an index for each band: if there is only\n one spin, then it's an array of zeros, of length equal to the number of bands\n at each point; if there are two spins, then it's an array of zeros or ones\n depending on the type of spin; the length is always equalt to the total\n number of bands per kpoint).\n \"\"\"\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n # load the x and y's of the graph\n stored_bands = self.get_bands()\n if len(stored_bands.shape) == 2:\n bands = stored_bands\n band_type_idx = numpy.array([0] * stored_bands.shape[1])\n two_band_types = False\n elif len(stored_bands.shape) == 3:\n bands = numpy.concatenate(stored_bands, axis=1)\n band_type_idx = numpy.array([0] * stored_bands.shape[2] + [1] * stored_bands.shape[2])\n two_band_types = True\n else:\n raise ValueError('Unexpected shape of bands')\n\n bands -= y_origin\n\n # here I build the x distances on the graph (in cartesian coordinates\n # if cartesian==True AND if the cell was set, otherwise in reciprocal\n # coordinates)\n try:\n kpoints = self.get_kpoints(cartesian=cartesian)\n except AttributeError:\n # this error is happening if cartesian==True and if no cell has been\n # set -> we switch to reciprocal coordinates to compute distances\n kpoints = self.get_kpoints()\n # I take advantage of the path to recognize discontinuities\n try:\n labels = self.labels\n labels_indices = [i[0] for i in labels]\n except (AttributeError, TypeError):\n labels = []\n labels_indices = []\n\n # since I can have discontinuous paths, I set on those points the distance to zero\n # as a result, where there are discontinuities in the path,\n # I have two consecutive points with the same x coordinate\n distances = [\n numpy.linalg.norm(kpoints[i] -\n kpoints[i - 1]) if not (i in labels_indices and i - 1 in labels_indices) else 0.\n for i in range(1, len(kpoints))\n ]\n x = [float(sum(distances[:i])) for i in range(len(distances) + 1)]\n\n # transform the index of the labels in the coordinates of x\n raw_labels = [(x[i[0]], i[1]) for i in labels]\n\n the_labels = raw_labels\n\n if prettify_format:\n the_labels = prettify_labels(the_labels, format=prettify_format)\n if join_symbol:\n the_labels = join_labels(the_labels, join_symbol=join_symbol)\n\n plot_info = {}\n plot_info['x'] = x\n plot_info['y'] = bands\n plot_info['band_type_idx'] = band_type_idx\n plot_info['raw_labels'] = raw_labels\n plot_info['labels'] = the_labels\n\n if get_segments:\n plot_info['path'] = []\n plot_info['paths'] = []\n\n if len(labels) > 1:\n # I add an empty label that points to the first band if the first label does not do it\n if labels[0][0] != 0:\n labels.insert(0, (0, ''))\n # I add an empty label that points to the last band if the last label does not do it\n if labels[-1][0] != len(bands) - 1:\n labels.append((len(bands) - 1, ''))\n for (position_from, label_from), (position_to, label_to) in zip(labels[:-1], labels[1:]):\n if position_to - position_from > 1:\n # Create a 
new path line only if there are at least two points,\n # otherwise it is probably just a discontinuity point in the band\n # structure (e.g. Gamma-X|Y-Gamma), where X and Y would be two\n # consecutive points, but there is no path between them\n plot_info['path'].append([label_from, label_to])\n path_dict = {\n 'length': position_to - position_from,\n 'from': label_from,\n 'to': label_to,\n 'values': bands[position_from:position_to + 1, :].transpose().tolist(),\n 'x': x[position_from:position_to + 1],\n 'two_band_types': two_band_types,\n }\n plot_info['paths'].append(path_dict)\n else:\n label_from = '0'\n label_to = '1'\n path_dict = {\n 'length': bands.shape[0] - 1,\n 'from': label_from,\n 'to': label_to,\n 'values': bands.transpose().tolist(),\n 'x': x,\n 'two_band_types': two_band_types,\n }\n plot_info['paths'].append(path_dict)\n plot_info['path'].append([label_from, label_to])\n\n return plot_info\n\n def _prepare_agr_batch(self, main_file_name='', comments=True, prettify_format=None):\n \"\"\"\n Prepare two files, data and batch, to be plot with xmgrace as:\n xmgrace -batch file.dat\n\n :param main_file_name: if the user asks to write the main content on a\n file, this contains the filename. This should be used to infer a\n good filename for the additional files.\n In this case, we remove the extension, and add '_data.dat'\n :param comments: if True, print comments (if it makes sense for the given\n format)\n :param prettify_format: if None, use the default prettify format. Otherwise\n specify a string with the prettifier to use.\n \"\"\"\n # pylint: disable=too-many-locals\n import os\n\n dat_filename = os.path.splitext(main_file_name)[0] + '_data.dat'\n\n if prettify_format is None:\n # Default. Specified like this to allow caller functions to pass 'None'\n prettify_format = 'agr_seekpath'\n\n plot_info = self._get_bandplot_data(cartesian=True, prettify_format=prettify_format, join_symbol='|')\n\n bands = plot_info['y']\n x = plot_info['x']\n labels = plot_info['labels']\n\n num_bands = bands.shape[1]\n\n # axis limits\n y_max_lim = bands.max()\n y_min_lim = bands.min()\n x_min_lim = min(x) # this isn't a numpy array, but a list\n x_max_lim = max(x)\n\n # first prepare the xy coordinates of the sets\n raw_data, _ = self._prepare_dat_blocks(plot_info)\n\n batch = []\n if comments:\n batch.append(prepare_header_comment(self.uuid, plot_info, comment_char='#'))\n\n batch.append('READ XY \"{}\"'.format(dat_filename))\n\n # axis limits\n batch.append('world {}, {}, {}, {}'.format(x_min_lim, y_min_lim, x_max_lim, y_max_lim))\n\n # axis label\n batch.append('yaxis label \"Dispersion\"')\n\n # axis ticks\n batch.append('xaxis tick place both')\n batch.append('xaxis tick spec type both')\n batch.append('xaxis tick spec {}'.format(len(labels)))\n # set the name of the special points\n for index, label in enumerate(labels):\n batch.append('xaxis tick major {}, {}'.format(index, label[0]))\n batch.append('xaxis ticklabel {}, \"{}\"'.format(index, label[1]))\n batch.append('xaxis tick major color 7')\n batch.append('xaxis tick major grid on')\n\n # minor graphical tweak\n batch.append('yaxis tick minor ticks 3')\n batch.append('frame linewidth 1.0')\n\n # use helvetica fonts\n batch.append('map font 4 to \"Helvetica\", \"Helvetica\"')\n batch.append('yaxis label font 4')\n batch.append('xaxis label font 4')\n\n # set color and linewidths of bands\n for index in range(num_bands):\n batch.append('s{} line color 1'.format(index))\n batch.append('s{} linewidth 1'.format(index))\n\n batch_data = 
'\\n'.join(batch) + '\\n'\n extra_files = {dat_filename: raw_data}\n\n return batch_data.encode('utf-8'), extra_files\n\n def _prepare_dat_multicolumn(self, main_file_name='', comments=True): # pylint: disable=unused-argument\n \"\"\"\n Write an N x M matrix. First column is the distance between kpoints,\n The other columns are the bands. Header contains number of kpoints and\n the number of bands (commented).\n\n :param comments: if True, print comments (if it makes sense for the given\n format)\n \"\"\"\n plot_info = self._get_bandplot_data(cartesian=True, prettify_format=None, join_symbol='|')\n\n bands = plot_info['y']\n x = plot_info['x']\n\n return_text = []\n if comments:\n return_text.append(prepare_header_comment(self.uuid, plot_info, comment_char='#'))\n\n for i in zip(x, bands):\n line = ['{:.8f}'.format(i[0])] + ['{:.8f}'.format(j) for j in i[1]]\n return_text.append('\\t'.join(line))\n\n return ('\\n'.join(return_text) + '\\n').encode('utf-8'), {}\n\n def _prepare_dat_blocks(self, main_file_name='', comments=True): # pylint: disable=unused-argument\n \"\"\"\n Format suitable for gnuplot using blocks.\n Columns with x and y (path and band energy). Several blocks, separated\n by two empty lines, one per energy band.\n\n :param comments: if True, print comments (if it makes sense for the given\n format)\n \"\"\"\n plot_info = self._get_bandplot_data(cartesian=True, prettify_format=None, join_symbol='|')\n\n bands = plot_info['y']\n x = plot_info['x']\n\n return_text = []\n if comments:\n return_text.append(prepare_header_comment(self.uuid, plot_info, comment_char='#'))\n\n for band in numpy.transpose(bands):\n for i in zip(x, band):\n line = ['{:.8f}'.format(i[0]), '{:.8f}'.format(i[1])]\n return_text.append('\\t'.join(line))\n return_text.append('')\n return_text.append('')\n\n return '\\n'.join(return_text).encode('utf-8'), {}\n\n def _matplotlib_get_dict(\n self,\n main_file_name='',\n comments=True,\n title='',\n legend=None,\n legend2=None,\n y_max_lim=None,\n y_min_lim=None,\n y_origin=0.,\n prettify_format=None,\n **kwargs\n ): # pylint: disable=unused-argument\n \"\"\"\n Prepare the data to send to the python-matplotlib plotting script.\n\n :param comments: if True, print comments (if it makes sense for the given\n format)\n :param plot_info: a dictionary\n :param setnumber_offset: an offset to be applied to all set numbers\n (i.e. s0 is replaced by s[offset], s1 by s[offset+1], etc.)\n :param color_number: the color number for lines, symbols, error bars\n and filling (should be less than the parameter MAX_NUM_AGR_COLORS\n defined below)\n :param title: the title\n :param legend: the legend (applied only to the first of the set)\n :param legend2: the legend for second-type spins\n (applied only to the first of the set)\n :param y_max_lim: the maximum on the y axis (if None, put the\n maximum of the bands)\n :param y_min_lim: the minimum on the y axis (if None, put the\n minimum of the bands)\n :param y_origin: the new origin of the y axis -> all bands are replaced\n by bands-y_origin\n :param prettify_format: if None, use the default prettify format. 
Otherwise\n specify a string with the prettifier to use.\n :param kwargs: additional customization variables; only a subset is\n accepted, see internal variable 'valid_additional_keywords\n \"\"\"\n # pylint: disable=too-many-arguments,too-many-locals\n\n # Only these keywords are accepted in kwargs, and then set into the json\n valid_additional_keywords = [\n 'bands_color', # Color of band lines\n 'bands_linewidth', # linewidth of bands\n 'bands_linestyle', # linestyle of bands\n 'bands_marker', # marker for bands\n 'bands_markersize', # size of the marker of bands\n 'bands_markeredgecolor', # marker edge color for bands\n 'bands_markeredgewidth', # marker edge width for bands\n 'bands_markerfacecolor', # marker face color for bands\n 'bands_color2', # Color of band lines (for other spin, if present)\n 'bands_linewidth2', # linewidth of bands (for other spin, if present)\n 'bands_linestyle2', # linestyle of bands (for other spin, if present)\n 'bands_marker2', # marker for bands (for other spin, if present)\n 'bands_markersize2', # size of the marker of bands (for other spin, if present)\n 'bands_markeredgecolor2', # marker edge color for bands (for other spin, if present)\n 'bands_markeredgewidth2', # marker edge width for bands (for other spin, if present)\n 'bands_markerfacecolor2', # marker face color for bands (for other spin, if present)\n 'plot_zero_axis', # If true, plot an axis at y=0\n 'zero_axis_color', # Color of the axis at y=0\n 'zero_axis_linestyle', # linestyle of the axis at y=0\n 'zero_axis_linewidth', # linewidth of the axis at y=0\n 'use_latex', # If true, use latex to render captions\n ]\n\n # Note: I do not want to import matplotlib here, for two reasons:\n # 1. I would like to be able to print the script for the user\n # 2. I don't want to mess up with the user matplotlib backend\n # (that I should do if the user does not have a X server, but that\n # I do not want to do if he's e.g. in jupyter)\n # Therefore I just create a string that can be executed as needed, e.g. with eval.\n # I take care of sanitizing the output.\n if prettify_format is None:\n # Default. 
Specified like this to allow caller functions to pass 'None'\n prettify_format = 'latex_seekpath'\n\n # The default for use_latex is False\n join_symbol = r'\\textbar{}' if kwargs.get('use_latex', False) else '|'\n\n plot_info = self._get_bandplot_data(\n cartesian=True,\n prettify_format=prettify_format,\n join_symbol=join_symbol,\n get_segments=True,\n y_origin=y_origin\n )\n\n all_data = {}\n\n bands = plot_info['y']\n x = plot_info['x']\n labels = plot_info['labels']\n # prepare xticks labels\n if labels:\n tick_pos, tick_labels = zip(*labels)\n else:\n tick_pos = []\n tick_labels = []\n\n all_data['paths'] = plot_info['paths']\n all_data['band_type_idx'] = plot_info['band_type_idx'].tolist()\n\n all_data['tick_pos'] = tick_pos\n all_data['tick_labels'] = tick_labels\n all_data['legend_text'] = legend\n all_data['legend_text2'] = legend2\n all_data['yaxis_label'] = 'Dispersion ({})'.format(self.units)\n all_data['title'] = title\n if comments:\n all_data['comment'] = prepare_header_comment(self.uuid, plot_info, comment_char='#')\n\n # axis limits\n if y_max_lim is None:\n y_max_lim = numpy.array(bands).max()\n if y_min_lim is None:\n y_min_lim = numpy.array(bands).min()\n x_min_lim = min(x) # this isn't a numpy array, but a list\n x_max_lim = max(x)\n all_data['x_min_lim'] = x_min_lim\n all_data['x_max_lim'] = x_max_lim\n all_data['y_min_lim'] = y_min_lim\n all_data['y_max_lim'] = y_max_lim\n\n for key, value in kwargs.items():\n if key not in valid_additional_keywords:\n raise TypeError(\"_matplotlib_get_dict() got an unexpected keyword argument '{}'\".format(key))\n all_data[key] = value\n\n return all_data\n\n def _prepare_mpl_singlefile(self, *args, **kwargs):\n \"\"\"\n Prepare a python script using matplotlib to plot the bands\n\n For the possible parameters, see documentation of\n :py:meth:`~aiida.orm.nodes.data.array.bands.BandsData._matplotlib_get_dict`\n \"\"\"\n from aiida.common import json\n\n all_data = self._matplotlib_get_dict(*args, **kwargs)\n\n s_header = MATPLOTLIB_HEADER_TEMPLATE.substitute()\n s_import = MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE.substitute(all_data_json=json.dumps(all_data, indent=2))\n s_body = self._get_mpl_body_template(all_data['paths'])\n s_footer = MATPLOTLIB_FOOTER_TEMPLATE_SHOW.substitute()\n\n string = s_header + s_import + s_body + s_footer\n\n return string.encode('utf-8'), {}\n\n def _prepare_mpl_withjson(self, main_file_name='', *args, **kwargs): # pylint: disable=keyword-arg-before-vararg\n \"\"\"\n Prepare a python script using matplotlib to plot the bands, with the JSON\n returned as an independent file.\n\n For the possible parameters, see documentation of\n :py:meth:`~aiida.orm.nodes.data.array.bands.BandsData._matplotlib_get_dict`\n \"\"\"\n import os\n\n from aiida.common import json\n\n all_data = self._matplotlib_get_dict(*args, main_file_name=main_file_name, **kwargs)\n\n json_fname = os.path.splitext(main_file_name)[0] + '_data.json'\n # Escape double_quotes\n json_fname = json_fname.replace('\"', '\\\"')\n\n ext_files = {json_fname: json.dumps(all_data, indent=2).encode('utf-8')}\n\n s_header = MATPLOTLIB_HEADER_TEMPLATE.substitute()\n s_import = MATPLOTLIB_IMPORT_DATA_FROMFILE_TEMPLATE.substitute(json_fname=json_fname)\n s_body = self._get_mpl_body_template(all_data['paths'])\n s_footer = MATPLOTLIB_FOOTER_TEMPLATE_SHOW.substitute()\n\n string = s_header + s_import + s_body + s_footer\n\n return string.encode('utf-8'), ext_files\n\n def _prepare_mpl_pdf(self, main_file_name='', *args, **kwargs): # pylint: 
disable=keyword-arg-before-vararg,unused-argument\n \"\"\"\n Prepare a python script using matplotlib to plot the bands, with the JSON\n returned as an independent file.\n\n For the possible parameters, see documentation of\n :py:meth:`~aiida.orm.nodes.data.array.bands.BandsData._matplotlib_get_dict`\n \"\"\"\n import os\n import tempfile\n import subprocess\n import sys\n\n from aiida.common import json\n\n all_data = self._matplotlib_get_dict(*args, **kwargs)\n\n # Use the Agg backend\n s_header = MATPLOTLIB_HEADER_AGG_TEMPLATE.substitute()\n s_import = MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE.substitute(all_data_json=json.dumps(all_data, indent=2))\n s_body = self._get_mpl_body_template(all_data['paths'])\n\n # I get a temporary file name\n handle, filename = tempfile.mkstemp()\n os.close(handle)\n os.remove(filename)\n\n escaped_fname = filename.replace('\"', '\\\"')\n\n s_footer = MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE.substitute(fname=escaped_fname, format='pdf')\n\n string = s_header + s_import + s_body + s_footer\n\n # I don't exec it because I might mess up with the matplotlib backend etc.\n # I run instead in a different process, with the same executable\n # (so it should work properly with virtualenvs)\n with tempfile.NamedTemporaryFile(mode='w+') as handle:\n handle.write(string)\n handle.flush()\n subprocess.check_output([sys.executable, handle.name])\n\n if not os.path.exists(filename):\n raise RuntimeError('Unable to generate the PDF...')\n\n with open(filename, 'rb', encoding=None) as handle:\n imgdata = handle.read()\n os.remove(filename)\n\n return imgdata, {}\n\n def _prepare_mpl_png(self, main_file_name='', *args, **kwargs): # pylint: disable=keyword-arg-before-vararg,unused-argument\n \"\"\"\n Prepare a python script using matplotlib to plot the bands, with the JSON\n returned as an independent file.\n\n For the possible parameters, see documentation of\n :py:meth:`~aiida.orm.nodes.data.array.bands.BandsData._matplotlib_get_dict`\n \"\"\"\n import json\n import os\n import tempfile\n import subprocess\n import sys\n\n all_data = self._matplotlib_get_dict(*args, **kwargs)\n\n # Use the Agg backend\n s_header = MATPLOTLIB_HEADER_AGG_TEMPLATE.substitute()\n s_import = MATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE.substitute(all_data_json=json.dumps(all_data, indent=2))\n s_body = self._get_mpl_body_template(all_data['paths'])\n\n # I get a temporary file name\n handle, filename = tempfile.mkstemp()\n os.close(handle)\n os.remove(filename)\n\n escaped_fname = filename.replace('\"', '\\\"')\n\n s_footer = MATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE_WITH_DPI.substitute(fname=escaped_fname, format='png', dpi=300)\n\n string = s_header + s_import + s_body + s_footer\n\n # I don't exec it because I might mess up with the matplotlib backend etc.\n # I run instead in a different process, with the same executable\n # (so it should work properly with virtualenvs)\n with tempfile.NamedTemporaryFile(mode='w+') as handle:\n handle.write(string)\n handle.flush()\n subprocess.check_output([sys.executable, handle.name])\n\n if not os.path.exists(filename):\n raise RuntimeError('Unable to generate the PNG...')\n\n with open(filename, 'rb', encoding=None) as handle:\n imgdata = handle.read()\n os.remove(filename)\n\n return imgdata, {}\n\n @staticmethod\n def _get_mpl_body_template(paths):\n \"\"\"\n :param paths: paths of k-points\n \"\"\"\n if len(paths) == 1:\n s_body = MATPLOTLIB_BODY_TEMPLATE.substitute(plot_code=SINGLE_KP)\n else:\n s_body = MATPLOTLIB_BODY_TEMPLATE.substitute(plot_code=MULTI_KP)\n 
return s_body\n\n def show_mpl(self, **kwargs):\n \"\"\"\n Call a show() command for the band structure using matplotlib.\n This uses internally the 'mpl_singlefile' format, with empty\n main_file_name.\n\n Other kwargs are passed to self._exportcontent.\n \"\"\"\n exec(*self._exportcontent(fileformat='mpl_singlefile', main_file_name='', **kwargs)) # pylint: disable=exec-used\n\n def _prepare_gnuplot(\n self,\n main_file_name=None,\n title='',\n comments=True,\n prettify_format=None,\n y_max_lim=None,\n y_min_lim=None,\n y_origin=0.\n ):\n \"\"\"\n Prepare an gnuplot script to plot the bands, with the .dat file\n returned as an independent file.\n\n :param main_file_name: if the user asks to write the main content on a\n file, this contains the filename. This should be used to infer a\n good filename for the additional files.\n In this case, we remove the extension, and add '_data.dat'\n :param title: if specified, add a title to the plot\n :param comments: if True, print comments (if it makes sense for the given\n format)\n :param prettify_format: if None, use the default prettify format. Otherwise\n specify a string with the prettifier to use.\n \"\"\"\n # pylint: disable=too-many-arguments,too-many-locals\n import os\n\n main_file_name = main_file_name or 'band.dat'\n dat_filename = os.path.splitext(main_file_name)[0] + '_data.dat'\n\n if prettify_format is None:\n # Default. Specified like this to allow caller functions to pass 'None'\n prettify_format = 'gnuplot_seekpath'\n\n plot_info = self._get_bandplot_data(\n cartesian=True, prettify_format=prettify_format, join_symbol='|', y_origin=y_origin\n )\n\n bands = plot_info['y']\n x = plot_info['x']\n\n # axis limits\n if y_max_lim is None:\n y_max_lim = bands.max()\n if y_min_lim is None:\n y_min_lim = bands.min()\n x_min_lim = min(x) # this isn't a numpy array, but a list\n x_max_lim = max(x)\n\n # first prepare the xy coordinates of the sets\n raw_data, _ = self._prepare_dat_blocks(plot_info, comments=comments)\n\n xtics_string = ', '.join('\"{}\" {}'.format(label, pos) for pos, label in plot_info['labels'])\n\n script = []\n # Start with some useful comments\n\n if comments:\n script.append(prepare_header_comment(self.uuid, plot_info=plot_info, comment_char='# '))\n script.append('')\n\n script.append(\n \"\"\"## Uncomment the next two lines to write directly to PDF\n## Note: You need to have gnuplot installed with pdfcairo support!\n#set term pdfcairo\n#set output 'out.pdf'\n\n### Uncomment one of the options below to change font\n### For the LaTeX fonts, you can download them from here:\n### https://sourceforge.net/projects/cm-unicode/\n### And then install them in your system\n## LaTeX Serif font, if installed\n#set termopt font \"CMU Serif, 12\"\n## LaTeX Sans Serif font, if installed\n#set termopt font \"CMU Sans Serif, 12\"\n## Classical Times New Roman\n#set termopt font \"Times New Roman, 12\"\n\"\"\"\n )\n\n # Actual logic\n script.append('set termopt enhanced') # Properly deals with e.g. 
subscripts\n script.append('set encoding utf8') # To deal with Greek letters\n script.append('set xtics ({})'.format(xtics_string))\n script.append('unset key')\n script.append('set yrange [{}:{}]'.format(y_min_lim, y_max_lim))\n script.append('set ylabel \"{}\"'.format('Dispersion ({})'.format(self.units)))\n\n if title:\n script.append('set title \"{}\"'.format(title.replace('\"', '\\\"')))\n\n # Plot, escaping filename\n if len(x) > 1:\n script.append('set xrange [{}:{}]'.format(x_min_lim, x_max_lim))\n script.append('set grid xtics lt 1 lc rgb \"#888888\"')\n script.append('plot \"{}\" with l lc rgb \"#000000\"'.format(os.path.basename(dat_filename).replace('\"', '\\\"')))\n else:\n script.append('set xrange [-1.0:1.0]')\n script.append(\n 'plot \"{}\" using ($1-0.25):($2):(0.5):(0) with vectors nohead lc rgb \"#000000\"'.format(\n os.path.basename(dat_filename).replace('\"', '\\\"')\n )\n )\n\n script_data = '\\n'.join(script) + '\\n'\n extra_files = {dat_filename: raw_data}\n\n return script_data.encode('utf-8'), extra_files\n\n def _prepare_agr(\n self,\n main_file_name='',\n comments=True,\n setnumber_offset=0,\n color_number=1,\n color_number2=2,\n legend='',\n title='',\n y_max_lim=None,\n y_min_lim=None,\n y_origin=0.,\n prettify_format=None\n ):\n \"\"\"\n Prepare an xmgrace agr file.\n\n :param comments: if True, print comments\n (if it makes sense for the given format)\n :param plot_info: a dictionary\n :param setnumber_offset: an offset to be applied to all set numbers\n (i.e. s0 is replaced by s[offset], s1 by s[offset+1], etc.)\n :param color_number: the color number for lines, symbols, error bars\n and filling (should be less than the parameter MAX_NUM_AGR_COLORS\n defined below)\n :param color_number2: the color number for lines, symbols, error bars\n and filling for the second-type spins (should be less than the\n parameter MAX_NUM_AGR_COLORS defined below)\n :param legend: the legend (applied only to the first set)\n :param title: the title\n :param y_max_lim: the maximum on the y axis (if None, put the\n maximum of the bands); applied *after* shifting the origin\n by ``y_origin``\n :param y_min_lim: the minimum on the y axis (if None, put the\n minimum of the bands); applied *after* shifting the origin\n by ``y_origin``\n :param y_origin: the new origin of the y axis -> all bands are replaced\n by bands-y_origin\n :param prettify_format: if None, use the default prettify format. Otherwise\n specify a string with the prettifier to use.\n \"\"\"\n # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,unused-argument\n if prettify_format is None:\n # Default. 
Specified like this to allow caller functions to pass 'None'\n prettify_format = 'agr_seekpath'\n\n plot_info = self._get_bandplot_data(\n cartesian=True, prettify_format=prettify_format, join_symbol='|', y_origin=y_origin\n )\n\n import math\n # load the x and y of every set\n if color_number > MAX_NUM_AGR_COLORS:\n raise ValueError('Color number is too high (should be less than {})'.format(MAX_NUM_AGR_COLORS))\n if color_number2 > MAX_NUM_AGR_COLORS:\n raise ValueError('Color number 2 is too high (should be less than {})'.format(MAX_NUM_AGR_COLORS))\n\n bands = plot_info['y']\n x = plot_info['x']\n the_bands = numpy.transpose(bands)\n labels = plot_info['labels']\n num_labels = len(labels)\n\n # axis limits\n if y_max_lim is None:\n y_max_lim = the_bands.max()\n if y_min_lim is None:\n y_min_lim = the_bands.min()\n x_min_lim = min(x) # this isn't a numpy array, but a list\n x_max_lim = max(x)\n ytick_spacing = 10**int(math.log10((y_max_lim - y_min_lim)))\n\n # prepare xticks labels\n sx1 = ''\n for i, label in enumerate(labels):\n sx1 += AGR_SINGLE_XTICK_TEMPLATE.substitute(\n index=i,\n coord=label[0],\n name=label[1],\n )\n xticks = AGR_XTICKS_TEMPLATE.substitute(\n num_labels=num_labels,\n single_xtick_templates=sx1,\n )\n\n # build the arrays with the xy coordinates\n all_sets = []\n for band in the_bands:\n this_set = ''\n for i in zip(x, band):\n line = '{:.8f}'.format(i[0]) + '\\t' + '{:.8f}'.format(i[1]) + '\\n'\n this_set += line\n all_sets.append(this_set)\n\n set_descriptions = ''\n for i, (this_set, band_type) in enumerate(zip(all_sets, plot_info['band_type_idx'])):\n if band_type % 2 == 0:\n linecolor = color_number\n else:\n linecolor = color_number2\n width = str(2.0)\n set_descriptions += AGR_SET_DESCRIPTION_TEMPLATE.substitute(\n set_number=i + setnumber_offset,\n linewidth=width,\n color_number=linecolor,\n legend=legend if i == 0 else ''\n )\n\n units = self.units\n\n graphs = AGR_GRAPH_TEMPLATE.substitute(\n x_min_lim=x_min_lim,\n y_min_lim=y_min_lim,\n x_max_lim=x_max_lim,\n y_max_lim=y_max_lim,\n yaxislabel='Dispersion ({})'.format(units),\n xticks_template=xticks,\n set_descriptions=set_descriptions,\n ytick_spacing=ytick_spacing,\n title=title,\n )\n sets = []\n for i, this_set in enumerate(all_sets):\n sets.append(AGR_SINGLESET_TEMPLATE.substitute(set_number=i + setnumber_offset, xydata=this_set))\n the_sets = '&\\n'.join(sets)\n\n string = AGR_TEMPLATE.substitute(graphs=graphs, sets=the_sets)\n\n if comments:\n string = prepare_header_comment(self.uuid, plot_info, comment_char='#') + '\\n' + string\n\n return string.encode('utf-8'), {}\n\n def _get_band_segments(self, cartesian):\n \"\"\"Return the band segments.\"\"\"\n plot_info = self._get_bandplot_data(\n cartesian=cartesian, prettify_format=None, join_symbol=None, get_segments=True\n )\n\n out_dict = {'label': self.label}\n\n out_dict['path'] = plot_info['path']\n out_dict['paths'] = plot_info['paths']\n\n return out_dict\n\n def _prepare_json(self, main_file_name='', comments=True): # pylint: disable=unused-argument\n \"\"\"\n Prepare a json file in a format compatible with the AiiDA band visualizer\n\n :param comments: if True, print comments (if it makes sense for the given\n format)\n \"\"\"\n from aiida import get_file_header\n from aiida.common import json\n\n json_dict = self._get_band_segments(cartesian=True)\n json_dict['original_uuid'] = self.uuid\n\n if comments:\n json_dict['comments'] = get_file_header(comment_char='')\n\n return json.dumps(json_dict).encode('utf-8'), {}\n\n\nMAX_NUM_AGR_COLORS 
= 15\n\nAGR_TEMPLATE = Template(\n \"\"\"\n # Grace project file\n #\n @version 50122\n @page size 792, 612\n @page scroll 5%\n @page inout 5%\n @link page off\n @map font 8 to \"Courier\", \"Courier\"\n @map font 10 to \"Courier-Bold\", \"Courier-Bold\"\n @map font 11 to \"Courier-BoldOblique\", \"Courier-BoldOblique\"\n @map font 9 to \"Courier-Oblique\", \"Courier-Oblique\"\n @map font 4 to \"Helvetica\", \"Helvetica\"\n @map font 6 to \"Helvetica-Bold\", \"Helvetica-Bold\"\n @map font 7 to \"Helvetica-BoldOblique\", \"Helvetica-BoldOblique\"\n @map font 5 to \"Helvetica-Oblique\", \"Helvetica-Oblique\"\n @map font 14 to \"NimbusMonoL-BoldOblique\", \"NimbusMonoL-BoldOblique\"\n @map font 15 to \"NimbusMonoL-Regular\", \"NimbusMonoL-Regular\"\n @map font 16 to \"NimbusMonoL-RegularOblique\", \"NimbusMonoL-RegularOblique\"\n @map font 17 to \"NimbusRomanNo9L-Medium\", \"NimbusRomanNo9L-Medium\"\n @map font 18 to \"NimbusRomanNo9L-MediumItalic\", \"NimbusRomanNo9L-MediumItalic\"\n @map font 19 to \"NimbusRomanNo9L-Regular\", \"NimbusRomanNo9L-Regular\"\n @map font 20 to \"NimbusRomanNo9L-RegularItalic\", \"NimbusRomanNo9L-RegularItalic\"\n @map font 21 to \"NimbusSansL-Bold\", \"NimbusSansL-Bold\"\n @map font 22 to \"NimbusSansL-BoldCondensed\", \"NimbusSansL-BoldCondensed\"\n @map font 23 to \"NimbusSansL-BoldCondensedItalic\", \"NimbusSansL-BoldCondensedItalic\"\n @map font 24 to \"NimbusSansL-BoldItalic\", \"NimbusSansL-BoldItalic\"\n @map font 25 to \"NimbusSansL-Regular\", \"NimbusSansL-Regular\"\n @map font 26 to \"NimbusSansL-RegularCondensed\", \"NimbusSansL-RegularCondensed\"\n @map font 27 to \"NimbusSansL-RegularCondensedItalic\", \"NimbusSansL-RegularCondensedItalic\"\n @map font 28 to \"NimbusSansL-RegularItalic\", \"NimbusSansL-RegularItalic\"\n @map font 29 to \"StandardSymbolsL-Regular\", \"StandardSymbolsL-Regular\"\n @map font 12 to \"Symbol\", \"Symbol\"\n @map font 31 to \"Symbol-Regular\", \"Symbol-Regular\"\n @map font 2 to \"Times-Bold\", \"Times-Bold\"\n @map font 3 to \"Times-BoldItalic\", \"Times-BoldItalic\"\n @map font 1 to \"Times-Italic\", \"Times-Italic\"\n @map font 0 to \"Times-Roman\", \"Times-Roman\"\n @map font 36 to \"URWBookmanL-DemiBold\", \"URWBookmanL-DemiBold\"\n @map font 37 to \"URWBookmanL-DemiBoldItalic\", \"URWBookmanL-DemiBoldItalic\"\n @map font 38 to \"URWBookmanL-Light\", \"URWBookmanL-Light\"\n @map font 39 to \"URWBookmanL-LightItalic\", \"URWBookmanL-LightItalic\"\n @map font 40 to \"URWChanceryL-MediumItalic\", \"URWChanceryL-MediumItalic\"\n @map font 41 to \"URWGothicL-Book\", \"URWGothicL-Book\"\n @map font 42 to \"URWGothicL-BookOblique\", \"URWGothicL-BookOblique\"\n @map font 43 to \"URWGothicL-Demi\", \"URWGothicL-Demi\"\n @map font 44 to \"URWGothicL-DemiOblique\", \"URWGothicL-DemiOblique\"\n @map font 45 to \"URWPalladioL-Bold\", \"URWPalladioL-Bold\"\n @map font 46 to \"URWPalladioL-BoldItalic\", \"URWPalladioL-BoldItalic\"\n @map font 47 to \"URWPalladioL-Italic\", \"URWPalladioL-Italic\"\n @map font 48 to \"URWPalladioL-Roman\", \"URWPalladioL-Roman\"\n @map font 13 to \"ZapfDingbats\", \"ZapfDingbats\"\n @map color 0 to (255, 255, 255), \"white\"\n @map color 1 to (0, 0, 0), \"black\"\n @map color 2 to (255, 0, 0), \"red\"\n @map color 3 to (0, 255, 0), \"green\"\n @map color 4 to (0, 0, 255), \"blue\"\n @map color 5 to (255, 215, 0), \"yellow\"\n @map color 6 to (188, 143, 143), \"brown\"\n @map color 7 to (220, 220, 220), \"grey\"\n @map color 8 to (148, 0, 211), \"violet\"\n @map color 9 to (0, 255, 255), \"cyan\"\n 
@map color 10 to (255, 0, 255), \"magenta\"\n @map color 11 to (255, 165, 0), \"orange\"\n @map color 12 to (114, 33, 188), \"indigo\"\n @map color 13 to (103, 7, 72), \"maroon\"\n @map color 14 to (64, 224, 208), \"turquoise\"\n @map color 15 to (0, 139, 0), \"green4\"\n @reference date 0\n @date wrap off\n @date wrap year 1950\n @default linewidth 1.0\n @default linestyle 1\n @default color 1\n @default pattern 1\n @default font 0\n @default char size 1.000000\n @default symbol size 1.000000\n @default sformat \"%.8g\"\n @background color 0\n @page background fill on\n @timestamp off\n @timestamp 0.03, 0.03\n @timestamp color 1\n @timestamp rot 0\n @timestamp font 0\n @timestamp char size 1.000000\n @timestamp def \"Wed Jul 30 16:44:34 2014\"\n @r0 off\n @link r0 to g0\n @r0 type above\n @r0 linestyle 1\n @r0 linewidth 1.0\n @r0 color 1\n @r0 line 0, 0, 0, 0\n @r1 off\n @link r1 to g0\n @r1 type above\n @r1 linestyle 1\n @r1 linewidth 1.0\n @r1 color 1\n @r1 line 0, 0, 0, 0\n @r2 off\n @link r2 to g0\n @r2 type above\n @r2 linestyle 1\n @r2 linewidth 1.0\n @r2 color 1\n @r2 line 0, 0, 0, 0\n @r3 off\n @link r3 to g0\n @r3 type above\n @r3 linestyle 1\n @r3 linewidth 1.0\n @r3 color 1\n @r3 line 0, 0, 0, 0\n @r4 off\n @link r4 to g0\n @r4 type above\n @r4 linestyle 1\n @r4 linewidth 1.0\n @r4 color 1\n @r4 line 0, 0, 0, 0\n $graphs\n $sets\n \"\"\"\n)\n\nAGR_XTICKS_TEMPLATE = Template(\"\"\"\n @ xaxis tick spec $num_labels\n $single_xtick_templates\n \"\"\")\n\nAGR_SINGLE_XTICK_TEMPLATE = Template(\n \"\"\"\n @ xaxis tick major $index, $coord\n @ xaxis ticklabel $index, \"$name\"\n \"\"\"\n)\n\nAGR_GRAPH_TEMPLATE = Template(\n \"\"\"\n @g0 on\n @g0 hidden false\n @g0 type XY\n @g0 stacked false\n @g0 bar hgap 0.000000\n @g0 fixedpoint off\n @g0 fixedpoint type 0\n @g0 fixedpoint xy 0.000000, 0.000000\n @g0 fixedpoint format general general\n @g0 fixedpoint prec 6, 6\n @with g0\n @ world $x_min_lim, $y_min_lim, $x_max_lim, $y_max_lim\n @ stack world 0, 0, 0, 0\n @ znorm 1\n @ view 0.150000, 0.150000, 1.150000, 0.850000\n @ title \"$title\"\n @ title font 0\n @ title size 1.500000\n @ title color 1\n @ subtitle \"\"\n @ subtitle font 0\n @ subtitle size 1.000000\n @ subtitle color 1\n @ xaxes scale Normal\n @ yaxes scale Normal\n @ xaxes invert off\n @ yaxes invert off\n @ xaxis on\n @ xaxis type zero false\n @ xaxis offset 0.000000 , 0.000000\n @ xaxis bar on\n @ xaxis bar color 1\n @ xaxis bar linestyle 1\n @ xaxis bar linewidth 1.0\n @ xaxis label \"\"\n @ xaxis label layout para\n @ xaxis label place auto\n @ xaxis label char size 1.000000\n @ xaxis label font 4\n @ xaxis label color 1\n @ xaxis label place normal\n @ xaxis tick on\n @ xaxis tick major 5\n @ xaxis tick minor ticks 0\n @ xaxis tick default 6\n @ xaxis tick place rounded true\n @ xaxis tick in\n @ xaxis tick major size 1.000000\n @ xaxis tick major color 1\n @ xaxis tick major linewidth 1.0\n @ xaxis tick major linestyle 1\n @ xaxis tick major grid on\n @ xaxis tick minor color 1\n @ xaxis tick minor linewidth 1.0\n @ xaxis tick minor linestyle 1\n @ xaxis tick minor grid off\n @ xaxis tick minor size 0.500000\n @ xaxis ticklabel on\n @ xaxis ticklabel format general\n @ xaxis ticklabel prec 5\n @ xaxis ticklabel formula \"\"\n @ xaxis ticklabel append \"\"\n @ xaxis ticklabel prepend \"\"\n @ xaxis ticklabel angle 0\n @ xaxis ticklabel skip 0\n @ xaxis ticklabel stagger 0\n @ xaxis ticklabel place normal\n @ xaxis ticklabel offset auto\n @ xaxis ticklabel offset 0.000000 , 0.010000\n @ xaxis ticklabel start type auto\n @ 
xaxis ticklabel start 0.000000\n @ xaxis ticklabel stop type auto\n @ xaxis ticklabel stop 0.000000\n @ xaxis ticklabel char size 1.500000\n @ xaxis ticklabel font 4\n @ xaxis ticklabel color 1\n @ xaxis tick place both\n @ xaxis tick spec type both\n $xticks_template\n @ yaxis on\n @ yaxis type zero false\n @ yaxis offset 0.000000 , 0.000000\n @ yaxis bar on\n @ yaxis bar color 1\n @ yaxis bar linestyle 1\n @ yaxis bar linewidth 1.0\n @ yaxis label \"$yaxislabel\"\n @ yaxis label layout para\n @ yaxis label place auto\n @ yaxis label char size 1.500000\n @ yaxis label font 4\n @ yaxis label color 1\n @ yaxis label place normal\n @ yaxis tick on\n @ yaxis tick major $ytick_spacing\n @ yaxis tick minor ticks 1\n @ yaxis tick default 6\n @ yaxis tick place rounded true\n @ yaxis tick in\n @ yaxis tick major size 1.000000\n @ yaxis tick major color 1\n @ yaxis tick major linewidth 1.0\n @ yaxis tick major linestyle 1\n @ yaxis tick major grid off\n @ yaxis tick minor color 1\n @ yaxis tick minor linewidth 1.0\n @ yaxis tick minor linestyle 1\n @ yaxis tick minor grid off\n @ yaxis tick minor size 0.500000\n @ yaxis ticklabel on\n @ yaxis ticklabel format general\n @ yaxis ticklabel prec 5\n @ yaxis ticklabel formula \"\"\n @ yaxis ticklabel append \"\"\n @ yaxis ticklabel prepend \"\"\n @ yaxis ticklabel angle 0\n @ yaxis ticklabel skip 0\n @ yaxis ticklabel stagger 0\n @ yaxis ticklabel place normal\n @ yaxis ticklabel offset auto\n @ yaxis ticklabel offset 0.000000 , 0.010000\n @ yaxis ticklabel start type auto\n @ yaxis ticklabel start 0.000000\n @ yaxis ticklabel stop type auto\n @ yaxis ticklabel stop 0.000000\n @ yaxis ticklabel char size 1.250000\n @ yaxis ticklabel font 4\n @ yaxis ticklabel color 1\n @ yaxis tick place both\n @ yaxis tick spec type none\n @ altxaxis off\n @ altyaxis off\n @ legend on\n @ legend loctype view\n @ legend 0.85, 0.8\n @ legend box color 1\n @ legend box pattern 1\n @ legend box linewidth 1.0\n @ legend box linestyle 1\n @ legend box fill color 0\n @ legend box fill pattern 1\n @ legend font 0\n @ legend char size 1.000000\n @ legend color 1\n @ legend length 4\n @ legend vgap 1\n @ legend hgap 1\n @ legend invert false\n @ frame type 0\n @ frame linestyle 1\n @ frame linewidth 1.0\n @ frame color 1\n @ frame pattern 1\n @ frame background color 0\n @ frame background pattern 0\n $set_descriptions\n \"\"\"\n)\n\nAGR_SET_DESCRIPTION_TEMPLATE = Template(\n \"\"\"\n @ s$set_number hidden false\n @ s$set_number type xy\n @ s$set_number symbol 0\n @ s$set_number symbol size 1.000000\n @ s$set_number symbol color $color_number\n @ s$set_number symbol pattern 1\n @ s$set_number symbol fill color $color_number\n @ s$set_number symbol fill pattern 0\n @ s$set_number symbol linewidth 1.0\n @ s$set_number symbol linestyle 1\n @ s$set_number symbol char 65\n @ s$set_number symbol char font 0\n @ s$set_number symbol skip 0\n @ s$set_number line type 1\n @ s$set_number line linestyle 1\n @ s$set_number line linewidth $linewidth\n @ s$set_number line color $color_number\n @ s$set_number line pattern 1\n @ s$set_number baseline type 0\n @ s$set_number baseline off\n @ s$set_number dropline off\n @ s$set_number fill type 0\n @ s$set_number fill rule 0\n @ s$set_number fill color $color_number\n @ s$set_number fill pattern 1\n @ s$set_number avalue off\n @ s$set_number avalue type 2\n @ s$set_number avalue char size 1.000000\n @ s$set_number avalue font 0\n @ s$set_number avalue color 1\n @ s$set_number avalue rot 0\n @ s$set_number avalue format general\n @ s$set_number 
avalue prec 3\n @ s$set_number avalue prepend \"\"\n @ s$set_number avalue append \"\"\n @ s$set_number avalue offset 0.000000 , 0.000000\n @ s$set_number errorbar on\n @ s$set_number errorbar place both\n @ s$set_number errorbar color $color_number\n @ s$set_number errorbar pattern 1\n @ s$set_number errorbar size 1.000000\n @ s$set_number errorbar linewidth 1.0\n @ s$set_number errorbar linestyle 1\n @ s$set_number errorbar riser linewidth 1.0\n @ s$set_number errorbar riser linestyle 1\n @ s$set_number errorbar riser clip off\n @ s$set_number errorbar riser clip length 0.100000\n @ s$set_number comment \"Cols 1:2\"\n @ s$set_number legend \"$legend\"\n \"\"\"\n)\n\nAGR_SINGLESET_TEMPLATE = Template(\"\"\"\n @target G0.S$set_number\n @type xy\n $xydata\n \"\"\")\n\n# text.latex.preview=True is needed to have a proper alignment of\n# tick marks with and without subscripts\n# see e.g. http://matplotlib.org/1.3.0/examples/pylab_examples/usetex_baseline_test.html\nMATPLOTLIB_HEADER_AGG_TEMPLATE = Template(\n \"\"\"# -*- coding: utf-8 -*-\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nfrom matplotlib import rc\n# Uncomment to change default font\n#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern', 'CMU Serif', 'Times New Roman', 'DejaVu Serif']})\n# To use proper font for, e.g., Gamma if usetex is set to False\nrc('mathtext', fontset='cm')\n\nrc('text', usetex=True)\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'text.latex.preview': True})\n\nimport pylab as pl\n\n# I use json to make sure the input is sanitized\nimport json\n\nprint_comment = False\n\"\"\"\n)\n\n# text.latex.preview=True is needed to have a proper alignment of\n# tick marks with and without subscripts\n# see e.g. 
http://matplotlib.org/1.3.0/examples/pylab_examples/usetex_baseline_test.html\nMATPLOTLIB_HEADER_TEMPLATE = Template(\n \"\"\"# -*- coding: utf-8 -*-\n\nfrom matplotlib import rc\n# Uncomment to change default font\n#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern', 'CMU Serif', 'Times New Roman', 'DejaVu Serif']})\n# To use proper font for, e.g., Gamma if usetex is set to False\nrc('mathtext', fontset='cm')\n\nrc('text', usetex=True)\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'text.latex.preview': True})\n\nimport pylab as pl\n\n# I use json to make sure the input is sanitized\nimport json\n\nprint_comment = False\n\"\"\"\n)\n\nMATPLOTLIB_IMPORT_DATA_INLINE_TEMPLATE = Template('''all_data_str = r\"\"\"$all_data_json\"\"\"\n''')\n\nMATPLOTLIB_IMPORT_DATA_FROMFILE_TEMPLATE = Template(\n \"\"\"with open(\"$json_fname\", encoding='utf8') as f:\n all_data_str = f.read()\n\"\"\"\n)\n\nMULTI_KP = \"\"\"\nfor path in paths:\n if path['length'] <= 1:\n # Avoid printing empty lines\n continue\n x = path['x']\n #for band in bands:\n for band, band_type in zip(path['values'], all_data['band_type_idx']):\n\n # For now we support only two colors\n if band_type % 2 == 0:\n further_plot_options = further_plot_options1\n else:\n further_plot_options = further_plot_options2\n\n # Put the legend text only once\n label = None\n if first_band_1 and band_type % 2 == 0:\n first_band_1 = False\n label = all_data.get('legend_text', None)\n elif first_band_2 and band_type % 2 == 1:\n first_band_2 = False\n label = all_data.get('legend_text2', None)\n\n p.plot(x, band, label=label,\n **further_plot_options\n )\n\"\"\"\n\nSINGLE_KP = \"\"\"\npath = paths[0]\nvalues = path['values']\nx = [path['x'] for _ in values]\np.scatter(x, values, marker=\"_\")\n\"\"\"\n\nMATPLOTLIB_BODY_TEMPLATE = Template(\n \"\"\"all_data = json.loads(all_data_str)\n\nif not all_data.get('use_latex', False):\n rc('text', usetex=False)\n\n#x = all_data['x']\n#bands = all_data['bands']\npaths = all_data['paths']\ntick_pos = all_data['tick_pos']\ntick_labels = all_data['tick_labels']\n\n# Option for bands (all, or those of type 1 if there are two spins)\nfurther_plot_options1 = {}\nfurther_plot_options1['color'] = all_data.get('bands_color', 'k')\nfurther_plot_options1['linewidth'] = all_data.get('bands_linewidth', 0.5)\nfurther_plot_options1['linestyle'] = all_data.get('bands_linestyle', None)\nfurther_plot_options1['marker'] = all_data.get('bands_marker', None)\nfurther_plot_options1['markersize'] = all_data.get('bands_markersize', None)\nfurther_plot_options1['markeredgecolor'] = all_data.get('bands_markeredgecolor', None)\nfurther_plot_options1['markeredgewidth'] = all_data.get('bands_markeredgewidth', None)\nfurther_plot_options1['markerfacecolor'] = all_data.get('bands_markerfacecolor', None)\n\n# Options for second-type of bands if present (e.g. spin up vs. 
spin down)\nfurther_plot_options2 = {}\nfurther_plot_options2['color'] = all_data.get('bands_color2', 'r')\n# Use the values of further_plot_options1 by default\nfurther_plot_options2['linewidth'] = all_data.get('bands_linewidth2',\n further_plot_options1['linewidth']\n )\nfurther_plot_options2['linestyle'] = all_data.get('bands_linestyle2',\n further_plot_options1['linestyle']\n )\nfurther_plot_options2['marker'] = all_data.get('bands_marker2',\n further_plot_options1['marker']\n )\nfurther_plot_options2['markersize'] = all_data.get('bands_markersize2',\n further_plot_options1['markersize']\n )\nfurther_plot_options2['markeredgecolor'] = all_data.get('bands_markeredgecolor2',\n further_plot_options1['markeredgecolor']\n )\nfurther_plot_options2['markeredgewidth'] = all_data.get('bands_markeredgewidth2',\n further_plot_options1['markeredgewidth']\n )\nfurther_plot_options2['markerfacecolor'] = all_data.get('bands_markerfacecolor2',\n further_plot_options1['markerfacecolor']\n )\n\nfig = pl.figure()\np = fig.add_subplot(1,1,1)\n\nfirst_band_1 = True\nfirst_band_2 = True\n\n${plot_code}\n\np.set_xticks(tick_pos)\np.set_xticklabels(tick_labels)\np.set_xlim([all_data['x_min_lim'], all_data['x_max_lim']])\np.set_ylim([all_data['y_min_lim'], all_data['y_max_lim']])\np.xaxis.grid(True, which='major', color='#888888', linestyle='-', linewidth=0.5)\n\nif all_data.get('plot_zero_axis', False):\n p.axhline(\n 0.,\n color=all_data.get('zero_axis_color', '#888888'),\n linestyle=all_data.get('zero_axis_linestyle', '--'),\n linewidth=all_data.get('zero_axis_linewidth', 0.5),\n )\nif all_data['title']:\n p.set_title(all_data['title'])\nif all_data['legend_text']:\n p.legend(loc='best')\np.set_ylabel(all_data['yaxis_label'])\n\ntry:\n if print_comment:\n print(all_data['comment'])\nexcept KeyError:\n pass\n\"\"\"\n)\n\nMATPLOTLIB_FOOTER_TEMPLATE_SHOW = Template(\"\"\"pl.show()\"\"\")\n\nMATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE = Template(\"\"\"pl.savefig(\"$fname\", format=\"$format\")\"\"\")\n\nMATPLOTLIB_FOOTER_TEMPLATE_EXPORTFILE_WITH_DPI = Template(\"\"\"pl.savefig(\"$fname\", format=\"$format\", dpi=$dpi)\"\"\")\n" ]
[ [ "numpy.random.random", "numpy.random.randint" ], [ "numpy.linalg.norm", "numpy.sort", "numpy.concatenate", "numpy.transpose", "numpy.array" ] ]
continual-ml/forgetful-networks
[ "8a3c9afdd489137cc11bd68c46d243ec52ec1670" ]
[ "utils.py" ]
[ "import numpy as np\nimport torch\nfrom matplotlib.patches import Ellipse\nimport matplotlib.transforms as transforms\nimport matplotlib.pyplot as plt\n\n\ndef entropy(x: torch.Tensor) -> torch.Tensor:\n x = x / x.sum(dim=1, keepdim=True)\n return -(x.clamp(min=0.001).log()*x).sum(dim=1)\n\n\ndef prediction_probability(x: torch.Tensor) -> torch.Tensor:\n proba, pred = x.max(dim=1)\n return proba\n\ndef ellipse(x, y, color, label, n_std=1, linestyle='solid'):\n cov = np.cov(x, y)\n pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])\n ell_radius_x = np.sqrt(1 + pearson)\n ell_radius_y = np.sqrt(1 - pearson)\n result = Ellipse((0, 0),\n width=ell_radius_x * 2,\n height=ell_radius_y * 2,\n edgecolor=color,\n facecolor=\"None\",\n linestyle=linestyle,\n linewidth=2.5,\n alpha=0.5,\n label=label)\n\n\n scale_x = np.sqrt(cov[0, 0]) * n_std\n mean_x = np.mean(x)\n scale_y = np.sqrt(cov[1, 1]) * n_std\n mean_y = np.mean(y)\n\n transf = transforms.Affine2D()\n tansf = transf.rotate_deg(45).scale(scale_x, scale_y).translate(mean_x, mean_y)\n\n return result, transf\n\n\ndef histogram_report(predictions, uncertainty, ground_truth, title):\n correct = (predictions.max(dim=1)[1] == ground_truth)\n print(\"acc = \", correct.float().mean().item())\n correct_uncertainty = uncertainty[correct].numpy()\n incorrect_uncertainty = uncertainty[~correct].numpy()\n xmin = min(correct_uncertainty.min(), incorrect_uncertainty.min())\n xmax = max(correct_uncertainty.max(), incorrect_uncertainty.max())\n # now we plot the two distributions\n plt.clf()\n _, bins, _ = plt.hist(\n incorrect_uncertainty,\n bins=50,\n range=[xmin, xmax],\n label='incorrect',\n density=True,\n color='red')\n plt.hist(correct_uncertainty,\n bins=bins,\n alpha=0.5,\n label='correct',\n density=True,\n color='blue')\n plt.legend()\n plt.yticks([])\n plt.xlabel(\"uncertainty measure\")\n if title:\n plt.title(title)\n plt.savefig(title.replace(\" \", \"_\").replace(\"'\",\"\"), dpi=80)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.patches.Ellipse", "numpy.sqrt", "matplotlib.pyplot.title", "matplotlib.transforms.Affine2D", "numpy.cov", "numpy.mean", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.yticks", "matplotlib.pyplot.hist" ] ]
vansin/table_detection
[ "4e3626b47bb4ce8d50a89a410f82b7c4e3c5f658" ]
[ "tools/visualization/vis_iou_f1.py" ]
[ "import json\nimport os\n\nimport matplotlib.pyplot as plt\nimport mmcv\nimport pandas as pd\nimport seaborn as sns\nimport time\nimport numpy as np\n\ntmp_pkl_name = '/home/tml/Nutstore Files/ubuntu/paper/data/csv/tmp.pkl'\n\nprefix = '/home/tml/Nutstore Files/ubuntu/paper/data/iou'\n\n\nif __name__ == '__main__':\n\n work_dirs = os.listdir('work_dirs')\n\n results = []\n\n\n if os.path.exists(tmp_pkl_name):\n results_dict = mmcv.load(tmp_pkl_name)\n else:\n results_dict = dict()\n\n\n best_f1 = []\n\n algorithm_list = []\n\n for root, dirs, files in os.walk('work_dirs'):\n print(\"root\", root) # 当前目录路径\n # print(\"dirs\", dirs) # 当前路径下所有子目录\n print(\"files\", files) # 当前路径下所有非目录子文件\n\n if files.__len__()>0:\n\n algorithm_list.append([root, files])\n \n\n for i, element in enumerate(algorithm_list):\n\n\n root, work_dir_files = element\n\n evals = []\n config_file = None\n for file_name in work_dir_files:\n if file_name.endswith('_eval.json'):\n evals.append(root + '/' + file_name)\n if file_name.endswith('.py'):\n config_file = file_name\n \n \n for j, name in enumerate(evals):\n eval_files = []\n print('===========', i, algorithm_list.__len__(),\n j, evals.__len__(), '=============', name, '============')\n \n epoch = int(name.split('/')[-1].split('_')[1])\n\n if (name, epoch) in results_dict:\n eval_files = results_dict[(name, epoch)]\n continue\n\n data_origin = mmcv.load(name)\n # epoch = int(name.split('/')[-1].split('_')[1])\n # data_origin['metric'][1]['detail'] = None\n\n config_name = data_origin['config'].split('/')[-1]\n algorithm = data_origin['config'].split('/')[2]\n dataset = data_origin['config'].split('/')[1]\n checkpoint_size = data_origin['checkpoint_size']\n\n\n for iou, eval_detail_result in data_origin['metric'][1].items():\n data = dict()\n data['epoch'] = epoch\n data['dataset'] = dataset\n data['config'] = config_name\n data['algorithm'] = algorithm\n data['checkpoint_size'] = checkpoint_size\n data['iou'] = float(iou)\n\n detail = eval_detail_result['detail']\n\n recalls = eval_detail_result['detail'][0]['recall']\n precisions = eval_detail_result['detail'][0]['precision']\n recalls = np.array(recalls)\n\n\n precisions = np.array(precisions)\n num_gts = eval_detail_result['detail'][0]['num_gts']\n num_dets = eval_detail_result['detail'][0]['num_dets']\n f1_scores = (2*recalls*precisions)/(recalls+precisions)\n f1_scores[np.isnan(f1_scores)] = 0\n\n if num_dets==0:\n recall_in_max_f1_score = 0\n precision_in_max_f1_score = 0\n max_f1_score = 0\n else:\n max_index = np.argmax(f1_scores)\n recall_in_max_f1_score = recalls[max_index]\n precision_in_max_f1_score = precisions[max_index]\n max_f1_score = f1_scores[max_index]\n\n eval_detail_result['recall_in_max_f1_score'] = recall_in_max_f1_score\n eval_detail_result['precision_in_max_f1_score'] = precision_in_max_f1_score\n eval_detail_result['max_f1_score'] = max_f1_score\n\n eval_detail_result['detail'] = None\n data.update(eval_detail_result)\n eval_files.append(data)\n\n results_dict[(name, epoch)] = eval_files\n\n\n # eval_files.sort(key=lambda x: x['epoch'])\n\n\n\n\n # try:\n # best_f1.append(\n # (work_dir, max(eval_files, key=lambda x: x['f1_score'])['f1_score']))\n # except Exception as e:\n # print(e)\n\n mmcv.dump(results_dict, tmp_pkl_name)\n\n # work_dirs = os.listdir('work_dirs')\n # for i, work_dir in enumerate(work_dirs):\n # work_dir_files = os.listdir('work_dirs/' + work_dir)\n # eval_files = []\n # config_file = None\n # for file_name in work_dir_files:\n # if 
file_name.endswith('_eval.json'):\n\n # name = 'work_dirs/' + work_dir + '/' + file_name\n # data_origin = mmcv.load(name)\n\n # epoch = int(name.split('/')[-1].split('_')[1])\n\n # data = dict()\n # data['epoch'] = epoch\n # config_name = data_origin['config'].split('/')[-1]\n # data['config'] = config_name\n # data.update(data_origin['metric'])\n # eval_files.append(data)\n # try:\n # iou_curves = data_origin['metric']['iou_infos']\n # df = pd.DataFrame.from_dict(iou_curves)\n # df.to_csv(prefix + '/' + work_dir + '=' + str(epoch) + '.csv')\n # # g = sns.lineplot(x='iou', y='f1_score', data=df, markers=True, dashes=False)\n # # g.legend(loc='right', bbox_to_anchor=(1.5, 0.5), ncol=1)\n # # plt.show()\n # # print(plt)\n # eval_files.append(data)\n # except Exception as e:\n # print(e)\n # if file_name.endswith('.py'):\n # config_file = 'work_dirs/' + work_dir + '/' + file_name\n\n # eval_files.sort(key=lambda x: x['epoch'])\n # try:\n # best_f1.append(\n # (work_dir, max(eval_files, key=lambda x: x['f1_score'])['f1_score']))\n # except Exception as e:\n # print(e)\n # results.append(eval_files)\n # print(results)\n\n\nintput_data = []\n\n\n# for result in results:\n# intput_data.extend(result)\n\nfor k,v in results_dict.items():\n intput_data.extend(v)\n\nintput_data.sort(key=lambda x: x['epoch'])\n\nout_file_name = 'test_result_' + str(int(time.time()))\n\n# with open('/home/tml/Nutstore Files/ubuntu/paper/data/json/' +'out.json', 'w') as f:\n# json.dump(results_dict, f)\n\ndf = pd.DataFrame.from_dict(intput_data)\n\n\n# df.to_csv('/home/tml/Nutstore Files/ubuntu/paper/data/csv/'+out_file_name+'.csv')\n# df.to_excel('/home/tml/Nutstore Files/ubuntu/paper/data/csv/' +\n# out_file_name + '.xlsx')\ndf.to_csv('/home/tml/Nutstore Files/ubuntu/paper/data/csv/' +\n 'latest' + '.csv')\ndf.to_excel('/home/tml/Nutstore Files/ubuntu/paper/data/csv/' +\n 'latest' + '.xlsx')\n\n\n# g = sns.lineplot(x='epoch', y='bbox_mAP', data=df, hue='config',\n# style='config', markers=True, dashes=False)\n# g.legend(loc='right', bbox_to_anchor=(1.5, 0.5), ncol=1)\n\n# plt.show()\n# print(plt)\n# for result in results:\n#\n# sns.set_theme(style='darkgrid')\n# # Load an example dataset with long-form data\n# df = pd.DataFrame.from_dict(result)\n#\n# # Plot the responses for different events and regions\n# sns.lineplot(x='epoch', y='bbox_mAP',\n# data=df)\n#\n# plt.show()\n" ]
[ [ "numpy.isnan", "numpy.array", "numpy.argmax", "pandas.DataFrame.from_dict" ] ]
CHUSRadOncPhys/FluoMV
[ "6fc226ee0ca1427495f0ab483c63a2ca5195954e" ]
[ "ServiceApplication_SourceFiles/Mod_FlexmapImage.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport sys\nimport os\n#===============================================================================================================\nclass FlexmapImage:\n\t\n\tdef __init__(self, thisSettingsObj):\n\t\t\n\t\tself.Status = True\n\t\tself.SettingsObj = thisSettingsObj\n\n#--------------------------------------------------------------------------------------------------------------------------------------------\n\tdef Execute(self,thisImArray):\n\t\t\n\t\tself.ImArrayFiltered = thisImArray\n\t\t#Zone de l'image ou doit se trouver le ballbearing\n\t\tself.ImArrayBallBearing = self.ImArrayFiltered[462:562,462:562]#[y][x]\n\t\t\t\n\t\tself.Normalisation = 0 #Intensite du 100% du champ de radiation.\n\t\tself.LowThreshold = 0 #Intensite de l'image self.ImArrayBallBearing. Toutes les pixels avec des niveaux de gris inférieurs à self.LowThreshold appartienne au Ballbearing.\n\t\t\n\t\tself.BBXPosi = 0\n\t\tself.BBYPosi = 0\n\t\t\n\t\t#Appel des fonctions\n\t\tself.GetThresholds() \t#Fonction pour déterminer self.Normalisation, self.LowThreshold\n\t\tself.GetBallBearing()\n\t\t\n\t\treturn np.around(self.BBXPosi,1), np.around(self.BBYPosi,1)\n#------------------------------------------------------------------------------------------\n\tdef GetThresholds(self):\n\n\t\tPixList = list()\n\t\tfor i in range(0,self.ImArrayBallBearing.shape[0]):\n\t\t\tfor j in range(0,self.ImArrayBallBearing.shape[1]):\n\t\t\t\tPixList.append(self.ImArrayBallBearing[i][j])\n\t\t\t\t\n\t\tn,bin = np.histogram(PixList,10)\n\t\tself.LowThreshold = bin[2] #Approximation for the gray level of the BB\n\t\tself.Normalisation = (bin[-2] + bin[-1])*0.5 # Approximation for the gray level at the center of the field\n\n#------------------------------------------------------------------------------------------------------------------------------------\n\tdef GetBallBearing(self):\n\t\tXlist = list()\n\t\tYlist = list()\n\n\t\tfor y in range(462,562):\n\t\t\tfor x in range(462,562):\n\t\t\t\tif(self.ImArrayFiltered[y][x] <= self.LowThreshold):\n\t\t\t\t\tYlist.append(y)\n\t\t\t\t\tXlist.append(x)\n\n\t\tself.BBXPosiMedian = np.median(Xlist)\n\t\tself.BBYPosiMedian = np.median(Ylist)\n\t\tself.BBXPosiMoyen = np.mean(Xlist)\n\t\tself.BBYPosiMoyen = np.mean(Ylist)\n\t\t\n\t\tif abs(self.BBXPosiMedian - self.BBXPosiMoyen)<=1:\n\t\t\tself.BBXPosi = self.BBXPosiMoyen\n\t\telse:\n\t\t\tself.BBXPosi = self.BBXPosiMedian\n\n\t\tif abs(self.BBYPosiMedian - self.BBYPosiMoyen)<=1:\n\t\t\tself.BBYPosi = self.BBYPosiMoyen\n\t\telse:\n\t\t\tself.BBYPosi = self.BBYPosiMedian\t\t\t\n\t\t\n\t\t\n\t\t#print 'Ball bearing position X (pixels):',self.BBXPosi\n\t\t#print 'Ball bearing position Y (pixels):',self.BBYPosi\n#==============================================================================================================================================\n" ]
[ [ "numpy.around", "numpy.median", "numpy.histogram", "numpy.mean" ] ]
RealPolitiX/animo
[ "cb9e3af5ae11e2c316a665004f715a5c0ef756fb" ]
[ "animo/animo.py" ]
[ "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: R. Patrick Xian\n\"\"\"\n\nfrom __future__ import print_function, division\nfrom abc import ABCMeta, abstractmethod\nfrom JSAnimation.IPython_display import display_animation\nfrom matplotlib import animation\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.colors as colors\n\n\n# ===== Utility functions ===== #\n\nclass MidpointNormalize(colors.Normalize):\n\n def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n self.midpoint = midpoint\n colors.Normalize.__init__(self, vmin, vmax, clip)\n\n def __call__(self, value, clip=None):\n x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n return np.ma.masked_array(np.interp(value, x, y))\n\n\ndef parse_norm(data, cscale):\n \"\"\"\n Parser of string for color normalization\n \"\"\"\n if isinstance(cscale, str):\n if cscale == 'log': # log scale\n return mpl.colors.LogNorm()\n elif cscale == 'linear': # linear scale (default)\n return mpl.colors.Normalize()\n elif isinstance(cscale, dict):\n mp = cscale.pop('midpoint', 0.)\n cvmin = cscale.pop('vmin', np.min(data))\n cvmax = cscale.pop('vmax', np.max(data))\n return MidpointNormalize(vmin=cvmin, vmax=cvmax, midpoint=mp)\n\n\n# ===== Animator classes ===== #\n\nclass PlotAnimate(object):\n \"\"\"\n The animator metaclass\n \"\"\"\n \n __metaclass__ = ABCMeta\n \n @abstractmethod\n def animator(self, iframe):\n \"\"\"\n Construct animation\n \"\"\"\n return\n \n @abstractmethod\n def view_frame(self, iframe):\n \"\"\"\n Display a single frame of animation\n \"\"\"\n return\n \n @abstractmethod\n def view_anim(self, backend):\n \"\"\"\n Display the entire animation\n \"\"\"\n return\n \n\nclass LineAnimate(PlotAnimate):\n \"\"\"\n Class for 1D line animation\n \"\"\"\n \n def __init__(self, x, y, nframes, fixed='x', **kwargs):\n self.x = x\n self.y = y\n self.nframes = nframes\n self.fixed = fixed\n self.figsize = kwargs.get('figsize', (6,4))\n self.interval = kwargs.get('interval', 100)\n self.zorder = kwargs.get('zorder', 0)\n self.label = kwargs.get('label', '')\n self.legend = kwargs.get('legend', False)\n self.lgdloc = kwargs.get('legendloc', 'upper right')\n self.lgdttl = kwargs.get('legendtitle', '')\n self.linewidth = kwargs.get('linewidth', 2)\n self.linecolor = kwargs.get('linecolor', 'k')\n self.linestyle = kwargs.get('linestyle', '-')\n if {'fig', 'ax'} <= set(kwargs.keys()):\n self.f, self.ax = kwargs['fig'], kwargs['ax']\n else:\n self.f, self.ax = plt.subplots(figsize=self.figsize)\n\n def set_param(self, prop_statement):\n exec(\"self.\" + prop_statement)\n\n def frame(self, iframe):\n if self.fixed == 'x':\n self.lines, = self.ax.plot(self.x[0,:], self.y[iframe,:], linewidth=self.linewidth, \\\n color=self.linecolor, linestyle=self.linestyle, label=self.label, \\\n zorder=self.zorder)\n elif self.fixed == 'y':\n self.lines, = self.ax.plot(self.x[iframe,:], self.y[0,:], linewidth=self.linewidth, \\\n color=self.linecolor, linestyle=self.linestyle, label=self.label, \\\n zorder=self.zorder)\n elif self.fixed is None:\n self.lines, = self.ax.plot(self.x[iframe,:], self.y[iframe,:], linewidth=self.linewidth, \\\n color=self.linecolor, linestyle=self.linestyle, label=self.label, \\\n zorder=self.zorder)\n if self.legend == True:\n self.ax.legend(title=self.lgdttl, loc=self.lgdloc)\n return self.lines\n \n def view_frame(self, iframe):\n _ = self.frame(iframe)\n return\n \n def animator(self, iframe):\n if not hasattr(self, 'lines'):\n self.lines 
= self.frame(0)\n else:\n if self.fixed == 'x':\n self.lines.set_data(self.x, self.y[iframe,:])\n elif self.fixed == 'y':\n self.lines.set_data(self.x[iframe,:], self.y)\n elif self.fixed is None:\n self.lines.set_data(self.x[iframe,:], self.y[iframe,:])\n return self.f\n \n def view_anim(self, backend=None):\n anim = animation.FuncAnimation(self.f, self.animator,\\\n frames=self.nframes, interval=self.interval)\n if backend == 'JS':\n return display_animation(anim)\n elif backend is None:\n return self.anim\n \n\nclass MultiLineAnimate(LineAnimate):\n \"\"\"\n Create multiline animation\n \"\"\"\n \n def __init__(self, dataset, fixed, nframes, figsize=(6,4), **kwargs):\n self.f, self.ax = plt.subplots(figsize=figsize)\n self.dataset = dataset\n self.dscount = len(dataset)\n self.nframes = nframes\n self.labels = kwargs.get('labels', ['']*self.dscount)\n self.linewidths = kwargs.get('linewidths', [2]*self.dscount)\n self.linestyles = kwargs.get('linestyles', ['-']*self.dscount)\n self.linecolors = kwargs.get('linecolors', ['k']*self.dscount)\n self.zorders = kwargs.get('zorders', range(self.dscount))\n self.inst = []\n for i in range(self.dscount):\n self.inst.append(LineAnimate(*dataset[i], fixed=fixed, nframes=nframes,\\\n fig=self.f, ax=self.ax, linewidth=self.linewidths[i], linecolor=self.linecolors[i], \\\n linestyle=self.linestyles[i], label=self.labels[i], zorder=self.zorders[i], **kwargs))\n\n def set_inst_param(self, n_inst, prop_statement):\n exec(\"self.inst[n_inst].\" + prop_statement)\n\n def frame(self, iframe):\n for i in range(self.dscount):\n self.inst[i].frame(iframe)\n return\n \n def view_frame(self, iframe):\n _ = self.frame(iframe)\n return\n \n def animator(self, iframe):\n for i in range(self.dscount):\n self.inst[i].animator(iframe)\n return self.f\n \n def view_anim(self, backend=None):\n anim = animation.FuncAnimation(self.f, self.animator,\\\n frames=self.nframes, interval=self.inst[0].interval)\n if backend == 'JS':\n return display_animation(anim)\n elif backend is None:\n return self.anim\n \n\nclass ImageAnimate(PlotAnimate):\n \"\"\"\n Class for 2D image animation\n \"\"\"\n \n def __init__(self, data, axis=0, **kwargs):\n if np.ndim(data) != 3:\n raise Exception('The input array needs to have dimension 3.')\n else:\n self.axis = axis\n self.data = np.rollaxis(data, axis)\n self.nl, self.nr, self.nc = self.data.shape\n self.interval = kwargs.get('interval', 100)\n self.colorbar = kwargs.get('colorbar', False)\n self.cscale = kwargs.get('cscale', 'linear')\n self.nframes = kwargs.get('nframes', data.shape[axis])\n self.xaxis = kwargs.get('imx', range(self.nc))\n self.yaxis = kwargs.get('imy', range(self.nr))\n self.xlabel = kwargs.get('xlabel', '')\n self.ylabel = kwargs.get('ylabel', '')\n self.axlabelsize = kwargs.get('axlabelsize', 15)\n self.figsize = kwargs.get('figsize', (5,6))\n self.xgrid, self.ygrid = np.meshgrid(self.xaxis, self.yaxis)\n self.cmap = kwargs.get('cmap', 'terrain_r')\n self.text = kwargs.get('text', ['']*self.nl)\n self.textpos = kwargs.get('textpos', (0.9, 0.9))\n self.textsize = kwargs.get('textsize', 15)\n self.textcolor = kwargs.get('textcolor', 'k')\n self.vmin = kwargs.get('vmin', None)\n self.vmax = kwargs.get('vmax', None)\n self.zorder = kwargs.get('zorder', 0)\n if {'fig', 'ax'} <= set(kwargs.keys()):\n self.f, self.ax = kwargs['fig'], kwargs['ax']\n else:\n self.f, self.ax = plt.subplots(figsize=self.figsize)\n self.ax.set_xlabel(self.xlabel, fontsize=self.axlabelsize)\n self.ax.set_ylabel(self.ylabel, 
fontsize=self.axlabelsize)\n \n def set_param(self, prop_statement):\n exec(\"self.\" + prop_statement)\n \n def frame(self, iframe):\n imgframe = self.data[iframe,:,:]\n self.qmesh = self.ax.pcolormesh(self.xgrid, self.ygrid, imgframe, \\\n cmap=self.cmap, vmin=self.vmin, vmax=self.vmax, zorder=self.zorder)\n self.qmesh.set_norm(parse_norm(imgframe, self.cscale))\n self.txt = self.ax.text(self.textpos[0], self.textpos[1], self.text[iframe], \\\n fontsize=self.textsize, color=self.textcolor, \\\n zorder=1, transform=self.ax.transAxes)\n if self.colorbar == True:\n self.f.colorbar(self.qmesh)\n return self.f, self.qmesh\n \n def view_frame(self, iframe):\n _ = self.frame(iframe)\n return\n \n def animator(self, iframe):\n if not hasattr(self, 'qmesh'):\n self.qmesh = self.frame(0)[1]\n else:\n imgcurr = self.data[iframe,:,:]\n self.qmesh.set_array(imgcurr[:-1,:-1].flatten())\n self.txt.set_text(self.text[iframe])\n return self.f\n \n def view_anim(self, backend=None):\n self.anim = animation.FuncAnimation(self.f, self.animator,\\\n frames=self.nframes, interval=self.interval)\n if backend == 'JS':\n return display_animation(self.anim)\n elif backend is None:\n return self.anim\n\n\nclass MultiImageAnimate(ImageAnimate):\n \"\"\"\n Create connected multi-image animation\n \"\"\"\n \n def __init__(self, dataset, axis=0, nrow=1, ncol=2, figsize=(12, 4), **kwargs):\n self.f, axs = plt.subplots(nrow, ncol, figsize=figsize)\n if np.ndim(axs) > 1:\n self.axs = axs.flatten()\n else:\n self.axs = axs\n self.dataset = dataset\n self.dscount = len(dataset)\n self.inst = []\n for i in range(self.dscount):\n self.inst.append(ImageAnimate(dataset[i], axis=axis, \\\n fig=self.f, ax=self.axs[i], **kwargs))\n\n def set_inst_param(self, n_inst, prop_statement):\n exec(\"self.inst[n_inst].\" + prop_statement)\n \n def frame(self, iframe):\n for i in range(self.dscount):\n self.inst[i].frame(iframe)\n \n def view_frame(self, iframe):\n _ = self.frame(iframe)\n return\n \n def animator(self, iframe):\n for i in range(self.dscount):\n self.inst[i].animator(iframe)\n return self.f\n \n def view_anim(self, backend):\n self.anim = animation.FuncAnimation(self.f, self.animator,\\\n frames=self.inst[0].nframes, interval=self.inst[0].interval)\n if backend == 'JS':\n return display_animation(self.anim)\n elif backend is None:\n return self.anim\n\n \nclass CompositePlotAnimate(LineAnimate, ImageAnimate):\n \"\"\"\n Class for composite image and line animation\n \"\"\"\n \n def __init__(self, x, y, data, fixed='x', axis=0, **kwargs):\n \n # Construct figure and axes objects\n self.figsize = kwargs.get('figsize', (5,6))\n if {'fig', 'ax'} <= set(kwargs.keys()):\n self.f, self.ax = kwargs['fig'], kwargs['ax']\n else:\n self.f, self.ax = plt.subplots(figsize=self.figsize)\n \n # Initiate animation with existing figure and axes handles\n ImageAnimate.__init__(self, data, axis=axis, fig=self.f, \\\n ax=self.ax, figsize=self.figsize, zorder=0, **kwargs)\n LineAnimate.__init__(self, x, y, self.nframes, fixed=fixed, \\\n fig=self.f, ax=self.ax, figsize=self.figsize, zorder=1, **kwargs)\n \n def frame(self, iframe):\n self.lines = LineAnimate.frame(self, iframe)\n self.qmesh = ImageAnimate.frame(self, iframe)[1]\n return self.lines, self.qmesh\n \n def view_frame(self, iframe):\n _ = self.frame(iframe)\n return\n \n def animator(self, iframe):\n LineAnimate.animator(self, iframe)\n ImageAnimate.animator(self, iframe)\n return self.f\n \n def view_anim(self, backend=None):\n self.anim = animation.FuncAnimation(self.f, 
self.animator,\\\n frames=self.nframes, interval=self.interval)\n if backend == 'JS':\n return display_animation(self.anim)\n elif backend is None:\n return self.anim" ]
[ [ "numpy.rollaxis", "matplotlib.colors.LogNorm", "matplotlib.colors.Normalize.__init__", "numpy.min", "matplotlib.pyplot.subplots", "matplotlib.colors.Normalize", "numpy.ndim", "numpy.max", "numpy.interp", "matplotlib.animation.FuncAnimation", "numpy.meshgrid" ] ]
jingxiufenghua/rec-model
[ "cfc7b3fbd4ba2d9157a78938e6bdaeba7df82822" ]
[ "xDeepFM/train.py" ]
[ "\"\"\"\nCreated on August 21, 2020\nUpdated on May 19, 2021\n\ntrain xDeepFM model\n\n@author: Ziyao Geng(zggzy1996@163.com)\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.keras.losses import binary_crossentropy\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.metrics import AUC\n\nfrom model import xDeepFM\nfrom data_process.criteo import create_criteo_dataset\n\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\nif __name__ == '__main__':\n # =============================== GPU ==============================\n # gpu = tf.config.experimental.list_physical_devices(device_type='GPU')\n # print(gpu)\n # If you have GPU, and the value is GPU serial number.\n os.environ['CUDA_VISIBLE_DEVICES'] = '4'\n # ========================= Hyper Parameters =======================\n # you can modify your file path\n file = '../dataset/Criteo/train.txt'\n read_part = True\n sample_num = 5000000\n test_size = 0.2\n\n embed_dim = 8\n dnn_dropout = 0.5\n hidden_units = [256, 128, 64]\n cin_size = [128, 128]\n\n learning_rate = 0.001\n batch_size = 4096\n epochs = 10\n # ========================== Create dataset =======================\n feature_columns, train, test = create_criteo_dataset(file=file,\n embed_dim=embed_dim,\n read_part=read_part,\n sample_num=sample_num,\n test_size=test_size)\n train_X, train_y = train\n test_X, test_y = test\n # ============================Build Model==========================\n mirrored_strategy = tf.distribute.MirroredStrategy()\n with mirrored_strategy.scope():\n model = xDeepFM(feature_columns, hidden_units, cin_size)\n model.summary()\n # =========================Compile============================\n model.compile(loss=binary_crossentropy, optimizer=Adam(learning_rate=learning_rate),\n metrics=[AUC()])\n # ============================model checkpoint======================\n # check_path = 'save/xdeepfm_weights.epoch_{epoch:04d}.val_loss_{val_loss:.4f}.ckpt'\n # checkpoint = tf.keras.callbacks.ModelCheckpoint(check_path, save_weights_only=True,\n # verbose=1, period=5)\n # ===========================Fit==============================\n model.fit(\n train_X,\n train_y,\n epochs=epochs,\n callbacks=[EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)], # checkpoint\n batch_size=batch_size,\n validation_split=0.1\n )\n # ===========================Test==============================\n print('test AUC: %f' % model.evaluate(test_X, test_y, batch_size=batch_size)[1])" ]
[ [ "tensorflow.keras.metrics.AUC", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.distribute.MirroredStrategy" ] ]