repo_name: string (6-130 characters)
hexsha: list
file_path: list
code: list
apis: list
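The records below follow this schema, one field per line per record, with hexsha, file_path, code, and apis as parallel lists (one entry per file). A minimal loading sketch, assuming the records have been exported as JSON Lines to a file named records.jsonl (a hypothetical name; the actual storage format is not shown here):

import json

# Hypothetical file name; each line is assumed to hold one record with the five columns above.
with open("records.jsonl") as f:
    for line in f:
        record = json.loads(line)
        # hexsha, file_path, code, and apis are parallel lists, one entry per file.
        for path, code, apis in zip(record["file_path"], record["code"], record["apis"]):
            print(record["repo_name"], path, apis)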
system123/SOMatch
[ "6f10cf28f506998a5e430ccd3faab3076fe350d5" ]
[ "datasets/csv_ua_dataset.py" ]
[ "import torch\n\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\nfrom functools import partial\nfrom skimage.io import imread\nfrom glob import glob\nfrom skimage import exposure, img_as_float, util\nfrom utils.augmentation import Augmentation, cropCenter, toGrayscale, cropCorner, cutout\n\nimport numpy as np\nimport pandas as pd\nimport os\n\nAUG_PROBS = {\n \"fliplr\": 0.5,\n \"flipud\": 0.5,\n \"scale\": 0.1,\n \"scale_px\": (1.1, 1.1),\n \"translate\": 0,\n \"translate_perc\": (0.0, 0.0),\n \"rotate\": 0,\n \"rotate_angle\": (-5, 5),\n \"contrast\": 0.7,\n \"dropout\": 0.8\n}\n\nclass CSVUADataset(Dataset):\n def __init__(self, config):\n super()\n\n self.domain = config.domain if isinstance(config.domain, str) else \"opt_crop\"\n self.balance = config.balance if isinstance(config.balance, bool) else False\n self.thresh_loss = config.thresh_loss if 'thresh_loss' in config else [0, 12]\n self.thresh_l2 = config.thresh_l2 if 'thresh_l2' in config else [1, 2.5]\n self.named = config.named if isinstance(config.named, bool) else False\n self.normed = config.normed if isinstance(config.normed, bool) else True\n\n self.base_dir = config.base_dir\n self.df = pd.read_csv(os.path.join(self.base_dir, config.csv))\n\n dataset_name = os.path.splitext(os.path.basename(config.csv))[0].rsplit(\"_\", 1)[1]\n self.img_dir = os.path.join(self.base_dir, dataset_name)\n\n func = []\n\n if config.augment:\n # If it is true like then just use the default augmentation parameters - this keeps things backwards compatible\n if config.augment is True or len(config.augment) == 0:\n config.augment = AUG_PROBS.copy()\n\n self.augmentor = Augmentation(probs=config.augment)\n else:\n self.augmentor = None\n\n func.append(transforms.ToTensor())\n self.transforms = transforms.Compose(func)\n\n self._label_and_prune(self.thresh_l2[0], self.thresh_loss[0], self.thresh_l2[1], self.thresh_loss[1])\n\n def _label_and_prune(self, l2_pos=1, loss_pos=2.2, l2_neg=2.5, loss_neg=1.2):\n self.df[\"label\"] = np.nan\n # Label positive samples\n self.df.loc[(self.df.l2 <= l2_pos) & (self.df.nlog_match_loss >= loss_pos), \"label\"] = 1\n self.df.loc[(self.df.l2 >= l2_neg) & (self.df.nlog_match_loss <= loss_neg), \"label\"] = 0\n\n # Remove all unlabeled points\n self.df.dropna(axis=0, inplace=True)\n\n if self.balance:\n limit = min( sum(self.df[\"label\"] == 0), sum(self.df[\"label\"] == 1) )\n limited_df = self.df.groupby(\"label\").apply( lambda x: x.sample(n=limit) )\n limited_df.reset_index(drop=True, inplace=True)\n self.df = limited_df.sample(frac=1).reset_index(drop=True)\n \n def _get_filepath(self, row, img=\"sar\"):\n return f\"{self.img_dir}/['{row.city}']_['{row.wkt}']_{img}.npy\"\n\n def _load_image(self, row, domain=None):\n data = np.load(self._get_filepath(row, img=domain))[0,]\n # Put in HxWxC format so data augmentation works\n return np.ascontiguousarray(data.transpose((1,2,0)))\n\n def normalize(self, img):\n return (img - img.min())/(img.ptp() + 1e-6)\n\n def _get_raw_triplet(self, row, crop=False):\n suffix = \"_crop\" if crop else \"\"\n opt = (self.transforms(self._load_image(row, f\"opt{suffix}\")).numpy().transpose((1,2,0))*255).astype(np.uint8)\n sar = (self.normalize(self.transforms(self._load_image(row, f\"sar{suffix}\")).numpy().transpose((1,2,0)))*255).astype(np.uint8)\n y = np.ones_like(sar) * row.label\n return sar, opt, y, {\"sar\": f\"{row.city}_{row.name}_sar.png\", \"opt\": f\"{row.city}_{row.name}_opt.png\", \"label\": row.label}\n\n def __len__(self):\n return 
len(self.df)\n\n def __getitem__(self, index):\n row = self.df.iloc[index]\n x = self._load_image(row, self.domain)\n\n name = {\"WKT\": row.wkt, \"city\": row.city}\n\n if self.augmentor:\n self.augmentor.refresh_random_state()\n x = self.augmentor(x)\n\n if \"sar\" in self.domain and self.normed:\n x = self.normalize(x)\n\n if \"hm\" in self.domain and self.normed:\n x = self.normalize(x)\n\n x = self.transforms(x.copy()).float()\n\n y = np.array([row.label])\n\n if self.named:\n return x, y, name\n else:\n return x, y\n \n" ]
[ [ "numpy.array", "numpy.ones_like" ] ]
jansforte/Inteligencia-Artificial
[ "f5bf8f11f5e58b4a8b7becf049479534e48c75a5" ]
[ "redes neuronales/red_neuronal_basico.py" ]
[ "import numpy as np\nfrom matplotlib import pyplot as plt\n\n#función sigmoidea\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\n#Derivada de la sigmoidea\ndef sigmoid_der(x):\n return sigmoid(x)*(1-sigmoid(x))\n\n#primero generamos aleatoriamente 100 puntos espaciados linealmente entre -10 y 10\ninput = np.linspace(-10, 10, 100)\n\n#Dibujamos los valores de entrada en función de los valores sigmoidales, la marcamos de color rojo\nplt.plot(input, sigmoid(input), c=\"r\")\nplt.show()\n\n#Este es el conjunto de características original\nfeature_set = np.array([[0,1,0],[0,0,1],[1,0,0],[1,1,0],[1,1,1]])\n#el label son los valores observaods, son los datos que definen que la persona tiene diabetes\nlabels = np.array([[1,0,0,1,1]]) \n#\nlabels = labels.reshape(5,1)\n\n#el seed se aplica para obtener los mismos valores aleatorios cada vez que se ejecute este archivo\nnp.random.seed(42)\n#genera una matriz de 3x1\nweights = np.random.rand(3,1)\nbias = np.random.rand(1)\n#establecemos la tasa de aprendizaje en 5%\nlr = 0.05\n\n#aquí entrenamos el algoritmo de nuestros datos 20 mil veces\nfor epoch in range(20000):\n inputs = feature_set\n\n # feedforward step1\n #aquí se hace el producto punto del conjunto original con el peso + el sesgo para generar el escalar\n XW = np.dot(feature_set, weights) + bias\n\n #feedforward step2\n #se pasa el producto escalar para obtener la sigmoidea del algoritmo\n z = sigmoid(XW)\n\n\n # backpropagation step 1\n # encontramos el error al restarle las etiquetas a la sigmoidea\n error = z - labels\n\n #aqui vemos como va mermando el error\n print(error.sum())\n\n # backpropagation step 2\n #al realizar la derivada da 2(z-labels), el 2 al ser constante se ovbia\n #quedando tal que la derivada del cost respecto derivada predicha es el error\n dcost_dpred = error\n #la derivada predicha respecto a la derivada sigmoidea será la derivada de la sigmoidea\n dpred_dz = sigmoid_der(z)\n\n #el producto de la derevida del costo en funcion de lo predicho por la derivada de\n #el predicho respecto a la derivada sigmoidea\n z_delta = dcost_dpred * dpred_dz\n #Realizamos la transpuesta de los conjuntos originales\n inputs = feature_set.T\n #multiplicamos la variable de aprendizaje por de la transpuesta de nuestros datos originales\n #y el z_delta\n #nos da los pesos, y al multiplicar por la variable de aprendizaje hacemos que aumente\n #la velocidad de la convergencia\n weights -= lr * np.dot(inputs, z_delta)\n\n #por último calculamos el bias (b) para tener la funcion: z=x1w1+x2w2+x3w3+b\n for num in z_delta:\n bias -= lr * num\n\n#predecimos el valor en el caso que una persona fuma, para hayar la probabilidad de que tenga diabetes o no\nsingle_point = np.array([1,0,0])\n#hayamos la sigmoidea del producto punto de nuestra persona con el peso que se calculó y le sumamos el bias\nresult = sigmoid(np.dot(single_point, weights) + bias)\n#por pultimo mostramos la probabilidad de tener o no diabetes\nprint(result)\n\nsingle_point = np.array([0,1,0])\nresult = sigmoid(np.dot(single_point, weights) + bias)\nprint(result)" ]
[ [ "numpy.array", "numpy.dot", "numpy.random.rand", "numpy.random.seed", "numpy.exp", "matplotlib.pyplot.show", "numpy.linspace" ] ]
eva5covergence/EVA5_AI_Projects
[ "7052373c52b6b9901cd0bc05a4758dd4b63f7480", "7052373c52b6b9901cd0bc05a4758dd4b63f7480" ]
[ "MultiObjectiveModel_YMPNet_Pavan/datasets/inference_dataset.py", "MultiObjectiveModel_YMPNet_Pavan/planer_models/model.py" ]
[ "\"\"\"\nCopyright (C) 2019 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license\n(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n\"\"\"\n\nimport numpy as np\nimport glob\nimport cv2\nimport os\n\nfrom utils import *\nfrom datasets.plane_dataset import *\n\nclass InferenceDataset(Dataset):\n \"\"\" This class creates a dataloader for custom images \"\"\"\n\n def __init__(self, options, config, image_list, camera, random=False):\n \"\"\" camera: [fx, fy, cx, cy, image_width, image_height, dummy, dummy, dummy, dummy] \"\"\"\n \n self.options = options\n self.config = config\n self.random = random\n self.camera = camera\n self.imagePaths = image_list\n self.anchors = generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n config.BACKBONE_SHAPES,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n return\n\n def __getitem__(self, index):\n t = int(time.time() * 1000000)\n np.random.seed(((t & 0xff000000) >> 24) +\n ((t & 0x00ff0000) >> 8) +\n ((t & 0x0000ff00) << 8) +\n ((t & 0x000000ff) << 24))\n if self.random:\n index = np.random.randint(len(self.imagePaths))\n else:\n index = index % len(self.imagePaths)\n pass\n\n imagePath = self.imagePaths[index]\n image = cv2.imread(imagePath)\n extrinsics = np.eye(4, dtype=np.float32)\n\n if isinstance(self.camera, list):\n if isinstance(self.camera[index], str):\n camera = np.zeros(6)\n with open(self.camera[index], 'r') as f:\n for line in f:\n values = [float(token.strip()) for token in line.split(' ') if token.strip() != '']\n for c in range(6):\n camera[c] = values[c]\n continue\n break\n pass\n else:\n camera = self.camera[index]\n pass\n elif len(self.camera) == 6:\n camera = self.camera\n else:\n assert(False)\n pass\n\n image = cv2.resize(image, (640, 480), interpolation=cv2.INTER_LINEAR)\n camera[[0, 2, 4]] *= 640.0 / camera[4] \n camera[[1, 3, 5]] *= 480.0 / camera[5]\n\n ## The below codes just fill in dummy values for all other data entries which are not used for inference. 
You can ignore everything except some preprocessing operations on \"image\".\n depth = np.zeros((self.config.IMAGE_MIN_DIM, self.config.IMAGE_MAX_DIM), dtype=np.float32)\n segmentation = np.zeros((self.config.IMAGE_MIN_DIM, self.config.IMAGE_MAX_DIM), dtype=np.int32)\n\n\n planes = np.zeros((segmentation.max() + 1, 3))\n\n instance_masks = []\n class_ids = []\n parameters = []\n\n if len(planes) > 0:\n if 'joint' in self.config.ANCHOR_TYPE:\n distances = np.linalg.norm(np.expand_dims(planes, 1) - self.config.ANCHOR_PLANES, axis=-1)\n plane_anchors = distances.argmin(-1)\n elif self.config.ANCHOR_TYPE == 'Nd':\n plane_offsets = np.linalg.norm(planes, axis=-1)\n plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)\n distances_N = np.linalg.norm(np.expand_dims(plane_normals, 1) - self.config.ANCHOR_NORMALS, axis=-1)\n normal_anchors = distances_N.argmin(-1)\n distances_d = np.abs(np.expand_dims(plane_offsets, -1) - self.config.ANCHOR_OFFSETS)\n offset_anchors = distances_d.argmin(-1)\n elif self.config.ANCHOR_TYPE in ['normal', 'patch']:\n plane_offsets = np.linalg.norm(planes, axis=-1)\n plane_normals = planes / np.maximum(np.expand_dims(plane_offsets, axis=-1), 1e-4)\n distances_N = np.linalg.norm(np.expand_dims(plane_normals, 1) - self.config.ANCHOR_NORMALS, axis=-1)\n normal_anchors = distances_N.argmin(-1)\n elif self.config.ANCHOR_TYPE == 'normal_none':\n plane_offsets = np.linalg.norm(planes, axis=-1)\n plane_normals = planes / np.expand_dims(plane_offsets, axis=-1)\n pass\n pass\n\n for planeIndex, plane in enumerate(planes):\n m = segmentation == planeIndex\n if m.sum() < 1:\n continue\n instance_masks.append(m)\n if self.config.ANCHOR_TYPE == 'none':\n class_ids.append(1)\n parameters.append(np.concatenate([plane, np.zeros(1)], axis=0))\n elif 'joint' in self.config.ANCHOR_TYPE:\n class_ids.append(plane_anchors[planeIndex] + 1)\n residual = plane - self.config.ANCHOR_PLANES[plane_anchors[planeIndex]]\n parameters.append(np.concatenate([residual, np.array([0, plane_info[planeIndex][-1]])], axis=0))\n elif self.config.ANCHOR_TYPE == 'Nd':\n class_ids.append(normal_anchors[planeIndex] * len(self.config.ANCHOR_OFFSETS) + offset_anchors[planeIndex] + 1)\n normal = plane_normals[planeIndex] - self.config.ANCHOR_NORMALS[normal_anchors[planeIndex]]\n offset = plane_offsets[planeIndex] - self.config.ANCHOR_OFFSETS[offset_anchors[planeIndex]]\n parameters.append(np.concatenate([normal, np.array([offset])], axis=0))\n elif self.config.ANCHOR_TYPE == 'normal':\n class_ids.append(normal_anchors[planeIndex] + 1)\n normal = plane_normals[planeIndex] - self.config.ANCHOR_NORMALS[normal_anchors[planeIndex]]\n parameters.append(np.concatenate([normal, np.zeros(1)], axis=0))\n elif self.config.ANCHOR_TYPE == 'normal_none':\n class_ids.append(1)\n normal = plane_normals[planeIndex]\n parameters.append(np.concatenate([normal, np.zeros(1)], axis=0))\n else:\n assert(False)\n pass\n continue\n\n parameters = np.array(parameters)\n mask = np.stack(instance_masks, axis=2)\n class_ids = np.array(class_ids, dtype=np.int32)\n\n image, image_metas, gt_class_ids, gt_boxes, gt_masks, gt_parameters = load_image_gt(self.config, index, image, depth, mask, class_ids, parameters, augment=False)\n ## RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, self.anchors,\n gt_class_ids, gt_boxes, self.config)\n\n ## If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > self.config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), 
self.config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n gt_parameters = gt_parameters[ids]\n pass\n\n ## Add to batch\n rpn_match = rpn_match[:, np.newaxis]\n image = utils.mold_image(image.astype(np.float32), self.config)\n\n depth = np.concatenate([np.zeros((80, 640)), depth, np.zeros((80, 640))], axis=0).astype(np.float32)\n segmentation = np.concatenate([np.full((80, 640), fill_value=-1), segmentation, np.full((80, 640), fill_value=-1)], axis=0).astype(np.float32)\n\n data_pair = [image.transpose((2, 0, 1)).astype(np.float32), image_metas, rpn_match.astype(np.int32), rpn_bbox.astype(np.float32), gt_class_ids.astype(np.int32), gt_boxes.astype(np.float32), gt_masks.transpose((2, 0, 1)).astype(np.float32), gt_parameters[:, :-1].astype(np.float32), depth.astype(np.float32), extrinsics.astype(np.float32), planes.astype(np.float32), segmentation.astype(np.int64), gt_parameters[:, -1].astype(np.int32)]\n data_pair = data_pair + data_pair\n\n data_pair.append(np.zeros(7, np.float32))\n\n data_pair.append(planes)\n data_pair.append(planes)\n data_pair.append(np.zeros((len(planes), len(planes))))\n data_pair.append(camera.astype(np.float32))\n return data_pair\n\n def __len__(self):\n return len(self.imagePaths)\n", "\"\"\"\nCopyright (c) 2017 Matterport, Inc.\nCopyright (C) 2019 NVIDIA Corporation. All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license\n(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n\"\"\"\n\nimport datetime\nimport math\nimport os\nimport random\nimport re\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.autograd import Variable\n\nimport utils\nfrom nms.nms_wrapper import nms\nfrom roialign.roi_align.crop_and_resize import CropAndResizeFunction\nimport cv2\nfrom models.modules import *\nfrom utils import *\n\n############################################################\n# Pytorch Utility Functions\n############################################################\n\ndef unique1d(tensor):\n if tensor.size()[0] == 0 or tensor.size()[0] == 1:\n return tensor\n tensor = tensor.sort()[0]\n unique_bool = tensor[1:] != tensor [:-1]\n first_element = Variable(torch.ByteTensor([True]), requires_grad=False)\n if tensor.is_cuda:\n first_element = first_element.cuda()\n unique_bool = torch.cat((first_element, unique_bool),dim=0)\n return tensor[unique_bool.data]\n\ndef intersect1d(tensor1, tensor2):\n aux = torch.cat((tensor1, tensor2),dim=0)\n aux = aux.sort()[0]\n return aux[:-1][(aux[1:] == aux[:-1]).data]\n\ndef log2(x):\n \"\"\"Implementatin of Log2. 
Pytorch doesn't have a native implemenation.\"\"\"\n ln2 = Variable(torch.log(torch.FloatTensor([2.0])), requires_grad=False)\n if x.is_cuda:\n ln2 = ln2.cuda()\n return torch.log(x) / ln2\n\nclass SamePad2d(nn.Module):\n \"\"\"Mimics tensorflow's 'SAME' padding.\n \"\"\"\n\n def __init__(self, kernel_size, stride):\n super(SamePad2d, self).__init__()\n self.kernel_size = torch.nn.modules.utils._pair(kernel_size)\n self.stride = torch.nn.modules.utils._pair(stride)\n\n def forward(self, input):\n in_width = input.size()[2]\n in_height = input.size()[3]\n out_width = math.ceil(float(in_width) / float(self.stride[0]))\n out_height = math.ceil(float(in_height) / float(self.stride[1]))\n pad_along_width = ((out_width - 1) * self.stride[0] +\n self.kernel_size[0] - in_width)\n pad_along_height = ((out_height - 1) * self.stride[1] +\n self.kernel_size[1] - in_height)\n pad_left = math.floor(pad_along_width / 2)\n pad_top = math.floor(pad_along_height / 2)\n pad_right = pad_along_width - pad_left\n pad_bottom = pad_along_height - pad_top\n return F.pad(input, (pad_left, pad_right, pad_top, pad_bottom), 'constant', 0)\n\n def __repr__(self):\n return self.__class__.__name__\n\n\n############################################################\n# FPN Graph\n############################################################\n\nclass FPN(nn.Module):\n def __init__(self, C1, C2, C3, C4, C5, out_channels, bilinear_upsampling=False):\n super(FPN, self).__init__()\n self.out_channels = out_channels\n self.bilinear_upsampling = bilinear_upsampling\n self.C1 = C1\n self.C2 = C2\n self.C3 = C3\n self.C4 = C4\n self.C5 = C5\n self.P6 = nn.MaxPool2d(kernel_size=1, stride=2)\n self.P5_conv1 = nn.Conv2d(2048, self.out_channels, kernel_size=1, stride=1)\n self.P5_conv2 = nn.Sequential(\n SamePad2d(kernel_size=3, stride=1),\n nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),\n )\n self.P4_conv1 = nn.Conv2d(1024, self.out_channels, kernel_size=1, stride=1)\n self.P4_conv2 = nn.Sequential(\n SamePad2d(kernel_size=3, stride=1),\n nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),\n )\n self.P3_conv1 = nn.Conv2d(512, self.out_channels, kernel_size=1, stride=1)\n self.P3_conv2 = nn.Sequential(\n SamePad2d(kernel_size=3, stride=1),\n nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),\n )\n self.P2_conv1 = nn.Conv2d(256, self.out_channels, kernel_size=1, stride=1)\n self.P2_conv2 = nn.Sequential(\n SamePad2d(kernel_size=3, stride=1),\n nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1),\n )\n\n def forward(self, x):\n #print(\"x\",x.size())\n x = self.C1(x)\n x = self.C2(x)\n c2_out = x\n #print(\"c2_out\",c2_out.size())\n x = self.C3(x)\n c3_out = x\n #print(\"c3_out\",c3_out.size())\n x = self.C4(x)\n c4_out = x\n #print(\"c4_out\",c4_out.size())\n x = self.C5(x)\n p5_out = self.P5_conv1(x)\n #print(\"p5_out\",p5_out.size())\n \n if self.bilinear_upsampling:\n p4_out = self.P4_conv1(c4_out) + F.upsample(p5_out, scale_factor=2, mode='bilinear')\n p3_out = self.P3_conv1(c3_out) + F.upsample(p4_out, scale_factor=2, mode='bilinear')\n p2_out = self.P2_conv1(c2_out) + F.upsample(p3_out, scale_factor=2, mode='bilinear')\n else:\n p4_out = self.P4_conv1(c4_out) + F.upsample(p5_out, scale_factor=2)\n p3_out = self.P3_conv1(c3_out) + F.upsample(p4_out, scale_factor=2)\n p2_out = self.P2_conv1(c2_out) + F.upsample(p3_out, scale_factor=2)\n pass\n\n p5_out = self.P5_conv2(p5_out)\n p4_out = self.P4_conv2(p4_out)\n p3_out = self.P3_conv2(p3_out)\n 
p2_out = self.P2_conv2(p2_out)\n\n ## P6 is used for the 5th anchor scale in RPN. Generated by\n ## subsampling from P5 with stride of 2.\n p6_out = self.P6(p5_out)\n\n # print(p6_out[0].size())\n # print(p5_out[0].size())\n # print(p4_out[0].size())\n # print(p3_out[0].size())\n # print(p2_out[0].size())\n # print(\"done with p's\")\n\n return [p2_out, p3_out, p4_out, p5_out, p6_out]\n\n\n############################################################\n# Resnet Graph\n############################################################\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride)\n self.bn1 = nn.BatchNorm2d(planes, eps=0.001, momentum=0.01)\n self.padding2 = SamePad2d(kernel_size=3, stride=1)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3)\n self.bn2 = nn.BatchNorm2d(planes, eps=0.001, momentum=0.01)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1)\n self.bn3 = nn.BatchNorm2d(planes * 4, eps=0.001, momentum=0.01)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.padding2(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = out + residual\n out = self.relu(out)\n\n return out\n\nclass ResNet(nn.Module):\n\n def __init__(self, architecture, stage5=False, numInputChannels=3):\n super(ResNet, self).__init__()\n assert architecture in [\"resnet50\", \"resnet101\"]\n self.inplanes = 64\n self.layers = [3, 4, {\"resnet50\": 6, \"resnet101\": 23}[architecture], 3]\n self.block = Bottleneck\n self.stage5 = stage5\n\n self.C1 = nn.Sequential(\n nn.Conv2d(numInputChannels, 64, kernel_size=7, stride=2, padding=3),\n nn.BatchNorm2d(64, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True),\n SamePad2d(kernel_size=3, stride=2),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.C2 = self.make_layer(self.block, 64, self.layers[0])\n self.C3 = self.make_layer(self.block, 128, self.layers[1], stride=2)\n self.C4 = self.make_layer(self.block, 256, self.layers[2], stride=2)\n if self.stage5:\n self.C5 = self.make_layer(self.block, 512, self.layers[3], stride=2)\n else:\n self.C5 = None\n\n def forward(self, x):\n x = self.C1(x)\n x = self.C2(x)\n x = self.C3(x)\n x = self.C4(x)\n x = self.C5(x)\n return x\n\n\n def stages(self):\n return [self.C1, self.C2, self.C3, self.C4, self.C5]\n\n def make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride),\n nn.BatchNorm2d(planes * block.expansion, eps=0.001, momentum=0.01),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, 4] where each row is y1, x1, y2, 
x2\n deltas: [N, 4] where each row is [dy, dx, log(dh), log(dw)]\n \"\"\"\n ## Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n ## Apply deltas\n center_y = center_y + deltas[:, 0] * height\n center_x = center_x + deltas[:, 1] * width\n height = height * torch.exp(deltas[:, 2])\n width = width * torch.exp(deltas[:, 3])\n ## Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = torch.stack([y1, x1, y2, x2], dim=1)\n return result\n\ndef clip_boxes(boxes, window):\n \"\"\"\n boxes: [N, 4] each col is y1, x1, y2, x2\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n boxes = torch.stack( \\\n [boxes[:, 0].clamp(float(window[0]), float(window[2])),\n boxes[:, 1].clamp(float(window[1]), float(window[3])),\n boxes[:, 2].clamp(float(window[0]), float(window[2])),\n boxes[:, 3].clamp(float(window[1]), float(window[3]))], 1)\n return boxes\n\ndef proposal_layer(inputs, proposal_count, nms_threshold, anchors, config=None):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. It also applies bounding\n box refinment detals to anchors.\n\n Inputs:\n rpn_probs: [batch, anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n #print(\"proposal count\", proposal_count)\n ## Currently only supports batchsize 1\n inputs[0] = inputs[0].squeeze(0)\n inputs[1] = inputs[1].squeeze(0)\n\n ## Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, 1]\n #print('scores',scores.size())\n\n ## Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n #print('deltas',deltas.size())\n\n std_dev = Variable(torch.from_numpy(np.reshape(config.RPN_BBOX_STD_DEV, [1, 4])).float(), requires_grad=False)\n if config.GPU_COUNT:\n std_dev = std_dev.cuda()\n deltas = deltas * std_dev\n #print('deltas2', deltas.size())\n ## Improve performance by trimming to top anchors by score\n ## and doing the rest on the smaller subset.\n #\n # print(\"anchor size\", anchors.size())\n # print(\"anchor size\", len(anchors))\n\n pre_nms_limit = min(6000, anchors.size()[0])\n scores, order = scores.sort(descending=True)\n order = order[:pre_nms_limit]\n scores = scores[:pre_nms_limit]\n deltas = deltas[order.data, :]\n anchors = anchors[order.data, :]\n\n #print(anchors.size())\n ## Apply deltas to anchors to get refined anchors.\n ## [batch, N, (y1, x1, y2, x2)]\n boxes = apply_box_deltas(anchors, deltas)\n #print('boxes', boxes.size())\n\n ## Clip to image boundaries. 
[batch, N, (y1, x1, y2, x2)]\n height, width = config.IMAGE_SHAPE[:2]\n window = np.array([0, 0, height, width]).astype(np.float32)\n boxes = clip_boxes(boxes, window)\n #print('boxes2', boxes.size())\n\n ## Filter out small boxes\n ## According to Xinlei Chen's paper, this reduces detection accuracy\n ## for small objects, so we're skipping it.\n\n ## Non-max suppression\n keep = nms(torch.cat((boxes, scores.unsqueeze(1)), 1).data, nms_threshold)\n #print('keep0', keep.size())\n keep = keep[:proposal_count]\n #print('keep', keep.size())\n boxes = boxes[keep, :]\n #print('boxes3', boxes.size())\n \n \n ## Normalize dimensions to range of 0 to 1.\n norm = Variable(torch.from_numpy(np.array([height, width, height, width])).float(), requires_grad=False)\n if config.GPU_COUNT:\n norm = norm.cuda()\n normalized_boxes = boxes / norm\n #print('norm_boxes', normalized_boxes.size())\n ## Add back batch dimension\n normalized_boxes = normalized_boxes.unsqueeze(0)\n\n #print(normalized_boxes.size())\n\n return normalized_boxes\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef pyramid_roi_align(inputs, pool_size, image_shape):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_size: [height, width] of the output pooled regions. Usually [7, 7]\n - image_shape: [height, width, channels]. Shape of input image in pixels\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates.\n - Feature maps: List of feature maps from different levels of the pyramid.\n Each is [batch, channels, height, width]\n\n Output:\n Pooled regions in the shape: [num_boxes, height, width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n # print(\"inputs\",len(inputs))\n # print(\"inputs0\", len(inputs[0]))\n # print(\"inputs00\", inputs[0].size())\n # print(\"inputs1\", len(inputs[1]))\n\n ## Currently only supports batchsize 1\n for i in range(len(inputs)):\n inputs[i] = inputs[i].squeeze(0)\n\n ## Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n # print('boxes', len(boxes), boxes.size())\n\n ## Feature Maps. List of feature maps from different level of the\n ## feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[1:]\n # print('feature_maps', len(feature_maps), feature_maps[0].size())\n\n ## Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = boxes.chunk(4, dim=1)\n h = y2 - y1\n w = x2 - x1\n\n ## Equation 1 in the Feature Pyramid Networks paper. Account for\n ## the fact that our coordinates are normalized here.\n ## e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = Variable(torch.FloatTensor([float(image_shape[0]*image_shape[1])]), requires_grad=False)\n if boxes.is_cuda:\n image_area = image_area.cuda()\n roi_level = 4 + log2(torch.sqrt(h*w)/(224.0/torch.sqrt(image_area)))\n roi_level = roi_level.round().int()\n roi_level = roi_level.clamp(2, 5)\n\n\n ## Loop through levels and apply ROI pooling to each. 
P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = roi_level==level\n if not ix.any():\n continue\n ix = torch.nonzero(ix)[:,0]\n level_boxes = boxes[ix.data, :]\n\n ## Keep track of which box is mapped to which level\n box_to_level.append(ix.data)\n\n ## Stop gradient propogation to ROI proposals\n level_boxes = level_boxes.detach()\n\n ## Crop and Resize\n ## From Mask R-CNN paper: \"We sample four regular locations, so\n ## that we can evaluate either max or average pooling. In fact,\n ## interpolating only a single value at each bin center (without\n ## pooling) is nearly as effective.\"\n #\n ## Here we use the simplified approach of a single value per bin,\n ## which is how it's done in tf.crop_and_resize()\n ## Result: [batch * num_boxes, pool_height, pool_width, channels]\n ind = Variable(torch.zeros(level_boxes.size()[0]),requires_grad=False).int()\n if level_boxes.is_cuda:\n ind = ind.cuda()\n feature_maps[i] = feature_maps[i].unsqueeze(0) #CropAndResizeFunction needs batch dimension\n pooled_features = CropAndResizeFunction(pool_size, pool_size, 0)(feature_maps[i], level_boxes, ind)\n pooled.append(pooled_features)\n\n ## Pack pooled features into one tensor\n pooled = torch.cat(pooled, dim=0)\n ## Pack box_to_level mapping into one array and add another\n ## column representing the order of pooled boxes\n box_to_level = torch.cat(box_to_level, dim=0)\n\n ## Rearrange pooled features to match the order of the original boxes\n _, box_to_level = torch.sort(box_to_level)\n pooled = pooled[box_to_level, :, :]\n\n return pooled\n\ndef coordinates_roi(inputs, pool_size, image_shape):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_size: [height, width] of the output pooled regions. Usually [7, 7]\n - image_shape: [height, width, channels]. Shape of input image in pixels\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates.\n - Feature maps: List of feature maps from different levels of the pyramid.\n Each is [batch, channels, height, width]\n\n Output:\n Pooled regions in the shape: [num_boxes, height, width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n # print('inputs', type(inputs), len(inputs))\n # print(inputs[0].size())\n # print(inputs[1].size())\n\n ## Currently only supports batchsize 1\n for i in range(len(inputs)):\n inputs[i] = inputs[i].squeeze(0)\n\n ## Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n # print('boxes', boxes.size())\n\n ## Feature Maps. List of feature maps from different level of the\n ## feature pyramid. Each is [batch, height, width, channels]\n cooridnates = inputs[1]\n # print(\"cordi\", cooridnates.size())\n\n ## Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = boxes.chunk(4, dim=1)\n h = y2 - y1\n w = x2 - x1\n\n ## Loop through levels and apply ROI pooling to each. 
P2 to P5.\n pooled = []\n ## Stop gradient propogation to ROI proposals\n boxes = boxes.detach()\n\n ind = Variable(torch.zeros(boxes.size()[0]),requires_grad=False).int()\n if boxes.is_cuda:\n ind = ind.cuda()\n cooridnates = cooridnates.unsqueeze(0) ## CropAndResizeFunction needs batch dimension\n pooled_features = CropAndResizeFunction(pool_size, pool_size, 0)(cooridnates, boxes, ind)\n\n return pooled_features\n\n\n############################################################\n## Detection Target Layer\n############################################################\ndef bbox_overlaps(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n ## 1. Tile boxes2 and repeate boxes1. This allows us to compare\n ## every boxes1 against every boxes2 without loops.\n ## TF doesn't have an equivalent to np.repeate() so simulate it\n ## using tf.tile() and tf.reshape.\n boxes1_repeat = boxes2.size()[0]\n boxes2_repeat = boxes1.size()[0]\n boxes1 = boxes1.repeat(1,boxes1_repeat).view(-1,4)\n boxes2 = boxes2.repeat(boxes2_repeat,1)\n\n ## 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = boxes1.chunk(4, dim=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = boxes2.chunk(4, dim=1)\n y1 = torch.max(b1_y1, b2_y1)[:, 0]\n x1 = torch.max(b1_x1, b2_x1)[:, 0]\n y2 = torch.min(b1_y2, b2_y2)[:, 0]\n x2 = torch.min(b1_x2, b2_x2)[:, 0]\n zeros = Variable(torch.zeros(y1.size()[0]), requires_grad=False)\n if y1.is_cuda:\n zeros = zeros.cuda()\n intersection = torch.max(x2 - x1, zeros) * torch.max(y2 - y1, zeros)\n\n ## 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area[:,0] + b2_area[:,0] - intersection\n\n ## 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = iou.view(boxes2_repeat, boxes1_repeat)\n\n return overlaps\n\ndef detection_target_layer(proposals, gt_class_ids, gt_boxes, gt_masks, gt_parameters, config):\n \"\"\"Subsamples proposals and generates target box refinment, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,\n (dy, dx, log(dh), log(dw), class_id)]\n Class-specific bbox refinments.\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n \"\"\"\n\n ## Currently only supports batchsize 1\n proposals = proposals.squeeze(0)\n gt_class_ids = gt_class_ids.squeeze(0)\n gt_boxes = gt_boxes.squeeze(0)\n gt_masks = gt_masks.squeeze(0)\n gt_parameters = gt_parameters.squeeze(0)\n no_crowd_bool = Variable(torch.ByteTensor(proposals.size()[0]*[True]), requires_grad=False)\n if config.GPU_COUNT:\n no_crowd_bool = no_crowd_bool.cuda()\n\n ## Compute overlaps matrix [proposals, gt_boxes]\n overlaps = bbox_overlaps(proposals, gt_boxes)\n\n ## Determine postive and negative ROIs\n roi_iou_max = torch.max(overlaps, dim=1)[0]\n\n ## 1. 
Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = roi_iou_max >= 0.5\n #print('positive count', positive_roi_bool.sum())\n\n ## Subsample ROIs. Aim for 33% positive\n ## Positive ROIs\n if positive_roi_bool.sum() > 0:\n positive_indices = torch.nonzero(positive_roi_bool)[:, 0]\n\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n rand_idx = torch.randperm(positive_indices.size()[0])\n rand_idx = rand_idx[:positive_count]\n if config.GPU_COUNT:\n rand_idx = rand_idx.cuda()\n positive_indices = positive_indices[rand_idx]\n positive_count = positive_indices.size()[0]\n positive_rois = proposals[positive_indices.data,:]\n\n ## Assign positive ROIs to GT boxes.\n positive_overlaps = overlaps[positive_indices.data,:]\n roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]\n roi_gt_boxes = gt_boxes[roi_gt_box_assignment.data,:]\n roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment.data]\n roi_gt_parameters = gt_parameters[roi_gt_box_assignment.data]\n \n ## Compute bbox refinement for positive ROIs\n deltas = Variable(utils.box_refinement(positive_rois.data, roi_gt_boxes.data), requires_grad=False)\n std_dev = Variable(torch.from_numpy(config.BBOX_STD_DEV).float(), requires_grad=False)\n if config.GPU_COUNT:\n std_dev = std_dev.cuda()\n deltas /= std_dev\n\n ## Assign positive ROIs to GT masks\n roi_masks = gt_masks[roi_gt_box_assignment.data]\n\n ## Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n ## Transform ROI corrdinates from normalized image space\n ## to normalized mini-mask space.\n y1, x1, y2, x2 = positive_rois.chunk(4, dim=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = roi_gt_boxes.chunk(4, dim=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = torch.cat([y1, x1, y2, x2], dim=1)\n box_ids = Variable(torch.arange(roi_masks.size()[0]), requires_grad=False).int()\n if config.GPU_COUNT:\n box_ids = box_ids.cuda()\n\n if config.NUM_PARAMETER_CHANNELS > 0:\n masks = Variable(CropAndResizeFunction(config.MASK_SHAPE[0], config.MASK_SHAPE[1], 0)(roi_masks[:, :, :, 0].contiguous().unsqueeze(1), boxes, box_ids).data, requires_grad=False).squeeze(1)\n masks = torch.round(masks)\n parameters = Variable(CropAndResizeFunction(config.MASK_SHAPE[0], config.MASK_SHAPE[1], 0)(roi_masks[:, :, :, 1].contiguous().unsqueeze(1), boxes, box_ids).data, requires_grad=False).squeeze(1)\n masks = torch.stack([masks, parameters], dim=-1)\n else:\n masks = Variable(CropAndResizeFunction(config.MASK_SHAPE[0], config.MASK_SHAPE[1], 0)(roi_masks.unsqueeze(1), boxes, box_ids).data, requires_grad=False).squeeze(1) \n masks = torch.round(masks) \n pass\n\n ## Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n ## binary cross entropy loss.\n else:\n positive_count = 0\n\n ## 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_roi_bool = roi_iou_max < 0.5\n negative_roi_bool = negative_roi_bool & no_crowd_bool\n ## Negative ROIs. 
Add enough to maintain positive:negative ratio.\n if (negative_roi_bool > 0).sum() > 0 and positive_count>0:\n negative_indices = torch.nonzero(negative_roi_bool)[:, 0]\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = int(r * positive_count - positive_count)\n rand_idx = torch.randperm(negative_indices.size()[0])\n rand_idx = rand_idx[:negative_count]\n if config.GPU_COUNT:\n rand_idx = rand_idx.cuda()\n negative_indices = negative_indices[rand_idx]\n negative_count = negative_indices.size()[0]\n negative_rois = proposals[negative_indices.data, :]\n else:\n negative_count = 0\n\n #print('count', positive_count, negative_count)\n #print(roi_gt_class_ids)\n \n ## Append negative ROIs and pad bbox deltas and masks that\n ## are not used for negative ROIs with zeros.\n if positive_count > 0 and negative_count > 0:\n rois = torch.cat((positive_rois, negative_rois), dim=0)\n zeros = Variable(torch.zeros(negative_count), requires_grad=False).int()\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n roi_gt_class_ids = torch.cat([roi_gt_class_ids, zeros], dim=0)\n zeros = Variable(torch.zeros(negative_count, 4), requires_grad=False)\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n deltas = torch.cat([deltas, zeros], dim=0)\n if config.NUM_PARAMETER_CHANNELS > 0:\n zeros = Variable(torch.zeros(negative_count,config.MASK_SHAPE[0],config.MASK_SHAPE[1], 2), requires_grad=False)\n else:\n zeros = Variable(torch.zeros(negative_count,config.MASK_SHAPE[0],config.MASK_SHAPE[1]), requires_grad=False)\n pass\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n masks = torch.cat([masks, zeros], dim=0)\n \n zeros = Variable(torch.zeros(negative_count, config.NUM_PARAMETERS), requires_grad=False)\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n roi_gt_parameters = torch.cat([roi_gt_parameters, zeros], dim=0)\n elif positive_count > 0:\n rois = positive_rois\n elif negative_count > 0:\n rois = negative_rois\n zeros = Variable(torch.zeros(negative_count), requires_grad=False)\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n roi_gt_class_ids = zeros\n zeros = Variable(torch.zeros(negative_count, 4), requires_grad=False).int()\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n deltas = zeros\n zeros = Variable(torch.zeros(negative_count,config.MASK_SHAPE[0],config.MASK_SHAPE[1]), requires_grad=False)\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n masks = zeros\n\n zeros = Variable(torch.zeros(negative_count, config.NUM_PARAMETERS), requires_grad=False)\n if config.GPU_COUNT:\n zeros = zeros.cuda()\n roi_gt_parameters = torch.cat([roi_gt_parameters, zeros], dim=0) \n else:\n rois = Variable(torch.FloatTensor(), requires_grad=False)\n roi_gt_class_ids = Variable(torch.IntTensor(), requires_grad=False)\n deltas = Variable(torch.FloatTensor(), requires_grad=False)\n masks = Variable(torch.FloatTensor(), requires_grad=False)\n roi_gt_parameters = Variable(torch.FloatTensor(), requires_grad=False)\n if config.GPU_COUNT:\n rois = rois.cuda()\n roi_gt_class_ids = roi_gt_class_ids.cuda()\n deltas = deltas.cuda()\n masks = masks.cuda()\n roi_gt_parameters = roi_gt_parameters.cuda()\n pass\n\n return rois, roi_gt_class_ids, deltas, masks, roi_gt_parameters\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef clip_to_window(window, boxes):\n \"\"\"\n window: (y1, x1, y2, x2). 
The window in the image we want to clip to.\n boxes: [N, (y1, x1, y2, x2)]\n \"\"\"\n boxes = torch.stack([boxes[:, 0].clamp(float(window[0]), float(window[2])), boxes[:, 1].clamp(float(window[1]), float(window[3])), boxes[:, 2].clamp(float(window[0]), float(window[2])), boxes[:, 3].clamp(float(window[1]), float(window[3]))], dim=-1)\n return boxes\n\ndef refine_detections(rois, probs, deltas, parameters, window, config, return_indices=False, use_nms=1, one_hot=True):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in image coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [N, (y1, x1, y2, x2, class_id, score)]\n \"\"\"\n\n ## Class IDs per ROI\n \n if len(probs.shape) == 1:\n class_ids = probs.long()\n else:\n _, class_ids = torch.max(probs, dim=1)\n pass\n\n ## Class probability of the top class of each ROI\n ## Class-specific bounding box deltas\n idx = torch.arange(class_ids.size()[0]).long()\n if config.GPU_COUNT:\n idx = idx.cuda()\n \n if len(probs.shape) == 1:\n class_scores = torch.ones(class_ids.shape)\n deltas_specific = deltas\n class_parameters = parameters\n if config.GPU_COUNT:\n class_scores = class_scores.cuda()\n else:\n class_scores = probs[idx, class_ids.data]\n deltas_specific = deltas[idx, class_ids.data]\n class_parameters = parameters[idx, class_ids.data]\n ## Apply bounding box deltas\n ## Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n std_dev = Variable(torch.from_numpy(np.reshape(config.RPN_BBOX_STD_DEV, [1, 4])).float(), requires_grad=False)\n if config.GPU_COUNT:\n std_dev = std_dev.cuda()\n \n refined_rois = apply_box_deltas(rois, deltas_specific * std_dev)\n ## Convert coordiates to image domain\n height, width = config.IMAGE_SHAPE[:2]\n scale = Variable(torch.from_numpy(np.array([height, width, height, width])).float(), requires_grad=False)\n if config.GPU_COUNT:\n scale = scale.cuda()\n refined_rois = refined_rois * scale\n ## Clip boxes to image window\n refined_rois = clip_to_window(window, refined_rois)\n\n ## Round and cast to int since we're deadling with pixels now\n refined_rois = torch.round(refined_rois)\n \n ## TODO: Filter out boxes with zero area\n\n ## Filter out background boxes\n keep_bool = class_ids > 0\n\n ## Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE and False:\n keep_bool = keep_bool & (class_scores >= config.DETECTION_MIN_CONFIDENCE)\n\n keep_bool = keep_bool & (refined_rois[:, 2] > refined_rois[:, 0]) & (refined_rois[:, 3] > refined_rois[:, 1])\n\n if keep_bool.sum() == 0:\n if return_indices:\n return torch.zeros((0, 10)).cuda(), torch.zeros(0).long().cuda(), torch.zeros((0, 4)).cuda()\n else:\n return torch.zeros((0, 10)).cuda()\n pass\n \n keep = torch.nonzero(keep_bool)[:,0]\n\n if use_nms == 2:\n ## Apply per-class NMS\n pre_nms_class_ids = class_ids[keep.data]\n pre_nms_scores = class_scores[keep.data]\n pre_nms_rois = refined_rois[keep.data]\n\n ixs = torch.arange(len(pre_nms_class_ids)).long().cuda()\n ## Sort\n ix_rois = pre_nms_rois\n ix_scores = pre_nms_scores\n ix_scores, order = ix_scores.sort(descending=True)\n ix_rois = ix_rois[order.data,:]\n \n nms_keep = nms(torch.cat((ix_rois, ix_scores.unsqueeze(1)), dim=1).data, 
config.DETECTION_NMS_THRESHOLD)\n nms_keep = keep[ixs[order[nms_keep].data].data]\n keep = intersect1d(keep, nms_keep) \n elif use_nms == 1:\n ## Apply per-class NMS\n pre_nms_class_ids = class_ids[keep.data]\n pre_nms_scores = class_scores[keep.data]\n pre_nms_rois = refined_rois[keep.data]\n\n for i, class_id in enumerate(unique1d(pre_nms_class_ids)):\n ## Pick detections of this class\n ixs = torch.nonzero(pre_nms_class_ids == class_id)[:,0]\n\n ## Sort\n ix_rois = pre_nms_rois[ixs.data]\n ix_scores = pre_nms_scores[ixs]\n ix_scores, order = ix_scores.sort(descending=True)\n ix_rois = ix_rois[order.data,:]\n\n class_keep = nms(torch.cat((ix_rois, ix_scores.unsqueeze(1)), dim=1).data, config.DETECTION_NMS_THRESHOLD)\n\n ## Map indicies\n class_keep = keep[ixs[order[class_keep].data].data]\n\n if i==0:\n nms_keep = class_keep\n else:\n nms_keep = unique1d(torch.cat((nms_keep, class_keep)))\n keep = intersect1d(keep, nms_keep)\n else:\n pass\n\n ## Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n top_ids = class_scores[keep.data].sort(descending=True)[1][:roi_count]\n keep = keep[top_ids.data]\n #print('num detectinos', len(keep))\n\n ### Apply plane anchors\n class_parameters = config.applyAnchorsTensor(class_ids, class_parameters)\n ## Arrange output as [N, (y1, x1, y2, x2, class_id, score, parameters)]\n ## Coordinates are in image domain.\n result = torch.cat((refined_rois[keep.data],\n class_ids[keep.data].unsqueeze(1).float(),\n class_scores[keep.data].unsqueeze(1),\n class_parameters[keep.data]), dim=1)\n\n if return_indices:\n ori_rois = rois * scale\n ori_rois = clip_to_window(window, ori_rois)\n ori_rois = torch.round(ori_rois)\n ori_rois = ori_rois[keep.data]\n return result, keep.data, ori_rois\n \n return result\n\n\ndef detection_layer(config, rois, mrcnn_class, mrcnn_bbox, mrcnn_parameter, image_meta, return_indices=False, use_nms=1, one_hot=True):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels\n \"\"\"\n\n ## Currently only supports batchsize 1\n rois = rois.squeeze(0)\n\n _, _, window, _ = parse_image_meta(image_meta)\n window = window[0]\n if len(mrcnn_class) == 0:\n if return_indices:\n return torch.zeros(0), torch.zeros(0), torch.zeros(0)\n else:\n return torch.zeros(0)\n \n return refine_detections(rois, mrcnn_class, mrcnn_bbox, mrcnn_parameter, window, config, return_indices=return_indices, use_nms=use_nms, one_hot=one_hot)\n\n\n\n############################################################\n# Region Proposal Network\n############################################################\n\nclass RPN(nn.Module):\n \"\"\"Builds the model of Region Proposal Network.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. 
Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_logits: [batch, H, W, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, W, W, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H, W, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n\n def __init__(self, anchors_per_location, anchor_stride, depth):\n super(RPN, self).__init__()\n self.anchors_per_location = anchors_per_location\n self.anchor_stride = anchor_stride\n self.depth = depth\n\n self.padding = SamePad2d(kernel_size=3, stride=self.anchor_stride)\n self.conv_shared = nn.Conv2d(self.depth, 512, kernel_size=3, stride=self.anchor_stride)\n self.relu = nn.ReLU(inplace=True)\n self.conv_class = nn.Conv2d(512, 2 * anchors_per_location, kernel_size=1, stride=1)\n self.softmax = nn.Softmax(dim=2)\n self.conv_bbox = nn.Conv2d(512, 4 * anchors_per_location, kernel_size=1, stride=1)\n\n def forward(self, x):\n ## Shared convolutional base of the RPN\n x = self.relu(self.conv_shared(self.padding(x)))\n\n ## Anchor Score. [batch, anchors per location * 2, height, width].\n rpn_class_logits = self.conv_class(x)\n\n ## Reshape to [batch, 2, anchors]\n rpn_class_logits = rpn_class_logits.permute(0,2,3,1)\n rpn_class_logits = rpn_class_logits.contiguous()\n rpn_class_logits = rpn_class_logits.view(x.size()[0], -1, 2)\n\n ## Softmax on last dimension of BG/FG.\n rpn_probs = self.softmax(rpn_class_logits)\n\n ## Bounding box refinement. [batch, H, W, anchors per location, depth]\n ## where depth is [x, y, log(w), log(h)]\n rpn_bbox = self.conv_bbox(x)\n\n ## Reshape to [batch, 4, anchors]\n rpn_bbox = rpn_bbox.permute(0,2,3,1)\n rpn_bbox = rpn_bbox.contiguous()\n rpn_bbox = rpn_bbox.view(x.size()[0], -1, 4)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\nclass Classifier(nn.Module):\n def __init__(self, depth, pool_size, image_shape, num_classes, num_parameters, debug=False):\n super(Classifier, self).__init__()\n self.depth = depth\n self.pool_size = pool_size\n self.image_shape = image_shape\n self.num_classes = num_classes\n self.num_parameters = num_parameters\n self.conv1 = nn.Conv2d(self.depth + 64, 1024, kernel_size=self.pool_size, stride=1)\n self.bn1 = nn.BatchNorm2d(1024, eps=0.001, momentum=0.01)\n self.conv2 = nn.Conv2d(1024, 1024, kernel_size=1, stride=1)\n self.bn2 = nn.BatchNorm2d(1024, eps=0.001, momentum=0.01)\n self.relu = nn.ReLU(inplace=True)\n\n self.linear_class = nn.Linear(1024, num_classes)\n self.softmax = nn.Softmax(dim=1)\n\n self.linear_bbox = nn.Linear(1024, num_classes * 4)\n\n self.debug = debug\n if self.debug: \n self.linear_parameters = nn.Linear(3, num_classes * self.num_parameters)\n else:\n self.linear_parameters = nn.Linear(1024, num_classes * self.num_parameters)\n pass\n\n def forward(self, x, rois, ranges, pool_features=True, gt=None):\n # print('rois',len(rois), rois.size())\n # print('ranges_in', len(ranges), ranges.size())\n x = pyramid_roi_align([rois] + x, self.pool_size, self.image_shape)\n # print('x', x.size())\n ranges = coordinates_roi([rois] + [ranges, ], self.pool_size, self.image_shape)\n # print('ranges', ranges.size())\n roi_features = torch.cat([x, ranges], dim=1)\n x = self.conv1(roi_features)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu(x)\n\n x = x.view(-1,1024)\n mrcnn_class_logits 
= self.linear_class(x)\n mrcnn_probs = self.softmax(mrcnn_class_logits)\n\n mrcnn_bbox = self.linear_bbox(x)\n mrcnn_bbox = mrcnn_bbox.view(mrcnn_bbox.size()[0], -1, 4)\n\n if self.debug:\n x = gt\n pass\n \n mrcnn_parameters = self.linear_parameters(x)\n \n if self.debug:\n pass\n \n mrcnn_parameters = mrcnn_parameters.view(mrcnn_parameters.size()[0], -1, self.num_parameters)\n # print('param', mrcnn_parameters.size())\n\n if pool_features:\n return [mrcnn_class_logits, mrcnn_probs, mrcnn_bbox, mrcnn_parameters, roi_features]\n else:\n return [mrcnn_class_logits, mrcnn_probs, mrcnn_bbox, mrcnn_parameters]\n\nclass Mask(nn.Module):\n def __init__(self, config, depth, pool_size, image_shape, num_classes):\n super(Mask, self).__init__()\n self.config = config\n self.depth = depth\n self.pool_size = pool_size\n self.image_shape = image_shape\n self.num_classes = num_classes\n self.padding = SamePad2d(kernel_size=3, stride=1)\n self.conv1 = nn.Conv2d(self.depth, 256, kernel_size=3, stride=1)\n self.bn1 = nn.BatchNorm2d(256, eps=0.001)\n self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1)\n self.bn2 = nn.BatchNorm2d(256, eps=0.001)\n self.conv3 = nn.Conv2d(256, 256, kernel_size=3, stride=1)\n self.bn3 = nn.BatchNorm2d(256, eps=0.001)\n self.conv4 = nn.Conv2d(256, 256, kernel_size=3, stride=1)\n self.bn4 = nn.BatchNorm2d(256, eps=0.001)\n self.deconv = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2)\n self.conv5 = nn.Conv2d(256, num_classes + config.NUM_PARAMETER_CHANNELS, kernel_size=1, stride=1)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x, rois, pool_features=True):\n if pool_features:\n roi_features = pyramid_roi_align([rois] + x, self.pool_size, self.image_shape)\n # print('roi_features', len(roi_features), roi_features[0].size(), roi_features[1].size())\n\n else:\n roi_features = x\n pass\n x = self.conv1(self.padding(roi_features))\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(self.padding(x))\n x = self.bn2(x)\n x = self.relu(x)\n x = self.conv3(self.padding(x))\n x = self.bn3(x)\n x = self.relu(x)\n x = self.conv4(self.padding(x))\n x = self.bn4(x)\n x = self.relu(x)\n x = self.deconv(x)\n x = self.relu(x)\n x = self.conv5(x)\n if self.config.NUM_PARAMETER_CHANNELS > 0 and not self.config.OCCLUSION:\n x = torch.cat([self.sigmoid(x[:, :-self.num_parameter_channels]), x[:, -self.num_parameter_channels:]], dim=1)\n else:\n # print('x_maks',len(x),x[0].size())\n x = self.sigmoid(x)\n pass\n return x, roi_features\n\nclass Depth(nn.Module):\n def __init__(self, num_output_channels=1):\n super(Depth, self).__init__()\n self.num_output_channels = num_output_channels \n self.conv1 = nn.Sequential(\n nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n self.conv3 = nn.Sequential(\n nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n self.conv4 = nn.Sequential(\n nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n self.conv5 = nn.Sequential(\n nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n \n self.deconv1 = nn.Sequential(\n 
torch.nn.Upsample(scale_factor=2, mode='nearest'),\n nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n self.deconv2 = nn.Sequential(\n torch.nn.Upsample(scale_factor=2, mode='nearest'),\n nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n self.deconv3 = nn.Sequential(\n torch.nn.Upsample(scale_factor=2, mode='nearest'),\n nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n self.deconv4 = nn.Sequential(\n torch.nn.Upsample(scale_factor=2, mode='nearest'),\n nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n self.deconv5 = nn.Sequential(\n torch.nn.Upsample(scale_factor=2, mode='nearest'),\n nn.Conv2d(256, 64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(64, eps=0.001, momentum=0.01),\n nn.ReLU(inplace=True)\n )\n \n self.depth_pred = nn.Conv2d(64, num_output_channels, kernel_size=3, stride=1, padding=1)\n\n self.crop = True\n return\n \n def forward(self, feature_maps):\n if self.crop:\n padding = 5\n # print(feature_maps[0].size())\n # print(feature_maps[1].size())\n # print(feature_maps[2].size())\n # print(feature_maps[3].size())\n for c in range(2, 5):\n\n feature_maps[c] = feature_maps[c][:, :, padding * pow(2, c - 2):-padding * pow(2, c - 2)]\n continue\n pass\n x = self.deconv1(self.conv1(feature_maps[0]))\n x = self.deconv2(torch.cat([self.conv2(feature_maps[1]), x], dim=1))\n if self.crop:\n x = x[:, :, 5:35]\n x = self.deconv3(torch.cat([self.conv3(feature_maps[2]), x], dim=1))\n x = self.deconv4(torch.cat([self.conv4(feature_maps[3]), x], dim=1))\n x = self.deconv5(torch.cat([self.conv5(feature_maps[4]), x], dim=1))\n x = self.depth_pred(x)\n \n if self.crop:\n x = torch.nn.functional.interpolate(x, size=(480, 640), mode='bilinear')\n zeros = torch.zeros((len(x), self.num_output_channels, 80, 640)).cuda()\n x = torch.cat([zeros, x, zeros], dim=2)\n else:\n x = torch.nn.functional.interpolate(x, size=(640, 640), mode='bilinear')\n pass\n return x\n\n\n \n \n############################################################\n# Loss Functions\n############################################################\n\ndef compute_rpn_class_loss(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.\n \"\"\"\n\n ## Squeeze last dim to simplify\n rpn_match = rpn_match.squeeze(2)\n\n ## Get anchor classes. 
Convert the -1/+1 match to 0/1 values.\n anchor_class = (rpn_match == 1).long()\n\n ## Positive and Negative anchors contribute to the loss,\n ## but neutral anchors (match value = 0) don't.\n indices = torch.nonzero(rpn_match != 0)\n\n ## Pick rows that contribute to the loss and filter out the rest.\n rpn_class_logits = rpn_class_logits[indices.data[:,0],indices.data[:,1],:]\n anchor_class = anchor_class[indices.data[:,0],indices.data[:,1]]\n\n ## Crossentropy loss\n loss = F.cross_entropy(rpn_class_logits, anchor_class)\n\n return loss\n\ndef compute_rpn_bbox_loss(target_bbox, rpn_match, rpn_bbox):\n \"\"\"Return the RPN bounding box loss graph.\n\n target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].\n Uses 0 padding to fill in unsed bbox deltas.\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]\n \"\"\"\n\n ## Squeeze last dim to simplify\n rpn_match = rpn_match.squeeze(2)\n\n ## Positive anchors contribute to the loss, but negative and\n ## neutral anchors (match value of 0 or -1) don't.\n indices = torch.nonzero(rpn_match==1)\n ## Pick bbox deltas that contribute to the loss\n rpn_bbox = rpn_bbox[indices.data[:,0],indices.data[:,1]]\n\n ## Trim target bounding box deltas to the same length as rpn_bbox.\n target_bbox = target_bbox[0,:rpn_bbox.size()[0],:]\n\n ## Smooth L1 loss\n loss = F.smooth_l1_loss(rpn_bbox, target_bbox)\n\n return loss\n\n\ndef compute_mrcnn_class_loss(target_class_ids, pred_class_logits):\n \"\"\"Loss for the classifier head of Mask RCNN.\n\n target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n \"\"\"\n\n ## Loss\n if len(target_class_ids) > 0:\n loss = F.cross_entropy(pred_class_logits,target_class_ids.long())\n else:\n loss = Variable(torch.FloatTensor([0]), requires_grad=False)\n if target_class_ids.is_cuda:\n loss = loss.cuda()\n\n return loss\n\n\ndef compute_mrcnn_bbox_loss(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n\n if (target_class_ids > 0).sum() > 0:\n ## Only positive ROIs contribute to the loss. And only\n ## the right class_id of each ROI. Get their indicies.\n positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]\n positive_roi_class_ids = target_class_ids[positive_roi_ix.data].long()\n indices = torch.stack((positive_roi_ix,positive_roi_class_ids), dim=1)\n\n ## Gather the deltas (predicted and true) that contribute to loss\n target_bbox = target_bbox[indices[:,0].data,:]\n pred_bbox = pred_bbox[indices[:,0].data,indices[:,1].data,:]\n\n ## Smooth L1 loss\n loss = F.smooth_l1_loss(pred_bbox, target_bbox)\n else:\n loss = Variable(torch.FloatTensor([0]), requires_grad=False)\n if target_class_ids.is_cuda:\n loss = loss.cuda()\n\n return loss\n\n\ndef compute_mrcnn_mask_loss(config, target_masks, target_class_ids, target_parameters, pred_masks):\n \"\"\"Mask binary cross-entropy loss for the masks head.\n\n target_masks: [batch, num_rois, height, width].\n A float32 tensor of values 0 or 1. Uses zero padding to fill array.\n target_class_ids: [batch, num_rois]. Integer class IDs. 
Zero padded.\n pred_masks: [batch, proposals, height, width, num_classes] float32 tensor\n with values from 0 to 1.\n \"\"\"\n if (target_class_ids > 0).sum() > 0:\n ## Only positive ROIs contribute to the loss. And only\n ## the class specific mask of each ROI.\n positive_ix = torch.nonzero(target_class_ids > 0)[:, 0]\n positive_class_ids = target_class_ids[positive_ix.data].long()\n indices = torch.stack((positive_ix, positive_class_ids), dim=1)\n\n ## Gather the masks (predicted and true) that contribute to loss\n y_true = target_masks[indices[:,0].data,:,:]\n\n if config.GLOBAL_MASK:\n y_pred = pred_masks[indices[:,0],0,:,:]\n else:\n y_pred = pred_masks[indices[:,0].data,indices[:,1].data,:,:]\n pass\n\n if config.NUM_PARAMETER_CHANNELS == 1:\n if config.OCCLUSION:\n visible_pred = pred_masks[indices[:,0],-1,:,:]\n visible_gt = y_true[:, :, :, -1]\n y_true = y_true[:, :, :, 0]\n loss = F.binary_cross_entropy(y_pred, y_true) + F.binary_cross_entropy(visible_pred, visible_gt)\n else:\n depth_pred = pred_masks[indices[:,0],-1,:,:]\n depth_gt = y_true[:, :, :, -1]\n y_true = y_true[:, :, :, 0]\n loss = F.binary_cross_entropy(y_pred, y_true) + l1LossMask(depth_pred, depth_gt, (depth_gt > 1e-4).float())\n pass\n elif config.NUM_PARAMETER_CHANNELS == 4:\n depth_pred = pred_masks[indices[:,0],-config.NUM_PARAMETER_CHANNELS,:,:]\n depth_gt = y_true[:, :, :, -1]\n y_true = y_true[:, :, :, 0]\n normal_pred = pred_masks[indices[:,0],-(config.NUM_PARAMETER_CHANNELS - 1):,:,:]\n normal_gt = target_parameters[indices[:,0]]\n normal_gt = normal_gt / torch.clamp(torch.norm(normal_gt, dim=-1, keepdim=True), min=1e-4)\n loss = F.binary_cross_entropy(y_pred, y_true) + l1LossMask(depth_pred, depth_gt, (depth_gt > 1e-4).float()) + l2NormLossMask(normal_pred, normal_gt.unsqueeze(-1).unsqueeze(-1), y_true, dim=1)\n else:\n ## Binary cross entropy\n loss = F.binary_cross_entropy(y_pred, y_true)\n pass\n else:\n loss = Variable(torch.FloatTensor([0]), requires_grad=False)\n if target_class_ids.is_cuda:\n loss = loss.cuda()\n\n return loss\n\ndef compute_mrcnn_parameter_loss(target_parameters, target_class_ids, pred_parameters):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n\n if (target_class_ids > 0).sum() > 0:\n ## Only positive ROIs contribute to the loss. And only\n ## the right class_id of each ROI. 
Get their indices.\n        positive_roi_ix = torch.nonzero(target_class_ids > 0)[:, 0]\n        positive_roi_class_ids = target_class_ids[positive_roi_ix.data].long()\n        indices = torch.stack((positive_roi_ix,positive_roi_class_ids), dim=1)\n\n        ## Gather the deltas (predicted and true) that contribute to loss\n        target_parameters = target_parameters[indices[:,0].data,:]\n        pred_parameters = pred_parameters[indices[:,0].data,indices[:,1].data,:]\n        ## Smooth L1 loss\n        loss = F.smooth_l1_loss(pred_parameters, target_parameters)\n    else:\n        loss = Variable(torch.FloatTensor([0]), requires_grad=False)\n        if target_class_ids.is_cuda:\n            loss = loss.cuda()\n    return loss\n\ndef compute_losses(config, rpn_match, rpn_bbox, rpn_class_logits, rpn_pred_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask, target_parameters, mrcnn_parameters):\n\n    rpn_class_loss = compute_rpn_class_loss(rpn_match, rpn_class_logits)\n    rpn_bbox_loss = compute_rpn_bbox_loss(rpn_bbox, rpn_match, rpn_pred_bbox)\n    mrcnn_class_loss = compute_mrcnn_class_loss(target_class_ids, mrcnn_class_logits)\n    mrcnn_bbox_loss = compute_mrcnn_bbox_loss(target_deltas, target_class_ids, mrcnn_bbox)\n    mrcnn_mask_loss = compute_mrcnn_mask_loss(config, target_mask, target_class_ids, target_parameters, mrcnn_mask)\n    mrcnn_parameter_loss = compute_mrcnn_parameter_loss(target_parameters, target_class_ids, mrcnn_parameters)\n    return [rpn_class_loss, rpn_bbox_loss, mrcnn_class_loss, mrcnn_bbox_loss, mrcnn_mask_loss, mrcnn_parameter_loss]\n\n\n\n############################################################\n# MaskRCNN Class\n############################################################\n\nclass MaskRCNN(nn.Module):\n    \"\"\"Encapsulates the Mask RCNN model functionality.\n    \"\"\"\n\n    def __init__(self, config, resnet_layers, model_dir='test'):\n        \"\"\"\n        config: A Sub-class of the Config class\n        model_dir: Directory to save training logs and trained weights\n        \"\"\"\n        super(MaskRCNN, self).__init__()\n        self.config = config\n        self.model_dir = model_dir\n        ## resnet_layers must be assigned before build(), which reads it\n        self.resnet_layers = resnet_layers\n        self.set_log_dir()\n        self.build(config=config)\n        self.initialize_weights()\n        self.loss_history = []\n        self.val_loss_history = []\n\n    def build(self, config):\n        \"\"\"Build Mask R-CNN architecture.\n        \"\"\"\n\n        ## Image size must be divisible by 2 multiple times\n        h, w = config.IMAGE_SHAPE[:2]\n        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n            raise Exception(\"Image size must be divisible by 2 at least 6 times \"\n                            \"to avoid fractions when downscaling and upscaling. \"\n                            \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n        ## Build the shared convolutional layers.\n        ## Bottom-up Layers\n        ## Returns a list of the last layers of each stage, 5 in total.\n        ## Don't create the head (stage 5), so we pick the 4th item in the list.\n        resnet = ResNet(\"resnet101\", stage5=True, numInputChannels=config.NUM_INPUT_CHANNELS)\n        C1, C2, C3, C4, C5 = resnet.stages()\n        C2, C3, C4, C5 = self.resnet_layers # Overwriting resnet layers\n\n        # print(\"C1\", C1)\n        # print(\"C2\", C2)\n        # print(\"C3\", C3)\n        # print(\"C4\", C4)\n        # print(\"C5\", C5)\n\n        ## Top-down Layers\n        ## TODO: add assert to verify feature map sizes match what's in config\n        self.fpn = FPN(C1, C2, C3, C4, C5, out_channels=256, bilinear_upsampling=self.config.BILINEAR_UPSAMPLING)\n\n        ## Generate Anchors\n        self.anchors = Variable(torch.from_numpy(utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n                                                                                config.RPN_ANCHOR_RATIOS,\n                                                                                config.BACKBONE_SHAPES,\n                                                                                config.BACKBONE_STRIDES,\n                                                                                config.RPN_ANCHOR_STRIDE)).float(), requires_grad=False)\n        if self.config.GPU_COUNT:\n            self.anchors = self.anchors.cuda()\n\n        ## RPN\n        self.rpn = RPN(len(config.RPN_ANCHOR_RATIOS), config.RPN_ANCHOR_STRIDE, 256)\n\n        ## Coordinate feature\n        self.coordinates = nn.Conv2d(3, 64, kernel_size=1, stride=1)\n\n        ## FPN Classifier\n        self.debug = False\n        self.classifier = Classifier(256, config.POOL_SIZE, config.IMAGE_SHAPE, config.NUM_CLASSES, config.NUM_PARAMETERS, debug=self.debug)\n\n        ## FPN Mask\n        self.mask = Mask(config, 256, config.MASK_POOL_SIZE, config.IMAGE_SHAPE, config.NUM_CLASSES)\n\n        if self.config.PREDICT_DEPTH:\n            if self.config.PREDICT_BOUNDARY:\n                self.depth = Depth(num_output_channels=3)\n            else:\n                self.depth = Depth(num_output_channels=1)\n\n        ## Fix batch norm layers\n        def set_bn_fix(m):\n            classname = m.__class__.__name__\n            if classname.find('BatchNorm') != -1:\n                for p in m.parameters(): p.requires_grad = False\n\n        self.apply(set_bn_fix)\n\n    def initialize_weights(self):\n        \"\"\"Initialize model weights.\n        \"\"\"\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.xavier_uniform(m.weight)\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                m.weight.data.normal_(0, 0.01)\n                m.bias.data.zero_()\n\n    def set_trainable(self, layer_regex, model=None, indent=0, verbose=1):\n        \"\"\"Sets model layers as trainable if their names match\n        the given regular expression.\n        \"\"\"\n\n        for param in self.named_parameters():\n            layer_name = param[0]\n            trainable = bool(re.fullmatch(layer_regex, layer_name))\n            if not trainable:\n                param[1].requires_grad = False\n\n    def set_log_dir(self, model_path=None):\n        \"\"\"Sets the model log directory and epoch counter.\n\n        model_path: If None, or a format different from what this code uses\n            then set a new log directory and start epochs from 0. Otherwise,\n            extract the log directory and the epoch counter from the file\n            name.\n        \"\"\"\n\n        ## Set date and epoch counter as if starting a new model\n        self.epoch = 0\n        now = datetime.datetime.now()\n\n        ## If we have a model path with date and epochs use them\n        if model_path:\n            ## Continue from where we left off. 
Get epoch and date from the file name\n ## A sample model path might look like:\n ## /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5\n regex = r\".*/\\w+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})/mask\\_rcnn\\_\\w+(\\d{4})\\.pth\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n self.epoch = int(m.group(6))\n\n ## Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n ## Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"mask_rcnn_{}_*epoch*.pth\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{:04d}\")\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n log_dir: The directory where events and weights are saved\n checkpoint_path: the path to the last checkpoint file\n \"\"\"\n ## Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n return None, None\n ## Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n ## Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n return dir_name, None\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return dir_name, checkpoint\n\n def load_weights(self, filepath):\n \"\"\"Modified version of the correspoding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exlude: list of layer names to excluce\n \"\"\"\n if os.path.exists(filepath):\n state_dict = torch.load(filepath)\n try:\n self.load_state_dict(state_dict, strict=False)\n except:\n print('load only base model')\n try:\n state_dict = {k: v for k, v in state_dict.items() if 'classifier.linear_class' not in k and 'classifier.linear_bbox' not in k and 'mask.conv5' not in k}\n state = self.state_dict()\n state.update(state_dict)\n self.load_state_dict(state)\n except:\n print('change input dimension')\n state_dict = {k: v for k, v in state_dict.items() if 'classifier.linear_class' not in k and 'classifier.linear_bbox' not in k and 'mask.conv5' not in k and 'fpn.C1.0' not in k and 'classifier.conv1' not in k}\n state = self.state_dict()\n state.update(state_dict)\n self.load_state_dict(state)\n pass\n pass\n else:\n print(\"Weight file not found ...\")\n exit(1)\n ## Update the log directory\n self.set_log_dir(filepath)\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n def detect(self, images, camera, mold_image=True, image_metas=None):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n\n ## Mold inputs to format expected by the neural network\n if mold_image:\n molded_images, image_metas, windows = mold_inputs(self.config, images)\n else:\n molded_images = images\n windows = [(0, 0, images.shape[1], images.shape[2]) for _ in range(len(images))]\n pass\n\n ## Convert images to torch tensor\n molded_images = torch.from_numpy(molded_images.transpose(0, 3, 1, 2)).float()\n\n ## To GPU\n if self.config.GPU_COUNT:\n molded_images = molded_images.cuda()\n\n ## Wrap in variable\n #molded_images = Variable(molded_images, volatile=True)\n\n ## Run object detection\n detections, mrcnn_mask, depth_np = self.predict([molded_images, image_metas, camera], mode='inference')\n\n if len(detections[0]) == 0:\n return [{'rois': [], 'class_ids': [], 'scores': [], 'masks': [], 'parameters': []}]\n \n ## Convert to numpy\n detections = detections.data.cpu().numpy()\n mrcnn_mask = mrcnn_mask.permute(0, 1, 3, 4, 2).data.cpu().numpy()\n\n ## Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks, final_parameters =\\\n unmold_detections(self.config, detections[i], mrcnn_mask[i],\n image.shape, windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n \"parameters\": final_parameters, \n })\n return results\n\n def predict(self, input, mode, use_nms=1, use_refinement=False, return_feature_map=False):\n molded_images = input[0]\n image_metas = input[1]\n\n if mode == 'inference':\n self.eval()\n elif 'training' in mode:\n self.train()\n ## Set batchnorm always in eval mode during training\n def set_bn_eval(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n m.eval()\n\n self.apply(set_bn_eval)\n\n ## Feature extraction\n #print(molded_images)\n [p2_out, p3_out, p4_out, p5_out, p6_out] = self.fpn(molded_images)\n ## Note that P6 is used in RPN, but not in the classifier heads.\n\n rpn_feature_maps = [p2_out, p3_out, p4_out, p5_out, p6_out]\n mrcnn_feature_maps = [p2_out, p3_out, p4_out, p5_out]\n # print('mrcn_feature_maps ',len(mrcnn_feature_maps))\n # print('mrcn_feature_maps size ', mrcnn_feature_maps[0].size())\n\n feature_maps = [feature_map for index, feature_map in enumerate(rpn_feature_maps[::-1])]\n if self.config.PREDICT_DEPTH:\n depth_np = self.depth(feature_maps)\n if self.config.PREDICT_BOUNDARY:\n boundary = depth_np[:, 1:]\n depth_np = depth_np[:, 0]\n else:\n depth_np = depth_np.squeeze(1)\n # print('depth_size',depth_np.size())\n pass\n else:\n depth_np = torch.ones((1, self.config.IMAGE_MAX_DIM, self.config.IMAGE_MAX_DIM)).cuda()\n pass\n \n ranges = self.config.getRanges(input[-1]).transpose(1, 2).transpose(0, 1)\n zeros = torch.zeros(3, (self.config.IMAGE_MAX_DIM - self.config.IMAGE_MIN_DIM) // 2, self.config.IMAGE_MAX_DIM).cuda()\n ranges = torch.cat([zeros, ranges, zeros], dim=1)\n ranges = torch.nn.functional.interpolate(ranges.unsqueeze(0), size=(160, 160), mode='bilinear')\n ranges = self.coordinates(ranges * 10)\n \n \n ## Loop through pyramid layers\n layer_outputs = [] ## list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(self.rpn(p))\n\n ## Concatenate layer outputs\n ## Convert from list of lists of level outputs to list of lists\n ## of outputs across 
levels.\n ## e.g. [[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n outputs = list(zip(*layer_outputs))\n outputs = [torch.cat(list(o), dim=1) for o in outputs]\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n ## Generate proposals\n ## Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n ## and zero padded.\n proposal_count = self.config.POST_NMS_ROIS_TRAINING if 'training' in mode and use_refinement == False \\\n else self.config.POST_NMS_ROIS_INFERENCE\n rpn_rois = proposal_layer([rpn_class, rpn_bbox],\n proposal_count=proposal_count,\n nms_threshold=self.config.RPN_NMS_THRESHOLD,\n anchors=self.anchors,\n config=self.config)\n # print('len rois',len(rpn_rois))\n # print(rpn_rois[0].size())\n #print('mode', mode)\n \n if mode == 'inference':\n ## Network Heads\n ## Proposal classifier and BBox regressor heads\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_parameters = self.classifier(mrcnn_feature_maps, rpn_rois, ranges)\n\n ## Detections\n ## output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in image coordinates\n detections = detection_layer(self.config, rpn_rois, mrcnn_class, mrcnn_bbox, mrcnn_parameters, image_metas)\n\n if len(detections) == 0:\n return [[]], [[]], depth_np\n ## Convert boxes to normalized coordinates\n ## TODO: let DetectionLayer return normalized coordinates to avoid\n ## unnecessary conversions\n h, w = self.config.IMAGE_SHAPE[:2]\n scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)\n if self.config.GPU_COUNT:\n scale = scale.cuda()\n detection_boxes = detections[:, :4] / scale\n\n ## Add back batch dimension\n detection_boxes = detection_boxes.unsqueeze(0)\n\n ## Create masks for detections\n mrcnn_mask, roi_features = self.mask(mrcnn_feature_maps, detection_boxes)\n\n ## Add back batch dimension\n detections = detections.unsqueeze(0)\n mrcnn_mask = mrcnn_mask.unsqueeze(0)\n return [detections, mrcnn_mask, depth_np]\n\n elif mode == 'training':\n\n gt_class_ids = input[2]\n gt_boxes = input[3]\n gt_masks = input[4]\n gt_parameters = input[5]\n \n ## Normalize coordinates\n h, w = self.config.IMAGE_SHAPE[:2]\n scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)\n if self.config.GPU_COUNT:\n scale = scale.cuda()\n gt_boxes = gt_boxes / scale\n\n ## Generate detection targets\n ## Subsamples proposals and generates target outputs for training\n ## Note that proposal class IDs, gt_boxes, and gt_masks are zero\n ## padded. 
Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_deltas, target_mask, target_parameters = \\\n detection_target_layer(rpn_rois, gt_class_ids, gt_boxes, gt_masks, gt_parameters, self.config)\n\n if len(rois) == 0:\n mrcnn_class_logits = Variable(torch.FloatTensor())\n mrcnn_class = Variable(torch.IntTensor())\n mrcnn_bbox = Variable(torch.FloatTensor())\n mrcnn_mask = Variable(torch.FloatTensor())\n mrcnn_parameters = Variable(torch.FloatTensor())\n if self.config.GPU_COUNT:\n mrcnn_class_logits = mrcnn_class_logits.cuda()\n mrcnn_class = mrcnn_class.cuda()\n mrcnn_bbox = mrcnn_bbox.cuda()\n mrcnn_mask = mrcnn_mask.cuda()\n mrcnn_parameters = mrcnn_parameters.cuda()\n else:\n ## Network Heads\n ## Proposal classifier and BBox regressor heads\n #print([maps.shape for maps in mrcnn_feature_maps], target_parameters.shape)\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_parameters = self.classifier(mrcnn_feature_maps, rois, ranges, target_parameters)\n \n ## Create masks for detections\n mrcnn_mask, _ = self.mask(mrcnn_feature_maps, rois)\n\n return [rpn_class_logits, rpn_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask, mrcnn_mask, target_parameters, mrcnn_parameters, rois, depth_np]\n \n elif mode in ['training_detection', 'inference_detection']:\n gt_class_ids = input[2]\n gt_boxes = input[3]\n gt_masks = input[4]\n gt_parameters = input[5]\n \n ## Normalize coordinates\n h, w = self.config.IMAGE_SHAPE[:2]\n scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)\n if self.config.GPU_COUNT:\n scale = scale.cuda()\n\n gt_boxes = gt_boxes / scale\n \n ## Generate detection targets\n ## Subsamples proposals and generates target outputs for training\n ## Note that proposal class IDs, gt_boxes, and gt_masks are zero\n ## padded. 
Equally, returned rois and targets are zero padded.\n \n rois, target_class_ids, target_deltas, target_mask, target_parameters = \\\n detection_target_layer(rpn_rois, gt_class_ids, gt_boxes, gt_masks, gt_parameters, self.config)\n\n if len(rois) == 0:\n mrcnn_class_logits = Variable(torch.FloatTensor())\n mrcnn_class = Variable(torch.IntTensor())\n mrcnn_bbox = Variable(torch.FloatTensor())\n mrcnn_mask = Variable(torch.FloatTensor())\n mrcnn_parameters = Variable(torch.FloatTensor())\n if self.config.GPU_COUNT:\n mrcnn_class_logits = mrcnn_class_logits.cuda()\n mrcnn_class = mrcnn_class.cuda()\n mrcnn_bbox = mrcnn_bbox.cuda()\n mrcnn_mask = mrcnn_mask.cuda()\n mrcnn_parameters = mrcnn_parameters.cuda()\n else:\n ## Network Heads\n ## Proposal classifier and BBox regressor heads\n\n # print('rois1', len(rois), rois[0].size())\n\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_parameters, roi_features = self.classifier(mrcnn_feature_maps, rois, ranges, pool_features=True)\n ## Create masks for detections\n mrcnn_mask, _ = self.mask(mrcnn_feature_maps, rois)\n pass\n\n h, w = self.config.IMAGE_SHAPE[:2]\n scale = Variable(torch.from_numpy(np.array([h, w, h, w])).float(), requires_grad=False)\n if self.config.GPU_COUNT:\n scale = scale.cuda()\n\n if use_refinement:\n\n # print('rois2', len(rpn_rois), rpn_rois[0].size())\n\n mrcnn_class_logits_final, mrcnn_class_final, mrcnn_bbox_final, mrcnn_parameters_final, roi_features = self.classifier(mrcnn_feature_maps, rpn_rois[0], ranges, pool_features=True)\n\n ## Add back batch dimension\n ## Create masks for detections\n detections, indices, _ = detection_layer(self.config, rpn_rois, mrcnn_class_final, mrcnn_bbox_final, mrcnn_parameters_final, image_metas, return_indices=True, use_nms=use_nms)\n if len(detections) > 0: \n detection_boxes = detections[:, :4] / scale \n detection_boxes = detection_boxes.unsqueeze(0)\n # print('fm', len(mrcnn_feature_maps), mrcnn_feature_maps[0].size())\n # print('len_det_box', len(detection_boxes),detection_boxes[0].size())\n detection_masks, _ = self.mask(mrcnn_feature_maps, detection_boxes)\n # print('det_masks', len(detection_masks), detection_masks[16].size())\n roi_features = roi_features[indices]\n pass\n else:\n mrcnn_class_logits_final, mrcnn_class_final, mrcnn_bbox_final, mrcnn_parameters_final = mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_parameters\n \n rpn_rois = rois\n detections, indices, _ = detection_layer(self.config, rpn_rois, mrcnn_class_final, mrcnn_bbox_final, mrcnn_parameters_final, image_metas, return_indices=True, use_nms=use_nms)\n\n if len(detections) > 0:\n detection_boxes = detections[:, :4] / scale\n detection_boxes = detection_boxes.unsqueeze(0)\n detection_masks, _ = self.mask(mrcnn_feature_maps, detection_boxes)\n roi_features = roi_features[indices] \n pass\n pass\n\n valid = False \n if len(detections) > 0:\n positive_rois = detection_boxes.squeeze(0) \n\n gt_class_ids = gt_class_ids.squeeze(0)\n gt_boxes = gt_boxes.squeeze(0)\n gt_masks = gt_masks.squeeze(0)\n gt_parameters = gt_parameters.squeeze(0)\n\n ## Compute overlaps matrix [proposals, gt_boxes]\n overlaps = bbox_overlaps(positive_rois, gt_boxes)\n\n ## Determine postive and negative ROIs\n roi_iou_max = torch.max(overlaps, dim=1)[0]\n\n ## 1. 
Positive ROIs are those with >= 0.5 IoU with a GT box\n if 'inference' in mode:\n positive_roi_bool = roi_iou_max > -1\n else:\n positive_roi_bool = roi_iou_max > 0.2\n pass\n\n detections = detections[positive_roi_bool]\n # print('type', type(positive_roi_bool))\n # print(len(positive_roi_bool))\n # print(positive_roi_bool)\n\n detection_masks = detection_masks[positive_roi_bool]\n roi_features = roi_features[positive_roi_bool]\n if len(detections) > 0:\n positive_indices = torch.nonzero(positive_roi_bool)[:, 0]\n\n positive_rois = positive_rois[positive_indices.data]\n\n ## Assign positive ROIs to GT boxes.\n positive_overlaps = overlaps[positive_indices.data,:]\n roi_gt_box_assignment = torch.max(positive_overlaps, dim=1)[1]\n roi_gt_boxes = gt_boxes[roi_gt_box_assignment.data,:]\n roi_gt_class_ids = gt_class_ids[roi_gt_box_assignment.data]\n roi_gt_parameters = gt_parameters[roi_gt_box_assignment.data]\n roi_gt_parameters = self.config.applyAnchorsTensor(roi_gt_class_ids.long(), roi_gt_parameters)\n ## Assign positive ROIs to GT masks\n roi_gt_masks = gt_masks[roi_gt_box_assignment.data,:,:]\n\n valid_mask = positive_overlaps.max(0)[1]\n valid_mask = (valid_mask[roi_gt_box_assignment] == torch.arange(len(roi_gt_box_assignment)).long().cuda()).long()\n roi_indices = roi_gt_box_assignment * valid_mask + (-1) * (1 - valid_mask)\n\n ## Compute mask targets\n boxes = positive_rois\n if self.config.USE_MINI_MASK:\n ## Transform ROI corrdinates from normalized image space\n ## to normalized mini-mask space.\n y1, x1, y2, x2 = positive_rois.chunk(4, dim=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = roi_gt_boxes.chunk(4, dim=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = torch.cat([y1, x1, y2, x2], dim=1)\n pass\n box_ids = Variable(torch.arange(roi_gt_masks.size()[0]), requires_grad=False).int()\n if self.config.GPU_COUNT:\n box_ids = box_ids.cuda()\n roi_gt_masks = Variable(CropAndResizeFunction(self.config.FINAL_MASK_SHAPE[0], self.config.FINAL_MASK_SHAPE[1], 0)(roi_gt_masks.unsqueeze(1), boxes, box_ids).data, requires_grad=False)\n roi_gt_masks = roi_gt_masks.squeeze(1)\n\n roi_gt_masks = torch.round(roi_gt_masks)\n valid = True\n pass\n pass\n if not valid:\n detections = torch.FloatTensor()\n detection_masks = torch.FloatTensor()\n roi_gt_parameters = torch.FloatTensor()\n roi_gt_masks = torch.FloatTensor()\n roi_features = torch.FloatTensor()\n roi_indices = torch.LongTensor()\n if self.config.GPU_COUNT:\n detections = detections.cuda()\n detection_masks = detection_masks.cuda()\n roi_gt_parameters = roi_gt_parameters.cuda()\n roi_gt_masks = roi_gt_masks.cuda()\n roi_features = roi_features.cuda()\n roi_indices = roi_indices.cuda()\n pass\n pass\n\n\n info = [rpn_class_logits, rpn_bbox, target_class_ids, mrcnn_class_logits, target_deltas, mrcnn_bbox, target_mask,\n mrcnn_mask, target_parameters, mrcnn_parameters, detections, detection_masks, roi_gt_parameters, roi_gt_masks,\n rpn_rois, roi_features, roi_indices]\n if return_feature_map:\n\n feature_map = mrcnn_feature_maps\n info.append(feature_map)\n pass\n\n # print('depth_np_source',len(depth_np),depth_np.size())\n info.append(depth_np)\n if self.config.PREDICT_BOUNDARY:\n info.append(boundary)\n pass\n return info\n\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef mold_inputs(config, images):\n \"\"\"Takes a list of images 
and modifies them to the format expected\n    as an input to the neural network.\n    images: List of image matrices [height,width,depth]. Images can have\n        different sizes.\n\n    Returns 3 Numpy matrices:\n    molded_images: [N, h, w, 3]. Images resized and normalized.\n    image_metas: [N, length of meta data]. Details about each image.\n    windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the\n        original image (padding excluded).\n    \"\"\"\n    molded_images = []\n    image_metas = []\n    windows = []\n    for image in images:\n        ## Resize image to fit the model expected size\n        ## TODO: move resizing to mold_image()\n        molded_image, window, scale, padding = utils.resize_image(\n            image,\n            min_dim=config.IMAGE_MIN_DIM,\n            max_dim=config.IMAGE_MAX_DIM,\n            padding=config.IMAGE_PADDING)\n        molded_image = mold_image(molded_image, config)\n        ## Build image_meta\n        image_meta = compose_image_meta(\n            0, image.shape, window,\n            np.zeros([config.NUM_CLASSES], dtype=np.int32))\n        ## Append\n        molded_images.append(molded_image)\n        windows.append(window)\n        image_metas.append(image_meta)\n    ## Pack into arrays\n    molded_images = np.stack(molded_images)\n    image_metas = np.stack(image_metas)\n    windows = np.stack(windows)\n    return molded_images, image_metas, windows\n\ndef unmold_detections(config, detections, mrcnn_mask, image_shape, window, debug=False):\n    \"\"\"Reformats the detections of one image from the format of the neural\n    network output to a format suitable for use in the rest of the\n    application.\n\n    detections: [N, (y1, x1, y2, x2, class_id, score)]\n    mrcnn_mask: [N, height, width, num_classes]\n    image_shape: [height, width, depth] Original size of the image before resizing\n    window: [y1, x1, y2, x2] Box in the image where the real image is\n        excluding the padding.\n\n    Returns:\n    boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n    class_ids: [N] Integer class IDs for each bounding box\n    scores: [N] Float probability scores of the class_id\n    masks: [height, width, num_instances] Instance masks\n    \"\"\"\n    ## How many detections do we have?\n    ## Detections array is padded with zeros. Find the first class_id == 0.\n    zero_ix = np.where(detections[:, 4] == 0)[0]\n    N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n    ## Extract boxes, class_ids, scores, and class-specific masks\n    boxes = detections[:N, :4]\n    class_ids = detections[:N, 4].astype(np.int32)\n    scores = detections[:N, 5]\n    parameters = detections[:N, 6:]\n    if config.GLOBAL_MASK:\n        masks = mrcnn_mask[np.arange(N), :, :, 0]\n    else:\n        masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n    ## Compute scale and shift to translate coordinates to image domain.\n    h_scale = image_shape[0] / (window[2] - window[0])\n    w_scale = image_shape[1] / (window[3] - window[1])\n    scale = min(h_scale, w_scale)\n    shift = window[:2] ## y, x\n    scales = np.array([scale, scale, scale, scale])\n    shifts = np.array([shift[0], shift[1], shift[0], shift[1]])\n\n    ## Translate bounding boxes to image domain\n    boxes = np.multiply(boxes - shifts, scales).astype(np.int32)\n\n    if debug:\n        print(masks.shape, boxes.shape)\n        for maskIndex, mask in enumerate(masks):\n            print(maskIndex, boxes[maskIndex].astype(np.int32))\n            cv2.imwrite('test/local_mask_' + str(maskIndex) + '.png', (mask * 255).astype(np.uint8))\n\n    ## Filter out detections with zero area. Often only happens in early\n    ## stages of training when the network weights are still a bit random.\n    exclude_ix = np.where(\n        (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n    if exclude_ix.shape[0] > 0:\n        boxes = np.delete(boxes, exclude_ix, axis=0)\n        class_ids = np.delete(class_ids, exclude_ix, axis=0)\n        scores = np.delete(scores, exclude_ix, axis=0)\n        masks = np.delete(masks, exclude_ix, axis=0)\n        parameters = np.delete(parameters, exclude_ix, axis=0)\n        N = class_ids.shape[0]\n\n    ## Resize masks to original image size and set boundary threshold.\n    full_masks = []\n    for i in range(N):\n        ## Convert neural network mask to full size mask\n        full_mask = utils.unmold_mask(masks[i], boxes[i], image_shape)\n        full_masks.append(full_mask)\n    full_masks = np.stack(full_masks, axis=-1)\\\n        if full_masks else np.empty((0,) + masks.shape[1:3])\n\n    if debug:\n        print(full_masks.shape)\n        for maskIndex in range(full_masks.shape[2]):\n            cv2.imwrite('test/full_mask_' + str(maskIndex) + '.png', (full_masks[:, :, maskIndex] * 255).astype(np.uint8))\n    return boxes, class_ids, scores, full_masks, parameters\n\ndef compose_image_meta(image_id, image_shape, window, active_class_ids):\n    \"\"\"Takes attributes of an image and puts them in one 1D array. Use\n    parse_image_meta() to parse the values back.\n\n    image_id: An int ID of the image. Useful for debugging.\n    image_shape: [height, width, channels]\n    window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n        image is (excluding the padding)\n    active_class_ids: List of class_ids available in the dataset from which\n        the image came. Useful if training on images from multiple datasets\n        where not all classes are present in all datasets.\n    \"\"\"\n    meta = np.array(\n        [image_id] +            ## size=1\n        list(image_shape) +     ## size=3\n        list(window) +          ## size=4 (y1, x1, y2, x2) in image coordinates\n        list(active_class_ids)  ## size=num_classes\n    )\n    return meta\n\n\n## Two functions (for Numpy and TF) to parse image_meta tensors.\ndef parse_image_meta(meta):\n    \"\"\"Parses an image info Numpy array to its components.\n    See compose_image_meta() for more details.\n    \"\"\"\n    image_id = meta[:, 0]\n    image_shape = meta[:, 1:4]\n    window = meta[:, 4:8]   ## (y1, x1, y2, x2) window of image in pixels\n    active_class_ids = meta[:, 8:]\n    return image_id, image_shape, window, active_class_ids\n\n\ndef parse_image_meta_graph(meta):\n    \"\"\"Parses a tensor that contains image attributes to its components.\n    See compose_image_meta() for more details.\n\n    meta: [batch, meta length] where meta length depends on NUM_CLASSES\n    \"\"\"\n    image_id = meta[:, 0]\n    image_shape = meta[:, 1:4]\n    window = meta[:, 4:8]\n    active_class_ids = meta[:, 8:]\n    return [image_id, image_shape, window, active_class_ids]\n\n\ndef mold_image(images, config):\n    \"\"\"Takes RGB images with 0-255 values, subtracts\n    the mean pixel, and converts the result to float. Expects image\n    colors in RGB order.\n    \"\"\"\n    return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n    \"\"\"Takes an image normalized with mold_image() and returns the original.\"\"\"\n    return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n" ]
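The image_meta packing defined at the end of the record above is a flat 1D layout: [image_id, image_shape, window, active_class_ids]. A quick round-trip sketch, condensed from the record's compose_image_meta/parse_image_meta pair; the 3-class setup and the window values are invented for illustration:

import numpy as np

# Condensed from the record: pack image attributes into one flat 1D array.
def compose_image_meta(image_id, image_shape, window, active_class_ids):
    return np.array([image_id] + list(image_shape) + list(window) + list(active_class_ids))

# Condensed from the record: slice the flat array back into its components.
def parse_image_meta(meta):
    return meta[:, 0], meta[:, 1:4], meta[:, 4:8], meta[:, 8:]

meta = compose_image_meta(7, (480, 640, 3), (0, 80, 480, 560), np.zeros(3, dtype=np.int32))
image_id, image_shape, window, active = parse_image_meta(meta[np.newaxis, :])
print(image_id, image_shape, window, active)  # -> [7] [[480 640 3]] [[  0  80 480 560]] [[0 0 0]]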
[ [ "numpy.full", "numpy.array", "numpy.linalg.norm", "numpy.zeros", "numpy.random.seed", "numpy.eye", "numpy.stack", "numpy.arange", "numpy.expand_dims" ], [ "torch.nn.Linear", "torch.round", "torch.cat", "torch.nn.modules.utils._pair", "torch.stack", "torch.nn.functional.smooth_l1_loss", "torch.nn.BatchNorm2d", "torch.ones", "numpy.multiply", "numpy.where", "torch.nn.functional.cross_entropy", "torch.LongTensor", "torch.load", "torch.nn.functional.pad", "torch.exp", "torch.sqrt", "numpy.empty", "torch.nn.Softmax", "torch.nn.MaxPool2d", "torch.IntTensor", "torch.norm", "torch.FloatTensor", "torch.nn.ConvTranspose2d", "torch.ByteTensor", "numpy.arange", "torch.zeros", "torch.nonzero", "numpy.array", "numpy.delete", "torch.min", "numpy.zeros", "torch.max", "torch.nn.Sequential", "torch.nn.functional.upsample", "numpy.reshape", "torch.nn.ReLU", "torch.nn.Conv2d", "numpy.stack", "torch.log", "torch.sort", "torch.nn.functional.binary_cross_entropy", "torch.nn.Sigmoid", "torch.nn.init.xavier_uniform", "torch.nn.functional.interpolate", "torch.from_numpy", "torch.nn.Upsample" ] ]
Aethor/nintent
[ "a4c3a9cce22c46b65bfb9258ac25ce9c392674ff" ]
[ "train.py" ]
[ "import os\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\n\nimport argparse\nimport random\nfrom typing import Optional, List\n\nimport torch\nfrom torch.optim.optimizer import Optimizer\nfrom torch.nn.modules.loss import _Loss\nfrom torch.optim.lr_scheduler import _LRScheduler\nfrom transformers import BertTokenizer\nfrom tqdm.auto import tqdm\n\nfrom tree import IntentTree, Intent\nfrom datas import Dataset\nfrom config import Config\nfrom model import TreeMaker\nfrom score import score\n\n\ndef train_(\n model: TreeMaker,\n train_dataset: Dataset,\n valid_dataset: Dataset,\n device: torch.device,\n optimizer: Optimizer,\n scheduler: Optional[_LRScheduler],\n epochs_nb: int,\n batch_size: int,\n verbose: bool = False,\n):\n model.to(device)\n\n for epoch in range(epochs_nb):\n\n mean_loss_list = []\n\n batches_progress = tqdm(\n train_dataset.batches(batch_size, device),\n total=train_dataset.batches_nb(batch_size),\n )\n\n # TODO: only possible batch size is 1\n for i, target_trees in enumerate(batches_progress):\n model.train()\n optimizer.zero_grad()\n\n loss = model(\n target_trees[0],\n device,\n target_trees[0].tokens,\n target_trees[0].span_coords,\n )\n\n loss.backward()\n optimizer.step()\n\n mean_loss_list.append(loss.item())\n batches_progress.set_description(\n \"[e:{}][l:{:10.4f}]\".format(epoch + 1, loss.item())\n )\n\n if not scheduler is None:\n scheduler.step()\n\n tqdm.write(\"scoring train trees...\")\n train_metrics = score(\n model,\n train_dataset.trees[: int(0.10 * len(train_dataset.trees))],\n device,\n verbose,\n )\n tqdm.write(\"scoring validation trees...\")\n valid_metrics = score(model, valid_dataset.trees, device, verbose)\n\n tqdm.write(\"train exact accuracy : {:10.4f}\".format(train_metrics[0]))\n tqdm.write(\"train labeled precision : {:10.4f}\".format(train_metrics[1]))\n tqdm.write(\"train labeled recall : {:10.4f}\".format(train_metrics[2]))\n tqdm.write(\"train labeled f1 : {:10.4f}\".format(train_metrics[3]))\n\n tqdm.write(\"validation exact accuracy : {:10.4f}\".format(valid_metrics[0]))\n tqdm.write(\"validation labeled precision : {:10.4f}\".format(valid_metrics[1]))\n tqdm.write(\"validation labeled recall : {:10.4f}\".format(valid_metrics[2]))\n tqdm.write(\"validation labeled f1 : {:10.4f}\".format(valid_metrics[3]))\n\n tqdm.write(\n \"mean loss : {:10.4f}\".format(sum(mean_loss_list) / len(mean_loss_list))\n )\n\n return model\n\n\nif __name__ == \"__main__\":\n\n config = Config(\"./configs/default-train-config.json\")\n arg_parser = argparse.ArgumentParser(argparse.ArgumentDefaultsHelpFormatter)\n arg_parser.add_argument(\n \"-en\",\n \"--epochs-nb\",\n type=int,\n default=config[\"epochs_nb\"],\n help=\"Number of epochs\",\n )\n arg_parser.add_argument(\n \"-bz\",\n \"--batch-size\",\n type=int,\n default=config[\"batch_size\"],\n help=\"Size of batches\",\n )\n arg_parser.add_argument(\n \"-lr\",\n \"--learning-rate\",\n type=float,\n default=config[\"learning_rate\"],\n help=\"learning rate\",\n )\n arg_parser.add_argument(\n \"-tdur\",\n \"--train-datas-usage-ratio\",\n type=float,\n default=config[\"train_datas_usage_ratio\"],\n help=\"train datas usage ratio (between 0 and 1)\",\n )\n arg_parser.add_argument(\n \"-vdur\",\n \"--validation-datas-usage-ratio\",\n type=float,\n default=config[\"validation_datas_usage_ratio\"],\n help=\"validation datas usage ratio (between 0 and 1)\",\n )\n arg_parser.add_argument(\n \"-mp\",\n \"--model-path\",\n type=str,\n default=config[\"model_path\"],\n help=\"path where the model will be 
saved\",\n )\n arg_parser.add_argument(\n \"-cf\",\n \"--config-file\",\n type=str,\n default=None,\n help=\"Config file overriding default-train-config.json\",\n )\n args = arg_parser.parse_args()\n if args.config_file:\n config.load_from_file_(args.config_file)\n else:\n config.update_(vars(args))\n\n tokenizer = BertTokenizer.from_pretrained(\"bert-base-cased\")\n print(\"[info] loading datas...\")\n train_dataset, valid_dataset = Dataset.from_files(\n [\"./datas/train.tsv\", \"./datas/eval.tsv\"],\n [config[\"train_datas_usage_ratio\"], config[\"validation_datas_usage_ratio\"]],\n )\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n intent_weights, slot_weights = train_dataset.class_weights()\n intent_weights = torch.tensor(intent_weights).to(device)\n slot_weights = torch.tensor(slot_weights).to(device)\n\n model = TreeMaker()\n optimizer = torch.optim.Adam(model.parameters(), lr=config[\"learning_rate\"])\n\n train_(\n model,\n train_dataset,\n valid_dataset,\n device,\n optimizer,\n None, # scheduler,\n config[\"epochs_nb\"],\n config[\"batch_size\"],\n True,\n )\n\n torch.save(model.state_dict(), config[\"model_path\"])\n" ]
[ [ "torch.cuda.is_available", "torch.tensor" ] ]
lithium-ion/ImpedanceAnalyzer
[ "e8d8c77b4f9b88d82b9f7fc40f29270aaa1e681b" ]
[ "application/fitPhysics.py" ]
[ "\"\"\" Provides functions for fitting physics-based models \"\"\"\n\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import leastsq\n\n\ndef prepare_data(data):\n \"\"\" Prepares the experimental data for fitting\n\n Parameters\n ----------\n\n data : list of tuples\n experimental impedance spectrum given as list of\n (frequency, real impedance, imaginary impedance)\n\n Returns\n -------\n\n data_df : pd.DataFrame\n sorted DataFrame with f, real, imag, mag, and phase columns and\n \"\"\"\n\n exp_data = pd.DataFrame(data, columns=['f', 'real', 'imag'])\n exp_data['mag'] = exp_data.apply(magnitude, axis=1)\n exp_data['phase'] = exp_data.apply(phase, axis=1)\n\n # sort from high to low frequencies\n exp_data.sort_values(by='f', ascending=False, inplace=True)\n exp_data.index = range(len(exp_data))\n\n return exp_data\n\n\ndef interpolate_points(data, exp_freq):\n \"\"\" Interpolates experimental data to the simulated frequencies\n\n Parameters\n ----------\n\n data : pd.DataFrame\n\n \"\"\"\n\n # find the frequencies that fall within the experimental data and create\n # interpolated, to store interpolated experimental data for fitting\n min_f, max_f = min(data['f']), max(data['f'])\n freq_mask = [f for f in exp_freq if min_f <= f <= max_f]\n freq_mask = sorted(freq_mask, reverse=True)\n\n points_to_fit = pd.DataFrame(index=freq_mask, columns=['mag', 'ph'])\n\n # if the frequency isn't already within 1% of the simulation frequencies,\n # quadratically interpolate the nearest 4 points in the magnitude and phase\n for frequency in points_to_fit.index:\n exact = data[data['f'].between(.99*frequency, 1.01*frequency)]\n if not exact.empty:\n points_to_fit.loc[frequency, 'mag'] = np.asscalar(exact['mag'])\n points_to_fit.loc[frequency, 'ph'] = np.asscalar(exact['phase'])\n else:\n idx = np.argmin(np.abs(frequency - data['f']))\n\n x = data['f'].iloc[idx-2:idx+3]\n y_mag = data['mag'].iloc[idx-2:idx+3]\n y_phase = data['phase'].iloc[idx-2:idx+3]\n\n mag = interp1d(x, y_mag, kind='quadratic')\n phase = interp1d(x, y_phase, kind='quadratic')\n\n points_to_fit.loc[frequency, 'mag'] = mag(frequency)\n points_to_fit.loc[frequency, 'ph'] = phase(frequency)\n\n points_to_fit['real'] = points_to_fit.mag*(points_to_fit.ph.map(np.cos))\n points_to_fit['imag'] = points_to_fit.mag*(points_to_fit.ph.map(np.sin))\n\n return points_to_fit\n\n\ndef find_hf_crossover(data, points_to_fit):\n crossover = data[data['imag'] > 0]\n\n if crossover.index.tolist():\n index = crossover.index.tolist()[-1]\n\n x = data['imag'].loc[index-2:index+3]\n y = data['real'].loc[index-2:index+3]\n\n hf = interp1d(x, y, kind='quadratic')\n\n Zreal_hf = np.asscalar(hf(0))\n\n positive_Zimag = points_to_fit[points_to_fit['ph'] > 0]\n\n points_to_fit.drop(positive_Zimag.index, inplace=True)\n\n hf_dict = {'mag': Zreal_hf, 'ph': 0.0,\n 'real': Zreal_hf, 'imag': 0.0}\n\n hf_df = pd.DataFrame(hf_dict, index=[1e5],\n columns=points_to_fit.columns)\n\n points_to_fit = pd.concat([hf_df, points_to_fit])\n\n elif max(data['f']) < 1e5:\n # Cubically extrapolate five highest frequencies to find Z_hf\n x = data['real'].iloc[0:5]\n y = data['imag'].iloc[0:5]\n\n fit = np.polyfit(x, -y, 4)\n func = np.poly1d(fit)\n\n Zreal_hf = np.real(func.r[np.real(func.r) < min(x)])\n\n hf_dict = {'mag': Zreal_hf, 'ph': 0.0,\n 'real': Zreal_hf, 'imag': 0.0}\n\n hf_df = pd.DataFrame(hf_dict, index=[1e5],\n columns=points_to_fit.columns)\n\n points_to_fit = pd.concat([hf_df, points_to_fit])\n\n else:\n 
Zreal_hf = np.real(data[data['f'] == 1e5]['real'])\n\n return Zreal_hf, points_to_fit\n\n\ndef magnitude(x):\n return np.sqrt(x['real']**2 + x['imag']**2)\n\n\ndef phase(x):\n return np.arctan2(x['imag'], x['real'])\n\n\ndef fit_P2D_by_capacity(data_string, target_capacity):\n \"\"\" Fit physics-based model by matching the capacity and then sliding along\n real axes to determine contact resistance\n\n Parameters\n ----------\n\n data : list of tuples\n (frequency, real impedance, imaginary impedance) of the\n experimental data to be fit\n\n Returns\n -------\n\n fit_points : list of tuples\n (frequency, real impedance, imaginary impedance) of points\n used in the fitting of the physics-based model\n\n best_fit : list of tuples\n (frequency, real impedance, imaginary impedance) of\n the best fitting model\n\n full_results : pd.DataFrame\n DataFrame of top fits sorted by their residual\n\n\n \"\"\"\n\n # transform data from string to pd.DataFrame\n data = prepare_data(data_string)\n\n # read in all of the simulation results\n Z_csv = pd.read_csv('./application/static/data/38800-Z.csv', index_col=0)\n\n real = [a for a in Z_csv.columns if 'real' in a]\n real_df = Z_csv[real]\n real_df.columns = [float(a.split('_real')[0]) for a in real_df.columns]\n\n imag = [a for a in Z_csv.columns if 'imag' in a]\n imag_df = Z_csv[imag]\n imag_df.columns = [float(a.split('_imag')[0]) for a in imag_df.columns]\n\n Z = real_df + imag_df*1j\n\n # interpolate data to match simulated frequencies\n points_to_fit = interpolate_points(data, Z.columns)\n\n # find the high frequency real intercept\n # Zreal_hf, points_to_fit = find_hf_crossover(data, points_to_fit)\n\n Z_data_r = np.array(points_to_fit['real'].tolist())\n Z_data_i = 1j*np.array(points_to_fit['imag'].tolist())\n Z_data = Z_data_r + Z_data_i\n\n mask = [i for i, f in enumerate(Z.columns) if f in points_to_fit.index]\n\n results_array = np.ndarray(shape=(len(Z), 4))\n\n P = pd.read_csv('./application/static/data/model_runs.txt')\n\n ah_per_v = {'pos': 550*10**6, 'neg': 400*10**6} # mAh/m^3 - Nitta (2015)\n\n def scale_by_capacity(d, target_capacity, ah_per_v):\n \"\"\" returns the area (cm^2) for the parameter Series capacity\n to match the target capacity\n\n \"\"\"\n\n l_pos, l_neg = d[3], d[1]\n\n e_pos, e_neg = d[10], d[8]\n\n e_f_pos, e_f_neg = d[7], d[6]\n\n area_pos = target_capacity/(ah_per_v['pos']*l_pos*(1-e_pos-e_f_pos))\n area_neg = target_capacity/(ah_per_v['neg']*l_neg*(1-e_neg-e_f_neg))\n\n return max([area_pos, area_neg])\n\n area = np.ndarray((len(P), 1))\n for i, p in enumerate(P.values):\n area[i] = scale_by_capacity(p, target_capacity, ah_per_v)\n\n def contact_residual(contact_resistance, Z_model, Z_data):\n Zr = np.real(Z_model) + contact_resistance - np.real(Z_data)\n Zi = np.imag(Z_model) - np.imag(Z_data)\n\n return np.concatenate((Zr, Zi))\n\n avg_mag = points_to_fit['mag'].mean()\n for run, impedance in enumerate(Z.values[:, mask]):\n scaled = impedance/area[run]\n\n p_values = leastsq(contact_residual, 0, args=(scaled, Z_data))\n\n contact_resistance = p_values[0]\n shifted = scaled + contact_resistance\n\n real_squared = (np.real(Z_data) - np.real(shifted))**2\n imag_squared = (np.imag(Z_data) - np.imag(shifted))**2\n sum_of_squares = (np.sqrt(real_squared + imag_squared)).sum()\n\n avg_error = 100./len(shifted)*sum_of_squares/avg_mag\n\n results_array[run, 0] = run + 1 # run is 1-indexed\n results_array[run, 1] = area[run] # m^2\n results_array[run, 2] = avg_error # percentage\n results_array[run, 3] = contact_resistance # 
Ohms\n\n results = pd.DataFrame(results_array,\n columns=['run',\n 'area',\n 'residual',\n 'contact_resistance'])\n\n results.index = results['run']\n\n # remove contact resistances below 10% of high frequency real\n results = results[results['contact_resistance'] > -0.1*np.real(Z_data[0])]\n\n sorted_results = results.sort_values(['residual'])\n\n best_fit_idx = int(sorted_results['run'].iloc[0])\n best_fit_Z = Z.loc[best_fit_idx].iloc[mask]\n best_fit_cr = sorted_results['contact_resistance'].iloc[0]\n best_fit_area = sorted_results['area'].iloc[0]\n best_Z = best_fit_Z/best_fit_area + best_fit_cr\n\n fit_points = list(zip(points_to_fit.index,\n points_to_fit.real,\n points_to_fit.imag))\n\n best_fit = list(zip(best_Z.index,\n best_Z.map(np.real),\n best_Z.map(np.imag)))\n\n NUM_RESULTS = 50\n\n return fit_points, best_fit, sorted_results.iloc[0:NUM_RESULTS]\n" ]
[ [ "numpy.concatenate", "numpy.poly1d", "scipy.interpolate.interp1d", "pandas.DataFrame", "numpy.real", "scipy.optimize.leastsq", "numpy.asscalar", "numpy.arctan2", "numpy.sqrt", "pandas.concat", "numpy.polyfit", "numpy.imag", "pandas.read_csv", "numpy.abs" ] ]
xiaoweiChen/Tensorflow-2.x-Alexnet
[ "d9161ba6764143d3d8e84bee2268b0ac8ad95355" ]
[ "model.py" ]
[ "\nimport os, pathlib, PIL\nfrom tqdm import tqdm\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras import Model\n\nclass AlexNet(Model):\n def __init__(self, data_shape=(224, 224, 3), num_classes=1000):\n super(AlexNet, self).__init__()\n self.data_augmentation = keras.Sequential(\n [\n layers.experimental.preprocessing.RandomFlip(\n \"horizontal\", \n input_shape=data_shape),\n layers.experimental.preprocessing.RandomRotation(0.1),\n layers.experimental.preprocessing.RandomZoom(0.1),\n ]\n )\n \n self.rescaling = layers.experimental.preprocessing.Rescaling(1./255)\n \n # layer 1\n self.conv1 = layers.Conv2D(\n filters=96,\n kernel_size=(11, 11),\n strides=4,\n padding=\"valid\",\n activation='relu',\n input_shape= data_shape,\n kernel_initializer='GlorotNormal')\n self.pool1 = layers.MaxPooling2D(\n pool_size=(3, 3),\n strides=2,\n padding=\"valid\")\n self.norm1 = tf.keras.layers.BatchNormalization()\n \n # layer 2\n self.conv2 = layers.Conv2D(\n filters=256,\n kernel_size=(5, 5),\n strides=1,\n padding=\"same\",\n activation='relu',\n kernel_initializer='GlorotNormal')\n self.pool2 = layers.MaxPooling2D(\n pool_size=(3, 3),\n strides=2,\n padding=\"valid\")\n self.norm2 = tf.keras.layers.BatchNormalization()\n \n # layer 3\n self.conv3 = layers.Conv2D(\n filters=384,\n kernel_size=(3, 3),\n strides=1,\n padding=\"same\",\n activation='relu',\n kernel_initializer='GlorotNormal')\n \n # layer 4\n self.conv4 = layers.Conv2D(\n filters=384,\n kernel_size=(3, 3),\n strides=1,\n padding=\"same\",\n activation='relu',\n kernel_initializer='GlorotNormal')\n \n # layer 5\n self.conv5 = layers.Conv2D(\n filters=256,\n kernel_size=(3, 3),\n strides=1,\n padding=\"same\",\n activation='relu',\n kernel_initializer='GlorotNormal')\n self.pool5 = layers.MaxPooling2D(\n pool_size=(3, 3),\n strides=2,\n padding=\"valid\")\n self.norm5 = tf.keras.layers.BatchNormalization()\n \n # layer 6\n self.flatten6 = tf.keras.layers.Flatten()\n self.d6 = tf.keras.layers.Dense(\n units=4096,\n activation='relu')\n self.drop6 = tf.keras.layers.Dropout(rate=0.5)\n \n # layer 7\n self.d7 = tf.keras.layers.Dense(\n units=4096,\n activation='relu')\n self.drop7 = tf.keras.layers.Dropout(rate=0.5)\n \n # layer 8\n self.d8 = tf.keras.layers.Dense(\n units=num_classes,\n activation='softmax')\n \n self.build((None,) + data_shape)\n\n def call(self, x):\n x = self.data_augmentation(x)\n x = self.rescaling(x)\n x = self.conv1(x)\n x = self.pool1(x)\n x = self.norm1(x)\n x = self.conv2(x)\n x = self.pool2(x)\n x = self.norm2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = self.pool5(x)\n x = self.norm5(x)\n x = self.flatten6(x)\n x = self.d6(x)\n x = self.drop6(x)\n x = self.d7(x)\n x = self.drop7(x)\n x = self.d8(x)\n return x\n\n\nclass AlexNetWork():\n def __init__(self, args):\n # dataset\n data_dir = pathlib.Path(args.dataset_path)\n self.image_height = args.image_height\n self.image_width = args.image_width\n data_shape = (args.image_height, args.image_width, 3)\n batch_size = args.batchsize\n \n pretrain_model_path_or_dir = args.pre_train_model_path_dir\n \n # create model\n self.model = AlexNet(\n data_shape = data_shape,\n num_classes=args.classes)\n \n if os.path.exists(pretrain_model_path_or_dir):\n if args.use_whole_network_model:\n dir = pretrain_model_path_or_dir\n self.model = keras.models.load_model(dir)\n print(\"Whole network load from {} dir\".format(dir))\n else:\n path 
= pretrain_model_path_or_dir\n self.model.load_weights(path)\n print(\"Network model load from {}\".format(path))\n \n # Optimization\n self.learning_rate = args.lr\n self.epochs = args.epochs\n \n if args.opt_type == 'Adam':\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate=args.lr)\n elif args.opt_type == 'SGD':\n self.optimizer = tf.keras.optimizers.SGD(\n learning_rate=args.lr,\n momentum=args.momentum)\n elif args.opt_type == 'Adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(\n learning_rate=args.lr)\n elif args.opt_type == 'Adamax':\n self.optimizer = tf.keras.optimizers.Adamax(\n learning_rate=args.lr)\n elif args.opt_type == 'Ftrl':\n self.optimizer = tf.keras.optimizers.Ftrl(\n learning_rate=args.lr)\n elif args.opt_type == 'Nadam':\n self.optimizer = tf.keras.optimizers.Nadam(\n learning_rate=args.lr)\n else:\n self.optimizer = tf.keras.optimizers.RMSprop(\n learning_rate=args.lr)\n \n self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n \n # get the data set\n image_count = 0\n image_count += len(list(data_dir.glob('*/*.jpg')))\n image_count += len(list(data_dir.glob('*/*.JPEG')))\n print(\"image number:\", image_count)\n \n # train dataset\n self.train_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"training\",\n seed=123,\n image_size=(args.image_height, args.image_width),\n batch_size=batch_size)\n self.class_names = self.train_ds.class_names\n self.train_ds = self.train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)\n \n # valid/test dataset\n self.test_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"validation\",\n seed=123,\n image_size=(args.image_height, args.image_width),\n batch_size=batch_size)\n self.test_ds = self.test_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)\n \n self.train_loss = tf.keras.metrics.Mean(name='train_loss')\n self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\n self.test_loss = tf.keras.metrics.Mean(name='valid_loss')\n self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='vaild_accuracy')\n \n @tf.function\n def train_step(self, images, labels):\n with tf.GradientTape() as tape:\n predictions = self.model(images)\n loss = self.loss_object(labels, predictions)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n \n self.train_loss(loss)\n self.train_accuracy(labels, predictions)\n # [end train_step]\n \n @tf.function\n def test_step(self, images, labels):\n predictions = self.model(images)\n t_loss = self.loss_object(labels, predictions)\n\n self.test_loss(t_loss)\n self.test_accuracy(labels, predictions)\n # [end test_step]\n \n def train(self):\n # Model summary\n self.model.summary()\n \n for epoch in range(self.epochs):\n \n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.test_loss.reset_states()\n self.test_accuracy.reset_states()\n \n try:\n with tqdm(self.train_ds, ncols=80) as t:\n for images, labels in t:\n self.train_step(images, labels)\n template = '[Train\\t Epoch {}] Loss: {:.4f}, Accuracy: {:.4f}'\n template = template.format(epoch+1, self.train_loss.result(), self.train_accuracy.result()*100)\n t.set_description(desc=template)\n except KeyboardInterrupt:\n t.close()\n raise\n\n try:\n with tqdm(self.test_ds, ncols=80) as t:\n for test_images, test_labels in t:\n 
self.test_step(test_images, test_labels)\n template = '[Test\\t Epoch {}] Loss: {:.4f}, Accuracy: {:.4f}'\n template = template.format(epoch+1, self.test_loss.result(), self.test_accuracy.result()*100)\n t.set_description(desc=template)\n except KeyboardInterrupt:\n t.close()\n raise\n # [end train]\n \n def saveModel(self, path_or_dir, mode='save_weight'):\n if mode == 'save_weight':\n path = path_or_dir\n self.model.save_weights(path)\n print(\"Network model save to {}\".format(path))\n elif mode == 'whole_network':\n dir = path_or_dir\n self.model.save(dir)\n print(\"Whole network save to {} dir\".format(dir))\n # [end saveModel]\n \n def test(self, args):\n if not os.path.exists(args.test_image):\n return\n \n image_path = args.test_image\n \n img = keras.preprocessing.image.load_img(\n image_path, target_size=(\n args.image_height,\n args.image_width)\n )\n img_array = keras.preprocessing.image.img_to_array(img)\n img_array = tf.expand_dims(img_array, 0) # Create a batch\n predictions = self.model.predict(img_array)\n score = tf.nn.softmax(predictions[0])\n \n import numpy as np\n print(\"{} most likely belongs to {} with a {:.2f} percent confidence.\".format(image_path, self.class_names[np.argmax(score)], 100 * np.max(score)))\n # [end test]\n \n \n \n \n " ]
[ [ "tensorflow.keras.optimizers.SGD", "tensorflow.keras.preprocessing.image.load_img", "tensorflow.keras.layers.experimental.preprocessing.RandomFlip", "tensorflow.keras.layers.experimental.preprocessing.RandomRotation", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.experimental.preprocessing.Rescaling", "tensorflow.nn.softmax", "tensorflow.keras.metrics.SparseCategoricalAccuracy", "tensorflow.keras.layers.BatchNormalization", "numpy.max", "tensorflow.GradientTape", "tensorflow.keras.optimizers.Nadam", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.losses.SparseCategoricalCrossentropy", "numpy.argmax", "tensorflow.keras.layers.experimental.preprocessing.RandomZoom", "tensorflow.keras.optimizers.Adam", "tensorflow.expand_dims", "tensorflow.keras.layers.Dropout", "tensorflow.keras.models.load_model", "tensorflow.keras.preprocessing.image.img_to_array", "tensorflow.keras.optimizers.RMSprop", "tensorflow.keras.optimizers.Adadelta", "tensorflow.keras.metrics.Mean", "tensorflow.keras.optimizers.Adamax", "tensorflow.keras.layers.Flatten", "tensorflow.keras.preprocessing.image_dataset_from_directory", "tensorflow.keras.optimizers.Ftrl", "tensorflow.keras.layers.MaxPooling2D" ] ]
clinfo/DeepKF
[ "ee4f1be28e5f3bfa46bb47dbdc4d5f678eed36c1", "ee4f1be28e5f3bfa46bb47dbdc4d5f678eed36c1" ]
[ "adw_test/script/plot_vec.py", "adw_test/dmm_input.py" ]
[ "import numpy as np\nimport joblib\nimport json\nimport sys\n\nif len(sys.argv)>2 and sys.argv[2]==\"all\":\n\timport matplotlib\n\tmatplotlib.use('Agg')\n\tfrom matplotlib import pylab as plt\nelse:\n\tfrom matplotlib import pylab as plt\n\n\n\n\nfilename_info=\"data/pack_info.json\"\nfilename_result=\"sim.jbl\"\npid_key=\"pid_list_test\"\nout_dir=\"plot_test\"\n\nif len(sys.argv)>1:\n\tfp = open(sys.argv[1], 'r')\n\tconfig=json.load(fp)\n\tif \"plot_path\" in config:\n\t\tout_dir=config[\"plot_path\"]\n\t\tfilename_result=config[\"simulation_path\"]+\"/field.jbl\"\n\n\nprint(\"[LOAD] \",filename_result)\nobj=joblib.load(filename_result)\ndata_z=obj[\"z\"]\ndata_gz=-obj[\"gz\"][0]\nprint(\"shape z:\",data_z.shape)\nprint(\"shape grad. z\",data_gz.shape)\n#\n#fp = open(filename_info, 'r')\n#data_info = json.load(fp)\n#d=data_info[\"attr_emit_list\"].index(\"206010\")\n\nX=data_z[:,0]\nY=data_z[:,1]\nU=data_gz[:,0]\nV=data_gz[:,1]\nR=np.sqrt(U**2+V**2)\nplt.axes([0.025, 0.025, 0.95, 0.95])\nplt.quiver(X, Y, U, V, R, alpha=.5)\nplt.quiver(X, Y, U, V, edgecolor='k', facecolor='None', linewidth=.5)\nr=3.0#20\nplt.xlim(-r, r)\n#plt.xticks(())\nplt.ylim(-r,r)\n#plt.yticks(())\n\nout_filename=out_dir+\"/vec.png\"\nprint(\"[SAVE] :\",out_filename)\nplt.savefig(out_filename)\n\nplt.show()\nplt.clf()\n", "# ==============================================================================\n# Load data\n# Copyright 2017 Kyoto Univ. Okuno lab. . All Rights Reserved.\n# ==============================================================================\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nclass dotdict(dict):\n\t\"\"\"dot.notation access to dictionary attributes\"\"\"\n\t__getattr__ = dict.get\n\t__setattr__ = dict.__setitem__\n\t__delattr__ = dict.__delitem__\n\n\ndef load_data(config,with_shuffle=True,with_train_test=True,test_flag=False,output_dict_flag=True):\n\ttime_major=config[\"time_major\"]\n\tif not test_flag:\n\t\tx = np.load(config[\"data_train_npy\"])\n\t\tif config[\"mask_train_npy\"] is None:\n\t\t\tm = np.ones_like(x)\n\t\telse:\n\t\t\tm = np.load(config[\"mask_train_npy\"])\n\t\tif config[\"steps_npy\"] is None:\n\t\t\ts=[len(x[i]) for i in range(len(x))]\n\t\t\ts=np.array(s)\n\t\telse:\n\t\t\ts = np.load(config[\"steps_npy\"])\n\telse:\n\t\tx = np.load(config[\"data_test_npy\"])\n\t\tif config[\"mask_test_npy\"] is None:\n\t\t\tm = np.ones_like(x)\n\t\telse:\n\t\t\tm = np.load(config[\"mask_test_npy\"])\n\t\tif config[\"steps_test_npy\"] is None:\n\t\t\ts=[len(x[i]) for i in range(len(x))]\n\t\t\ts=np.array(s)\n\t\telse:\n\t\t\ts = np.load(config[\"steps_test_npy\"])\n\tif not time_major:\n\t\tx=x.transpose((0,2,1))\n\t\tm=m.transpose((0,2,1))\n\t# train / validatation/ test\n\tdata_num=x.shape[0]\n\tdata_idx=list(range(data_num))\n\tif with_shuffle:\n\t\tnp.random.shuffle(data_idx)\n\t# split train/test\n\tsep=[0.0,1.0]\n\tif with_train_test:\n\t\tsep=config[\"train_test_ratio\"]\n\tprev_idx=0\n\tsum_r=0.0\n\tsep_idx=[]\n\tfor r in sep:\n\t\tsum_r+=r\n\t\tidx=int(data_num*sum_r)\n\t\tsep_idx.append([prev_idx,idx])\n\t\tprev_idx=idx\n\tprint(\"#training data:\",sep_idx[0][1]-sep_idx[0][0])\n\tprint(\"#valid 
data:\",sep_idx[1][1]-sep_idx[1][0])\n\t\n\ttr_idx=data_idx[sep_idx[0][0]:sep_idx[0][1]]\n\tte_idx=data_idx[sep_idx[1][0]:sep_idx[1][1]]\n\ttr_x=x[tr_idx]\n\ttr_m=m[tr_idx]\n\ttr_s=s[tr_idx]\n\tte_x=x[te_idx]\n\tte_m=m[te_idx]\n\tte_s=s[te_idx]\n\t#tr_x=tr_x[0:100]\n\t#tr_m=tr_m[0:100]\n\t#te_x=te_x[0:100]\n\t#te_m=te_m[0:100]\n\t#return tr_x,tr_m,te_x,te_m\n\ttrain_data=dotdict({})\n\ttrain_data.x=tr_x\n\ttrain_data.m=tr_m\n\ttrain_data.s=tr_s\n\ttrain_data.num=tr_x.shape[0]\n\ttrain_data.n_steps=tr_x.shape[1]\n\ttrain_data.dim=tr_x.shape[2]\n\tvalid_data=dotdict({})\n\tvalid_data.x=te_x\n\tvalid_data.m=te_m\n\tvalid_data.s=te_s\n\tvalid_data.num=te_x.shape[0]\n\tvalid_data.n_steps=te_x.shape[1]\n\tvalid_data.dim=tr_x.shape[2]\n\treturn train_data,valid_data\n\n" ]
[ [ "matplotlib.pylab.axes", "matplotlib.use", "matplotlib.pylab.savefig", "matplotlib.pylab.show", "matplotlib.pylab.quiver", "matplotlib.pylab.ylim", "numpy.sqrt", "matplotlib.pylab.clf", "matplotlib.pylab.xlim" ], [ "numpy.array", "numpy.ones_like", "numpy.load", "numpy.random.shuffle" ] ]
eendebakpt/qupulse
[ "5b5b48de10084d413e10cfd8f6e9f7536c69dd70" ]
[ "tests/hardware/tabor_simulator_based_tests.py" ]
[ "import unittest\nimport subprocess\nimport time\nimport platform\nimport os\n\nimport pyvisa.resources\nimport tabor_control\nimport numpy as np\n\nfrom qupulse.hardware.awgs.tabor import TaborAWGRepresentation, TaborChannelPair\nfrom qupulse._program.tabor import TaborSegment, PlottableProgram, TaborException, TableDescription, TableEntry\nfrom typing import List, Tuple, Optional, Any\n\n\nclass TaborSimulatorManager:\n def __init__(self,\n instrument_type: type,\n address_kwarg_name: str,\n instrument_kwargs: dict,\n simulator_executable='WX2184C.exe',\n simulator_path=os.path.realpath(os.path.dirname(__file__))):\n self.simulator_executable = simulator_executable\n self.simulator_path = simulator_path\n\n self.started_simulator = False\n\n self.address_kwarg_name = address_kwarg_name\n self.instrument_type = instrument_type\n self.instrument_kwargs = instrument_kwargs\n\n self.simulator_process = None\n self.instrument = None\n\n def kill_running_simulators(self):\n command = ['Taskkill', '/IM {simulator_executable}'.format(simulator_executable=self.simulator_executable)]\n try:\n subprocess.run(command,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except FileNotFoundError:\n pass\n\n @property\n def simulator_full_path(self):\n return os.path.join(self.simulator_path, self.simulator_executable)\n\n def start_simulator(self, try_connecting_to_existing_simulator=True, max_wait_time=30) -> pyvisa.resources.MessageBasedResource:\n try:\n pyvisa.ResourceManager()\n except ValueError:\n raise unittest.SkipTest(\"visalib not available\")\n\n if try_connecting_to_existing_simulator:\n try:\n return tabor_control.open_session('127.0.0.1')\n except pyvisa.VisaIOError:\n pass\n\n if not os.path.isfile(self.simulator_full_path):\n raise RuntimeError('Cannot locate simulator executable.')\n\n self.kill_running_simulators()\n\n self.simulator_process = subprocess.Popen([self.simulator_full_path, '/switch-on', '/gui-in-tray'])\n\n start = time.time()\n while time.time() - start <= max_wait_time:\n time.sleep(0.1)\n try:\n return tabor_control.open_session('127.0.0.1')\n except pyvisa.VisaIOError:\n pass\n if self.simulator_process.returncode:\n raise RuntimeError('Simulator exited with return code {}'.format(self.simulator_process.returncode))\n raise RuntimeError('Could not connect to simulator')\n\n def connect(self):\n self.instrument = self.instrument_type(**{**self.instrument_kwargs, self.address_kwarg_name: '127.0.0.1'})\n\n if self.instrument.main_instrument.visa_inst is None:\n raise RuntimeError('Could not connect to simulator')\n return self.instrument\n\n def disconnect(self):\n for device in self.instrument.all_devices:\n device.close()\n self.instrument = None\n\n def __del__(self):\n if self.started_simulator and self.simulator_process:\n self.simulator_process.kill()\n\n\n@unittest.skipIf(platform.system() != 'Windows', \"Simulator currently only available on Windows :(\")\nclass TaborSimulatorBasedTest(unittest.TestCase):\n simulator_manager = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.instrument = None\n\n @classmethod\n def setUpClass(cls):\n cls.simulator_manager = TaborSimulatorManager(TaborAWGRepresentation, 'instr_addr',\n dict(reset=True, paranoia_level=2),\n 'WX2184C.exe', os.path.dirname(__file__))\n try:\n cls.simulator_manager.start_simulator()\n except RuntimeError as err:\n raise unittest.SkipTest(*err.args) from err\n\n @classmethod\n def tearDownClass(cls):\n del cls.simulator_manager\n\n def setUp(self):\n 
self.instrument = self.simulator_manager.connect()\n\n def tearDown(self):\n self.instrument.reset()\n self.simulator_manager.disconnect()\n\n @staticmethod\n def to_new_sequencer_tables(sequencer_tables: List[List[Tuple[int, int, int]]]\n ) -> List[List[Tuple[TableDescription, Optional[Any]]]]:\n return [[(TableDescription(*entry), None) for entry in sequencer_table]\n for sequencer_table in sequencer_tables]\n\n @staticmethod\n def to_new_advanced_sequencer_table(advanced_sequencer_table: List[Tuple[int, int, int]]) -> List[TableDescription]:\n return [TableDescription(*entry) for entry in advanced_sequencer_table]\n\n\nclass TaborAWGRepresentationTests(TaborSimulatorBasedTest):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def test_sample_rate(self):\n for ch in (1, 2, 3, 4):\n self.assertIsInstance(self.instrument.sample_rate(ch), int)\n\n with self.assertRaises(TaborException):\n self.instrument.sample_rate(0)\n\n self.instrument.send_cmd(':INST:SEL 1')\n self.instrument.send_cmd(':FREQ:RAST 2.3e9')\n\n self.assertEqual(2300000000, self.instrument.sample_rate(1))\n\n def test_amplitude(self):\n for ch in (1, 2, 3, 4):\n self.assertIsInstance(self.instrument.amplitude(ch), float)\n\n with self.assertRaises(TaborException):\n self.instrument.amplitude(0)\n\n self.instrument.send_cmd(':INST:SEL 1; :OUTP:COUP DC')\n self.instrument.send_cmd(':VOLT 0.7')\n\n self.assertAlmostEqual(.7, self.instrument.amplitude(1))\n\n def test_select_marker(self):\n with self.assertRaises(TaborException):\n self.instrument.select_marker(6)\n\n self.instrument.select_marker(2)\n selected = self.instrument.send_query(':SOUR:MARK:SEL?')\n self.assertEqual(selected, '2')\n\n self.instrument.select_marker(1)\n selected = self.instrument.send_query(':SOUR:MARK:SEL?')\n self.assertEqual(selected, '1')\n\n def test_select_channel(self):\n with self.assertRaises(TaborException):\n self.instrument.select_channel(6)\n\n self.instrument.select_channel(1)\n self.assertEqual(self.instrument.send_query(':INST:SEL?'), '1')\n\n self.instrument.select_channel(4)\n self.assertEqual(self.instrument.send_query(':INST:SEL?'), '4')\n\n\nclass TaborMemoryReadTests(TaborSimulatorBasedTest):\n def setUp(self):\n super().setUp()\n\n ramp_up = np.linspace(0, 2**14-1, num=192, dtype=np.uint16)\n ramp_down = ramp_up[::-1]\n zero = np.ones(192, dtype=np.uint16) * 2**13\n sine = ((np.sin(np.linspace(0, 2*np.pi, 192+64)) + 1) / 2 * (2**14 - 1)).astype(np.uint16)\n\n self.segments = [TaborSegment.from_sampled(ramp_up, ramp_up, None, None),\n TaborSegment.from_sampled(ramp_down, zero, None, None),\n TaborSegment.from_sampled(sine, sine, None, None)]\n\n self.zero_segment = TaborSegment.from_sampled(zero, zero, None, None)\n\n # program 1\n self.sequence_tables_raw = [[(10, 0, 0), (10, 1, 0), (10, 0, 0), (10, 1, 0)],\n [(1, 0, 0), (1, 1, 0), (1, 0, 0), (1, 1, 0)]]\n self.advanced_sequence_table = [(1, 1, 0), (1, 2, 0)]\n\n self.sequence_tables = self.to_new_sequencer_tables(self.sequence_tables_raw)\n self.advanced_sequence_table = self.to_new_advanced_sequencer_table(self.advanced_sequence_table)\n\n self.channel_pair = TaborChannelPair(self.instrument, (1, 2), 'tabor_unit_test')\n\n def arm_program(self, sequencer_tables, advanced_sequencer_table, mode, waveform_to_segment_index):\n class DummyProgram:\n @staticmethod\n def get_sequencer_tables():\n return sequencer_tables\n\n @staticmethod\n def get_advanced_sequencer_table():\n return advanced_sequencer_table\n\n @staticmethod\n def 
update_volatile_parameters(parameters):\n modifications = {1: TableEntry(repetition_count=5, element_number=2, jump_flag=0),\n (0, 1): TableDescription(repetition_count=50, element_id=1, jump_flag=0)}\n return modifications\n\n markers = (None, None)\n channels = (1, 2)\n\n waveform_mode = mode\n\n self.channel_pair._known_programs['dummy_program'] = (waveform_to_segment_index, DummyProgram)\n self.channel_pair.change_armed_program('dummy_program')\n\n def test_read_waveforms(self):\n self.channel_pair._amend_segments(self.segments)\n\n waveforms = self.channel_pair.read_waveforms()\n\n segments = [TaborSegment.from_binary_segment(waveform)\n for waveform in waveforms]\n\n expected = [self.zero_segment, *self.segments]\n\n for ex, r in zip(expected, segments):\n ex1, ex2 = ex.data_a, ex.data_b\n r1, r2 = r.data_a, r.data_b\n np.testing.assert_equal(ex1, r1)\n np.testing.assert_equal(ex2, r2)\n\n self.assertEqual(expected, segments)\n\n def test_read_sequence_tables(self):\n self.channel_pair._amend_segments(self.segments)\n self.arm_program(self.sequence_tables, self.advanced_sequence_table, None, np.asarray([1, 2]))\n\n sequence_tables = self.channel_pair.read_sequence_tables()\n\n actual_sequence_tables = [self.channel_pair._idle_sequence_table] + [[(rep, index+2, jump)\n for rep, index, jump in table]\n for table in self.sequence_tables_raw]\n\n expected = list(tuple(np.asarray(d)\n for d in zip(*table))\n for table in actual_sequence_tables)\n\n np.testing.assert_equal(sequence_tables, expected)\n\n def test_read_advanced_sequencer_table(self):\n self.channel_pair._amend_segments(self.segments)\n self.arm_program(self.sequence_tables, self.advanced_sequence_table, None, np.asarray([1, 2]))\n\n actual_advanced_table = [(1, 1, 1)] + [(rep, idx+1, jmp) for rep, idx, jmp in self.advanced_sequence_table]\n\n expected = list(np.asarray(d)\n for d in zip(*actual_advanced_table))\n\n advanced_table = self.channel_pair.read_advanced_sequencer_table()\n np.testing.assert_equal(advanced_table, expected)\n\n def test_set_volatile_parameter(self):\n self.channel_pair._amend_segments(self.segments)\n self.arm_program(self.sequence_tables, self.advanced_sequence_table, None, np.asarray([1, 2]))\n\n para = {'a': 5}\n actual_sequence_tables = [self.channel_pair._idle_sequence_table] + [[(rep, index + 2, jump)\n for rep, index, jump in table]\n for table in self.sequence_tables_raw]\n\n actual_advanced_table = [(1, 1, 1)] + [(rep, idx + 1, jmp) for rep, idx, jmp in self.advanced_sequence_table]\n\n self.channel_pair.set_volatile_parameters('dummy_program', parameters=para)\n\n actual_sequence_tables[1][1] = (50, 3, 0)\n actual_advanced_table[2] = (5, 3, 0)\n\n sequence_table = self.channel_pair.read_sequence_tables()\n expected = list(tuple(np.asarray(d)\n for d in zip(*table))\n for table in actual_sequence_tables)\n np.testing.assert_equal(sequence_table, expected)\n\n advanced_table = self.channel_pair.read_advanced_sequencer_table()\n expected = list(np.asarray(d)\n for d in zip(*actual_advanced_table))\n np.testing.assert_equal(advanced_table, expected)\n" ]
[ [ "numpy.linspace", "numpy.ones", "numpy.asarray", "numpy.testing.assert_equal" ] ]
vilhub/DeBERTa
[ "87580930689ec9f75ef8dbebba367953ed3dfe63", "87580930689ec9f75ef8dbebba367953ed3dfe63" ]
[ "DeBERTa/deberta/ops.py", "DeBERTa/apps/tasks/ner_task.py" ]
[ "# Copyright (c) Microsoft, Inc. 2020\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n#\n# Author: penhe@microsoft.com\n# Date: 01/15/2020\n#\n\nimport math\nfrom packaging import version\nimport torch\nfrom torch.nn import LayerNorm\nfrom ..utils.jit_tracing import traceable\n\nif version.Version(torch.__version__) >= version.Version('1.0.0'):\n from torch import _softmax_backward_data as _softmax_backward_data\nelse:\n from torch import softmax_backward_data as _softmax_backward_data\n\n__all__ = ['StableDropout', 'MaskedLayerNorm', 'XSoftmax', 'ACT2FN', 'LayerNorm']\n\n@traceable\nclass XSoftmax(torch.autograd.Function):\n \"\"\" Masked Softmax which is optimized for saving memory\n\n Args:\n \n input (:obj:`torch.tensor`): The input tensor that will apply softmax.\n mask (:obj:`torch.IntTensor`): The mask matrix where 0 indicate that element will be ignored in the softmax caculation.\n dim (int): The dimenssion that will apply softmax.\n \n Example::\n\n import torch\n from DeBERTa.deberta import XSoftmax\n # Make a tensor\n x = torch.randn([4,20,100])\n # Create a mask\n mask = (x>0).int()\n y = XSoftmax.apply(x, mask, dim=-1)\n \n \"\"\"\n\n @staticmethod\n def forward(self, input, mask, dim):\n \"\"\"\n \"\"\"\n\n self.dim = dim\n if version.Version(torch.__version__) >= version.Version('1.2.0a'):\n rmask = ~(mask.bool())\n else:\n rmask = (1-mask).byte() # This line is not supported by Onnx tracing.\n\n output = input.masked_fill(rmask, float('-inf'))\n output = torch.softmax(output, self.dim)\n output.masked_fill_(rmask, 0)\n self.save_for_backward(output)\n return output\n\n @staticmethod\n def backward(self, grad_output):\n \"\"\"\n \"\"\"\n\n output, = self.saved_tensors\n inputGrad = _softmax_backward_data(grad_output, output, self.dim, output)\n return inputGrad, None, None\n\n @staticmethod\n def symbolic(g, self, mask, dim):\n import torch.onnx.symbolic_helper as sym_help\n from torch.onnx.symbolic_opset9 import masked_fill, softmax\n\n mask_cast_value = g.op(\"Cast\", mask, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n r_mask = g.op(\"Cast\", g.op(\"Sub\", g.op(\"Constant\", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), to_i=sym_help.cast_pytorch_to_onnx['Byte'])\n output = masked_fill(g, self, r_mask, g.op(\"Constant\", value_t=torch.tensor(float('-inf'))))\n output = softmax(g, output, dim)\n return masked_fill(g, output, r_mask, g.op(\"Constant\", value_t=torch.tensor(0, dtype=torch.uint8)))\n\nclass DropoutContext(object):\n def __init__(self):\n self.dropout = 0\n self.mask = None\n self.scale = 1\n self.reuse_mask = True\n\ndef get_mask(input, local_context):\n if not isinstance(local_context, DropoutContext):\n dropout = local_context\n mask = None\n else:\n dropout = local_context.dropout\n dropout *= local_context.scale\n mask = local_context.mask if local_context.reuse_mask else None\n\n if dropout>0 and mask is None:\n if version.Version(torch.__version__) >= version.Version('1.2.0a'):\n mask=(1-torch.empty_like(input).bernoulli_(1-dropout)).bool()\n else:\n mask=(1-torch.empty_like(input).bernoulli_(1-dropout)).byte()\n \n if isinstance(local_context, DropoutContext):\n if local_context.mask is None:\n local_context.mask = mask\n\n return mask, dropout\n\n@traceable\nclass XDropout(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, local_ctx):\n mask, dropout = get_mask(input, local_ctx)\n ctx.scale=1.0/(1-dropout)\n if dropout>0:\n 
ctx.save_for_backward(mask)\n return input.masked_fill(mask, 0)*ctx.scale\n else:\n return input\n\n @staticmethod\n def backward(ctx, grad_output):\n if ctx.scale > 1:\n mask, = ctx.saved_tensors\n return grad_output.masked_fill(mask, 0)*ctx.scale, None\n else:\n return grad_output, None\n\nclass StableDropout(torch.nn.Module):\n \"\"\" Optimized dropout module for stabilizing the training\n\n Args:\n\n drop_prob (float): the dropout probabilities\n\n \"\"\"\n\n def __init__(self, drop_prob):\n super().__init__()\n self.drop_prob = drop_prob\n self.count = 0\n self.context_stack = None\n\n def forward(self, x):\n \"\"\" Call the module\n\n Args:\n \n x (:obj:`torch.tensor`): The input tensor to apply dropout\n\n\n \"\"\"\n if self.training and self.drop_prob>0:\n return XDropout.apply(x, self.get_context())\n return x\n\n def clear_context(self):\n self.count = 0\n self.context_stack = None\n\n def init_context(self, reuse_mask=True, scale = 1):\n if self.context_stack is None:\n self.context_stack = []\n self.count = 0\n for c in self.context_stack:\n c.reuse_mask = reuse_mask\n c.scale = scale\n\n def get_context(self):\n if self.context_stack is not None:\n if self.count >= len(self.context_stack):\n self.context_stack.append(DropoutContext())\n ctx = self.context_stack[self.count]\n ctx.dropout = self.drop_prob\n self.count += 1\n return ctx\n else:\n return self.drop_prob\n\ndef MaskedLayerNorm(layerNorm, input, mask = None):\n \"\"\" Masked LayerNorm which will apply mask over the output of LayerNorm to avoid inaccurate updatings to the LayerNorm module.\n \n Args:\n layernorm (:obj:`~DeBERTa.deberta.LayerNorm`): LayerNorm module or function\n input (:obj:`torch.tensor`): The input tensor\n mask (:obj:`torch.IntTensor`): The mask to applied on the output of LayerNorm where `0` indicate the output of that element will be ignored, i.e. 
set to `0`\n\n Example::\n\n # Create a tensor b x n x d\n x = torch.randn([1,10,100])\n m = torch.tensor([[1,1,1,0,0,0,0,0,0,0]], dtype=torch.int)\n LayerNorm = DeBERTa.deberta.LayerNorm(100)\n y = MaskedLayerNorm(LayerNorm, x, m)\n\n \"\"\"\n output = layerNorm(input).to(input)\n if mask is None:\n return output\n if mask.dim()!=input.dim():\n if mask.dim()==4:\n mask=mask.squeeze(1).squeeze(1)\n mask = mask.unsqueeze(2)\n mask = mask.to(output.dtype)\n return output*mask\n\ndef gelu(x):\n \"\"\"Implementation of the gelu activation function.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\ndef linear_act(x):\n return x\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish, \"tanh\": torch.tanh, \"linear\": linear_act, 'sigmoid': torch.sigmoid}\n\n\n", "\nfrom collections import OrderedDict,defaultdict,Sequence,Counter\nimport math\nimport numpy as np\nimport os\nimport pdb\nimport random\nimport torch\nimport ujson as json\nfrom ...utils import xtqdm as tqdm\nfrom ...utils import get_logger\n\nfrom ..models import NERModel\nfrom ...data import ExampleInstance, ExampleSet, DynamicDataset\nfrom ...data.example import *\nfrom ...data.example import _truncate_segments\nfrom .task import EvalData, Task\nfrom .task_registry import register_task\n\nfrom seqeval import metrics as seq_metrics\n\n__all__ = ['NERTask']\n\nlogger = get_logger()\n\n@register_task(name=\"NER\", desc=\"Named-entity recognition task\")\nclass NERTask(Task):\n def __init__(self, data_dir, tokenizer, args, **kwargs):\n super().__init__(tokenizer, args, **kwargs)\n self.data_dir = data_dir\n\n def train_data(self, max_seq_len=512, dataset_size=None, epochs=1, mask_gen=None, **kwargs):\n train = self.load_data(os.path.join(self.data_dir, 'train.txt'), max_seq_len=max_seq_len)\n examples = ExampleSet(train)\n if dataset_size is None:\n dataset_size = len(examples)*epochs\n return DynamicDataset(examples, feature_fn = self.get_feature_fn(max_seq_len=max_seq_len, mask_gen=mask_gen), \\\ndataset_size = dataset_size, shuffle=True, **kwargs)\n\n def eval_data(self, max_seq_len=512, dataset_size=None, **kwargs):\n ds = [\n self._data('dev', 'valid.txt', 'dev', max_seq_len=max_seq_len),\n self._data('test', 'test.txt', 'test', max_seq_len=max_seq_len)\n ]\n \n for d in ds:\n if dataset_size is None:\n _size = len(d.data)\n d.data = DynamicDataset(d.data, feature_fn = self.get_feature_fn(max_seq_len=max_seq_len), dataset_size = _size, **kwargs)\n return ds\n\n def test_data(self,max_seq_len=512, dataset_size = None, **kwargs):\n \"\"\"See base class.\"\"\"\n ds = [\n self._data('test', 'test.txt', 'test', max_seq_len=max_seq_len)\n ]\n \n for d in ds:\n if dataset_size is None:\n _size = len(d.data)\n d.data = DynamicDataset(d.data, feature_fn = self.get_feature_fn(max_seq_len=max_seq_len), dataset_size = _size, **kwargs)\n return ds\n\n def _data(self, name, path, type_name = 'dev', ignore_metric=False, max_examples=None, shuffle=False, max_seq_len=512):\n input_src = os.path.join(self.data_dir, path)\n assert os.path.exists(input_src), f\"{input_src} doesn't exists\"\n data = self.load_data(input_src, max_seq_len=max_seq_len, max_examples=max_examples, shuffle=shuffle)\n examples = ExampleSet(data)\n predict_fn = self.get_predict_fn(examples)\n 
metrics_fn = self.get_metrics_fn()\n return EvalData(name, examples,\n metrics_fn = metrics_fn, predict_fn = predict_fn, ignore_metric=ignore_metric, critial_metrics=['f1'])\n\n def get_metrics_fn(self):\n \"\"\"Calcuate metrics based on prediction results\"\"\"\n def metrics_fn(logits, labels):\n preds = np.argmax(logits, axis=-1)\n label_names = self.get_labels()\n y_true = []\n y_pred = []\n for pred,label in zip(preds, labels):\n y_true.append([label_names[l] for l in label if l>=0])\n y_pred.append([label_names[p] for p,l in zip(pred, label) if l>=0])\n return OrderedDict(\n accuracy = seq_metrics.accuracy_score(y_true, y_pred),\n f1 = seq_metrics.f1_score(y_true, y_pred),\n precision = seq_metrics.precision_score(y_true, y_pred),\n recall = seq_metrics.recall_score(y_true, y_pred)\n )\n return metrics_fn\n\n def get_predict_fn(self, examples):\n \"\"\"Calcuate metrics based on prediction results\"\"\"\n def predict_fn(logits, output_dir, name, prefix):\n output=os.path.join(output_dir, 'submit-{}-{}.tsv'.format(name, prefix))\n preds = np.argmax(logits, axis=-1)\n labels = self.get_labels()\n with open(output, 'w', encoding='utf-8') as fs:\n fs.write('index\\tpredictions\\n')\n for i,(e,p) in enumerate(zip(examples,preds)):\n words = ''.join(e.sentence).split(' ')\n tokens = e.segments[0]\n bw = 0\n for w,t in zip(words,tokens):\n fs.write(f'{w} {labels[p[bw]]}\\n')\n bw += len(t)\n fs.write('\\n')\n\n return predict_fn\n\n def get_feature_fn(self, max_seq_len = 512, mask_gen = None):\n def _example_to_feature(example, rng=None, ext_params=None, **kwargs):\n return self.example_to_feature(self.tokenizer, example, max_seq_len = max_seq_len, \\\n rng = rng, mask_generator = mask_gen, ext_params = ext_params, **kwargs)\n return _example_to_feature\n\n def get_model_class_fn(self):\n def partial_class(*wargs, **kwargs):\n return NERModel.load_model(*wargs, **kwargs)\n return partial_class\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"O\", \"B-MISC\", \"I-MISC\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\"]\n\n def load_data(self, path, max_seq_len=512, max_examples=None, shuffle=False):\n docs = self.extract_docs(path)\n examples=[]\n for doc in docs:\n merged_words = []\n merged_tokens = []\n merged_labels = []\n size = 0\n for sent in doc:\n words = [t[0] if i==0 else (' ' + t[0]) for i,t in enumerate(sent)]\n labels = [t[1] for t in sent]\n tokens = [self.tokenizer.tokenize(w) for w in words]\n l = sum(len(t) for t in tokens)\n if size+l > max_seq_len-2:\n examples.append(ExampleInstance(segments=[merged_tokens], label=merged_labels, sentence=merged_words))\n size = 0\n merged_words = []\n merged_tokens = []\n merged_labels = []\n size += l\n merged_words.extend(words)\n merged_tokens.extend(tokens)\n merged_labels.extend(labels)\n if size>0:\n examples.append(ExampleInstance(segments=[merged_tokens], label=merged_labels, sentence=merged_words))\n\n def get_stats(l):\n return f'Max={max(l)}, min={min(l)}, avg={np.mean(l)}'\n ctx_token_size = [sum(len(w) for w in e.segments[0]) for e in examples]\n logger.info(f'Statistics: {get_stats(ctx_token_size)}, long={len([t for t in ctx_token_size if t > 500])}/{len(ctx_token_size)}')\n\n return examples\n\n def example_to_feature(self, tokenizer, example, max_seq_len=512, rng=None, mask_generator = None, ext_params=None, label_type='int', **kwargs):\n if not rng:\n rng = random\n max_num_tokens = max_seq_len - 2\n features = OrderedDict()\n tokens = ['[CLS]']\n target_labels = [-1]\n type_ids = 
[0]\n\n for i,w in enumerate(example.segments[0]):\n tokens.extend(w)\n type_ids.extend([0]*len(w))\n if example.label is not None:\n target_labels.append(self.label2id(example.label[i]))\n target_labels.extend([-1]*(len(w)-1))\n tokens.append('[SEP]')\n if example.label is not None:\n target_labels.extend([-1]*(max_seq_len-len(target_labels)))\n type_ids.append(0)\n token_ids = tokenizer.convert_tokens_to_ids(tokens)\n pos_ids = list(range(len(token_ids)))\n input_mask = [1]*len(token_ids)\n features['input_ids'] = token_ids\n features['type_ids'] = type_ids\n features['position_ids'] = pos_ids\n features['input_mask'] = input_mask\n padding_size = max(0, max_seq_len - len(token_ids))\n for f in features:\n features[f].extend([0]*padding_size)\n\n for f in features:\n features[f] = torch.tensor(features[f], dtype=torch.int)\n if example.label is not None: # and example.label[0]>=0 and example.label[1]>=0:\n features['labels'] = torch.tensor(target_labels, dtype=torch.int)\n return features\n\n def extract_docs(self, path):\n docs = []\n with open(path, 'r', encoding='utf-8') as fs:\n doc = []\n sent = []\n for line in fs:\n if line.startswith('-DOCSTART- '):\n if len(sent) > 0:\n doc.append(sent)\n sent = []\n if len(doc) > 0:\n docs.append(doc)\n doc = []\n elif line.strip() == '':\n if len(sent) > 0:\n doc.append(sent)\n sent = []\n else:\n tabs = line.split(' ')\n sent.append([tabs[0], tabs[-1].strip()])\n if len(sent) > 0:\n doc.append(sent)\n sent = []\n if len(doc) > 0:\n docs.append(doc)\n doc = []\n logger.info(f'Loaded {len(docs)} docs, {sum([len(d) for d in docs])} sentences.')\n return docs\n\ndef test_ner_load_data():\n tokenizer = GPT2Tokenizer()\n data='/mount/biglm/bert/NER/data/train.txt'\n task = NERTask(os.path.dirname(data), tokenizer)\n #docs = task.extract_docs(data)\n examples = task.load_data(data)\n feature = task.example_to_feature(tokenizer, examples[0], max_seq_len=512)\n pdb.set_trace()\n" ]
[ [ "torch.sigmoid", "torch.onnx.symbolic_opset9.softmax", "torch.softmax", "torch.tensor", "torch.softmax_backward_data", "torch.empty_like" ], [ "numpy.mean", "numpy.argmax", "torch.tensor" ] ]
michael-p-sachen/ProHMR
[ "0167d05a9a45939a217d02b4ef8fd67977c15f82" ]
[ "prohmr/utils/geometry.py" ]
[ "from typing import Optional\nimport torch\nfrom torch.nn import functional as F\n\ndef aa_to_rotmat(theta: torch.Tensor):\n \"\"\"\n Convert axis-angle representation to rotation matrix.\n Works by first converting it to a quaternion.\n Args:\n theta (torch.Tensor): Tensor of shape (B, 3) containing axis-angle representations.\n Returns:\n torch.Tensor: Corresponding rotation matrices with shape (B, 3, 3).\n \"\"\"\n norm = torch.norm(theta + 1e-8, p = 2, dim = 1)\n angle = torch.unsqueeze(norm, -1)\n normalized = torch.div(theta, angle)\n angle = angle * 0.5\n v_cos = torch.cos(angle)\n v_sin = torch.sin(angle)\n quat = torch.cat([v_cos, v_sin * normalized], dim = 1)\n return quat_to_rotmat(quat)\n\ndef quat_to_rotmat(quat: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert quaternion representation to rotation matrix.\n Args:\n quat (torch.Tensor) of shape (B, 4); 4 <===> (w, x, y, z).\n Returns:\n torch.Tensor: Corresponding rotation matrices with shape (B, 3, 3).\n \"\"\"\n norm_quat = quat\n norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)\n w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]\n\n B = quat.size(0)\n\n w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)\n wx, wy, wz = w*x, w*y, w*z\n xy, xz, yz = x*y, x*z, y*z\n\n rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,\n 2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,\n 2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)\n return rotMat\n\n\ndef rot6d_to_rotmat(x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert 6D rotation representation to 3x3 rotation matrix.\n Based on Zhou et al., \"On the Continuity of Rotation Representations in Neural Networks\", CVPR 2019\n Args:\n x (torch.Tensor): (B,6) Batch of 6-D rotation representations.\n Returns:\n torch.Tensor: Batch of corresponding rotation matrices with shape (B,3,3).\n \"\"\"\n x = x.reshape(-1,2,3).permute(0, 2, 1).contiguous()\n a1 = x[:, :, 0]\n a2 = x[:, :, 1]\n b1 = F.normalize(a1)\n b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)\n b3 = torch.cross(b1, b2)\n return torch.stack((b1, b2, b3), dim=-1)\n\ndef perspective_projection(points: torch.Tensor,\n translation: torch.Tensor,\n focal_length: torch.Tensor,\n camera_center: Optional[torch.Tensor] = None,\n rotation: Optional[torch.Tensor] = None) -> torch.Tensor:\n \"\"\"\n Computes the perspective projection of a set of 3D points.\n Args:\n points (torch.Tensor): Tensor of shape (B, N, 3) containing the input 3D points.\n translation (torch.Tensor): Tensor of shape (B, 3) containing the 3D camera translation.\n focal_length (torch.Tensor): Tensor of shape (B, 2) containing the focal length in pixels.\n camera_center (torch.Tensor): Tensor of shape (B, 2) containing the camera center in pixels.\n rotation (torch.Tensor): Tensor of shape (B, 3, 3) containing the camera rotation.\n Returns:\n torch.Tensor: Tensor of shape (B, N, 2) containing the projection of the input points.\n \"\"\"\n batch_size = points.shape[0]\n if rotation is None:\n rotation = torch.eye(3, device=points.device, dtype=points.dtype).unsqueeze(0).expand(batch_size, -1, -1)\n if camera_center is None:\n camera_center = torch.zeros(batch_size, 2, device=points.device, dtype=points.dtype)\n # Populate intrinsic camera matrix K.\n K = torch.zeros([batch_size, 3, 3], device=points.device, dtype=points.dtype)\n K[:,0,0] = focal_length[:,0]\n K[:,1,1] = focal_length[:,1]\n K[:,2,2] = 1.\n K[:,:-1, -1] = camera_center\n\n # Transform points\n points = 
torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n\n # Apply perspective distortion\n projected_points = points / points[:,:,-1].unsqueeze(-1)\n\n # Apply camera intrinsics\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n\n return projected_points[:, :, :-1]" ]
[ [ "torch.zeros", "torch.cos", "torch.cat", "torch.nn.functional.normalize", "torch.stack", "torch.sin", "torch.einsum", "torch.norm", "torch.unsqueeze", "torch.eye", "torch.cross", "torch.div" ] ]
mehdi-mirzapour/eigenthemes
[ "7252c21831b6856e9047a26bdb09bb0787b957d0" ]
[ "jrk/jrk.py" ]
[ "import torch\nimport torch.nn as nn\nfrom jrk.encoder import Encoder\nimport numpy\nnumpy.set_printoptions(threshold=numpy.nan)\n\nclass JRK(nn.Module):\n def __init__(self, config):\n super(JRK, self).__init__()\n\n self.encoder = Encoder(config={\n 'type': config['type'],\n 'lstm_hiddim': config['lstm_hiddim'],\n 'n_filters': config['n_filters'],\n 'filter_sizes': config['filter_sizes'],\n 'word_embs': config['word_embs'],\n 'pos_embdim': config['pos_embdim'],\n 'dropout': config['dropout'],\n 'en_dim': config['en_dim'],\n 'n_rels': config['n_rels']})\n\n self.ent_pair2id = config['ent_pair2id']\n self.ent_pair_rel_scores = nn.Embedding(len(self.ent_pair2id), config['n_rels'])\n self.ent_pair_rel_scores.weight.data.fill_(-1)\n self.ent_pair_rel_scores.weight.requires_grad = False\n\n def init_with_kb(self, triples):\n kb = self.ent_pair_rel_scores.weight.data\n kb.fill_(-10)\n self.init_kb = torch.zeros(kb.shape)\n\n for t in triples:\n ent_pair = self.ent_pair2id[(t[0], t[2])]\n if t[1] != 0:\n kb[ent_pair, t[1]] = 5\n self.init_kb[ent_pair, t[1]] == 1\n else:\n kb[ent_pair, 0] = 5\n self.init_kb[ent_pair, 0] = 1\n self.init_kb = self.init_kb.cuda()\n\n def forward(self, input):\n p_not_na, p_rel_not_na, reprs = self.encoder(input)\n ent_pair_rel = torch.sigmoid(self.ent_pair_rel_scores(input['ent_pair']))\n probs = (ent_pair_rel[:, 1:] * p_rel_not_na).sum(dim=1)\n return probs, p_not_na, p_rel_not_na, reprs, ent_pair_rel\n\n def compute_loss(self, input, regularity='prob'):\n probs, p_not_na, reg_coef, ent_pair_rel = input['probs'], input['p_not_na'], input['reg_coef'], input['ent_pair_rel']\n reg = torch.zeros(1).cuda()\n\n # compute kl\n if regularity == 'kl':\n p_not_na_total = p_not_na.sum()\n p_na_total = (1 - p_not_na).sum()\n p_not_na_total /= p_not_na_total + p_na_total\n p_na_total /= p_not_na_total + p_na_total\n\n prior = torch.Tensor([0.7, 0.3]).cuda()\n kl = p_na_total * torch.log(p_na_total / prior[0] + 1e-10) + p_not_na_total * torch.log(p_not_na_total / prior[1] + 1e-10)\n reg += reg_coef * kl\n elif regularity == 'prob':\n reg += reg_coef * p_not_na.sum() / p_not_na.shape[0]\n\n if self.ent_pair_rel_scores.weight.requires_grad == True:\n mask = ent_pair_rel.le(0.9).float()\n reg_kb = -(ent_pair_rel * mask * nn.functional.embedding(input['ent_pair'], self.init_kb)).sum() / ent_pair_rel.shape[0]\n reg += 0.1 * reg_kb\n\n # compute\n loss = (-torch.log(probs + 1e-10)).mean() + reg\n return loss, reg\n" ]
[ [ "torch.zeros", "numpy.set_printoptions", "torch.nn.functional.embedding", "torch.log", "torch.Tensor" ] ]
ctralie/BoneTissue
[ "bafd9767a8345d6a1b1912ec049c523a9bb8ae8c" ]
[ "PolyMesh.py" ]
[ "\"\"\"\nProgrammer: Chris Tralie\nPurpose: Some simple routines for dealing with triangle meshes\n\"\"\"\nimport numpy as np\n\ndef loadOffFile(filename):\n \"\"\"\n Load a .off (or .coff) triangle mesh file\n Parameters\n ----------\n filename: string\n Path to the .off file\n Returns\n -------\n VPos: ndarray(N, 3, dtype=np.float64)\n Positions of the vertices\n VColors: ndarray(N, 3, dtype=np.float64)\n Colors of the vertices (if .coff file)\n ITris: ndarray(M, 3, dtype=np.int32)\n The indices of the triangles\n \"\"\"\n fin = open(filename, 'r')\n nVertices = 0\n nFaces = 0\n lineCount = 0\n face = 0\n vertex = 0\n divideColor = False\n VPos = np.zeros((0, 3))\n VColors = np.zeros((0, 3))\n ITris = np.zeros((0, 3))\n for line in fin:\n lineCount = lineCount+1\n fields = line.split() #Splits whitespace by default\n if len(fields) == 0: #Blank line\n continue\n if fields[0][0] in ['#', '\\0', ' '] or len(fields[0]) == 0:\n continue\n #Check section\n if nVertices == 0:\n if fields[0] == \"OFF\" or fields[0] == \"COFF\":\n if len(fields) > 2:\n fields[1:4] = [int(field) for field in fields]\n [nVertices, nFaces, nEdges] = fields[1:4]\n print(\"nVertices = %i, nFaces = %i\"%(nVertices, nFaces))\n #Pre-allocate vertex arrays\n VPos = np.zeros((nVertices, 3))\n VColors = np.zeros((nVertices, 3))\n ITris = np.zeros((nFaces, 3))\n if fields[0] == \"COFF\":\n divideColor = True\n else:\n fields[0:3] = [int(field) for field in fields]\n [nVertices, nFaces, nEdges] = fields[0:3]\n VPos = np.zeros((nVertices, 3))\n VColors = np.zeros((nVertices, 3))\n ITris = np.zeros((nFaces, 3))\n elif vertex < nVertices:\n fields = [float(i) for i in fields]\n P = [fields[0],fields[1], fields[2]]\n color = np.array([0.5, 0.5, 0.5]) #Gray by default\n if len(fields) >= 6:\n #There is color information\n if divideColor:\n color = [float(c)/255.0 for c in fields[3:6]]\n else:\n color = [float(c) for c in fields[3:6]]\n VPos[vertex, :] = P\n VColors[vertex, :] = color\n vertex = vertex+1\n elif face < nFaces:\n #Assume the vertices are specified in CCW order\n fields = [int(i) for i in fields]\n ITris[face, :] = fields[1:fields[0]+1]\n face = face+1\n fin.close()\n VPos = np.array(VPos, np.float64)\n VColors = np.array(VColors, np.float64)\n ITris = np.array(ITris, np.int32)\n return (VPos, VColors, ITris)\n\ndef saveOffFile(filename, VPos, VColors, ITris):\n \"\"\"\n Save a .off file\n Parameters\n ----------\n filename: string\n Path to which to save off file\n VPos: ndarray(N, 3)\n Positions of vertices\n VColors: ndarray(N, 3)\n RGB values of colors, or empty if all gray\n ITris: ndarray(M, 3)\n Indices into vertices of each triangle\n \"\"\"\n nV = VPos.shape[0]\n nF = ITris.shape[0]\n fout = open(filename, \"w\")\n if VColors.size == 0:\n fout.write(\"OFF\\n%i %i %i\\n\"%(nV, nF, 0))\n else:\n fout.write(\"COFF\\n%i %i %i\\n\"%(nV, nF, 0))\n for i in range(nV):\n fout.write(\"%g %g %g\"%tuple(VPos[i, :]))\n if VColors.size > 0:\n fout.write(\" %g %g %g\"%tuple(VColors[i, :]))\n fout.write(\"\\n\")\n for i in range(nF):\n fout.write(\"3 %i %i %i\\n\"%tuple(ITris[i, :]))\n fout.close()\n\ndef randomlySamplePoints(VPos, ITris, NPoints, colPoints = False):\n \"\"\"\n Randomly sample points by area on a triangle mesh. 
This function is\n extremely fast by using broadcasting/numpy operations in lieu of loops\n\n Parameters\n ----------\n VPos : ndarray (N, 3)\n Array of points in 3D\n ITris : ndarray (M, 3)\n Array of triangles connecting points, pointing to vertex indices\n NPoints : int\n Number of points to sample\n colPoints : boolean (default False)\n Whether the points are along the columns or the rows\n \n Returns\n -------\n (Ps : NDArray (NPoints, 3) array of sampled points, \n Ns : Ndarray (NPoints, 3) of normals at those points )\n \"\"\"\n ###Step 1: Compute cross product of all face triangles and use to compute\n #areas and normals (very similar to code used to compute vertex normals)\n\n #Vectors spanning two triangle edges\n P0 = VPos[ITris[:, 0], :]\n P1 = VPos[ITris[:, 1], :]\n P2 = VPos[ITris[:, 2], :]\n V1 = P1 - P0\n V2 = P2 - P0\n FNormals = np.cross(V1, V2)\n FAreas = np.sqrt(np.sum(FNormals**2, 1)).flatten()\n\n #Get rid of zero area faces and update points\n ITris = ITris[FAreas > 0, :]\n FNormals = FNormals[FAreas > 0, :]\n FAreas = FAreas[FAreas > 0]\n P0 = VPos[ITris[:, 0], :]\n P1 = VPos[ITris[:, 1], :]\n P2 = VPos[ITris[:, 2], :]\n\n #Compute normals\n NTris = ITris.shape[0]\n FNormals = FNormals/FAreas[:, None]\n FAreas = 0.5*FAreas\n FNormals = FNormals\n VNormals = np.zeros_like(VPos)\n VAreas = np.zeros(VPos.shape[0])\n for k in range(3):\n VNormals[ITris[:, k], :] += FAreas[:, None]*FNormals\n VAreas[ITris[:, k]] += FAreas\n #Normalize normals\n VAreas[VAreas == 0] = 1\n VNormals = VNormals / VAreas[:, None]\n\n ###Step 2: Randomly sample points based on areas\n FAreas = FAreas/np.sum(FAreas)\n AreasC = np.cumsum(FAreas)\n samples = np.sort(np.random.rand(NPoints))\n #Figure out how many samples there are for each face\n FSamples = np.zeros(NTris, dtype=int)\n fidx = 0\n for s in samples:\n while s > AreasC[fidx]:\n fidx += 1\n FSamples[fidx] += 1\n #Now initialize an array that stores the triangle sample indices\n tidx = np.zeros(NPoints, dtype=np.int64)\n idx = 0\n for i in range(len(FSamples)):\n tidx[idx:idx+FSamples[i]] = i\n idx += FSamples[i]\n N = np.zeros((NPoints, 3)) #Allocate space for normals\n idx = 0\n\n #Vector used to determine if points need to be flipped across parallelogram\n V3 = P2 - P1\n V3 = V3/np.sqrt(np.sum(V3**2, 1))[:, None] #Normalize\n\n #Randomly sample points on each face\n #Generate random points uniformly in parallelogram\n u = np.random.rand(NPoints, 1)\n v = np.random.rand(NPoints, 1)\n Ps = u*V1[tidx, :] + P0[tidx, :]\n Ps += v*V2[tidx, :]\n #Flip over points which are on the other side of the triangle\n dP = Ps - P1[tidx, :]\n proj = np.sum(dP*V3[tidx, :], 1)\n dPPar = V3[tidx, :]*proj[:, None] #Parallel project onto edge\n dPPerp = dP - dPPar\n Qs = Ps - dPPerp\n dP0QSqr = np.sum((Qs - P0[tidx, :])**2, 1)\n dP0PSqr = np.sum((Ps - P0[tidx, :])**2, 1)\n idxreg = np.arange(NPoints, dtype=np.int64)\n idxflip = idxreg[dP0QSqr < dP0PSqr]\n u[idxflip, :] = 1 - u[idxflip, :]\n v[idxflip, :] = 1 - v[idxflip, :]\n Ps[idxflip, :] = P0[tidx[idxflip], :] + u[idxflip, :]*V1[tidx[idxflip], :] + v[idxflip, :]*V2[tidx[idxflip], :]\n\n #Step 3: Compute normals of sampled points by barycentric interpolation\n Ns = u*VNormals[ITris[tidx, 1], :]\n Ns += v*VNormals[ITris[tidx, 2], :]\n Ns += (1-u-v)*VNormals[ITris[tidx, 0], :]\n return (Ps, Ns)" ]
[ [ "numpy.zeros_like", "numpy.array", "numpy.random.rand", "numpy.zeros", "numpy.sum", "numpy.arange", "numpy.cumsum", "numpy.cross" ] ]
vladdders/attention-ocr
[ "c29be8608ff58a341fdf921b30aa2cf5cc4f73ac" ]
[ "aocr/model/model.py" ]
[ "\"\"\"Visual Attention Based OCR Model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport time\nimport os\nimport math\nimport logging\nimport sys\n\nimport distance\nimport numpy as np\nimport tensorflow as tf\n\nfrom six.moves import xrange # pylint: disable=redefined-builtin\nfrom .cnn import CNN\nfrom .seq2seq_model import Seq2SeqModel\nfrom ..util.data_gen import DataGen\nfrom ..util.visualizations import visualize_attention\n\n\nclass Model(object):\n def __init__(self,\n phase,\n visualize,\n output_dir,\n batch_size,\n initial_learning_rate,\n steps_per_checkpoint,\n model_dir,\n target_embedding_size,\n attn_num_hidden,\n attn_num_layers,\n clip_gradients,\n max_gradient_norm,\n session,\n load_model,\n gpu_id,\n use_gru,\n use_distance=True,\n max_image_width=160,\n max_image_height=60,\n max_prediction_length=8,\n channels=1,\n reg_val=0):\n\n self.use_distance = use_distance\n\n # We need resized width, not the actual width\n max_resized_width = 1. * max_image_width / max_image_height * DataGen.IMAGE_HEIGHT\n\n self.max_original_width = max_image_width\n self.max_width = int(math.ceil(max_resized_width))\n\n self.encoder_size = int(math.ceil(1. * self.max_width / 4))\n self.decoder_size = max_prediction_length + 2\n self.buckets = [(self.encoder_size, self.decoder_size)]\n\n if gpu_id >= 0:\n device_id = '/gpu:' + str(gpu_id)\n else:\n device_id = '/cpu:0'\n self.device_id = device_id\n\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n if phase == 'test':\n batch_size = 1\n\n logging.info('phase: %s', phase)\n logging.info('model_dir: %s', model_dir)\n logging.info('load_model: %s', load_model)\n logging.info('output_dir: %s', output_dir)\n logging.info('steps_per_checkpoint: %d', steps_per_checkpoint)\n logging.info('batch_size: %d', batch_size)\n logging.info('learning_rate: %f', initial_learning_rate)\n logging.info('reg_val: %d', reg_val)\n logging.info('max_gradient_norm: %f', max_gradient_norm)\n logging.info('clip_gradients: %s', clip_gradients)\n logging.info('max_image_width %f', max_image_width)\n logging.info('max_prediction_length %f', max_prediction_length)\n logging.info('channels: %d', channels)\n logging.info('target_embedding_size: %f', target_embedding_size)\n logging.info('attn_num_hidden: %d', attn_num_hidden)\n logging.info('attn_num_layers: %d', attn_num_layers)\n logging.info('visualize: %s', visualize)\n\n if use_gru:\n logging.info('using GRU in the decoder.')\n\n self.reg_val = reg_val\n self.sess = session\n self.steps_per_checkpoint = steps_per_checkpoint\n self.model_dir = model_dir\n self.output_dir = output_dir\n self.batch_size = batch_size\n self.global_step = tf.Variable(0, trainable=False)\n self.phase = phase\n self.visualize = visualize\n self.learning_rate = initial_learning_rate\n self.clip_gradients = clip_gradients\n self.channels = channels\n\n if phase == 'train':\n self.forward_only = False\n else:\n self.forward_only = True\n\n with tf.device(device_id):\n\n self.height = tf.constant(DataGen.IMAGE_HEIGHT, dtype=tf.int32)\n self.height_float = tf.constant(DataGen.IMAGE_HEIGHT, dtype=tf.float32)\n\n self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes')\n self.img_data = tf.cond(\n tf.less(tf.rank(self.img_pl), 1),\n lambda: tf.expand_dims(self.img_pl, 0),\n lambda: self.img_pl\n )\n self.img_data = tf.map_fn(self._prepare_image, self.img_data, dtype=tf.float32)\n num_images = tf.shape(self.img_data)[0]\n\n # TODO: create a mask depending on the image/batch size\n 
self.encoder_masks = []\n for i in xrange(self.encoder_size + 1):\n self.encoder_masks.append(\n tf.tile([[1.]], [num_images, 1])\n )\n\n self.decoder_inputs = []\n self.target_weights = []\n for i in xrange(self.decoder_size + 1):\n self.decoder_inputs.append(\n tf.tile([1], [num_images])\n )\n if i < self.decoder_size:\n self.target_weights.append(tf.tile([1.], [num_images]))\n else:\n self.target_weights.append(tf.tile([0.], [num_images]))\n\n cnn_model = CNN(self.img_data, not self.forward_only)\n self.conv_output = cnn_model.tf_output()\n self.perm_conv_output = tf.transpose(self.conv_output, perm=[1, 0, 2])\n self.attention_decoder_model = Seq2SeqModel(\n encoder_masks=self.encoder_masks,\n encoder_inputs_tensor=self.perm_conv_output,\n decoder_inputs=self.decoder_inputs,\n target_weights=self.target_weights,\n target_vocab_size=len(DataGen.CHARMAP),\n buckets=self.buckets,\n target_embedding_size=target_embedding_size,\n attn_num_layers=attn_num_layers,\n attn_num_hidden=attn_num_hidden,\n forward_only=self.forward_only,\n use_gru=use_gru)\n\n table = tf.contrib.lookup.MutableHashTable(\n key_dtype=tf.int64,\n value_dtype=tf.string,\n default_value=\"\",\n checkpoint=True,\n )\n\n insert = table.insert(\n tf.constant(list(range(len(DataGen.CHARMAP))), dtype=tf.int64),\n tf.constant(DataGen.CHARMAP),\n )\n\n with tf.control_dependencies([insert]):\n num_feed = []\n prb_feed = []\n\n for line in xrange(len(self.attention_decoder_model.output)):\n guess = tf.argmax(self.attention_decoder_model.output[line], axis=1)\n proba = tf.reduce_max(\n tf.nn.softmax(self.attention_decoder_model.output[line]), axis=1)\n num_feed.append(guess)\n prb_feed.append(proba)\n\n # Join the predictions into a single output string.\n trans_output = tf.transpose(num_feed)\n trans_output = tf.map_fn(\n lambda m: tf.foldr(\n lambda a, x: tf.cond(\n tf.equal(x, DataGen.EOS_ID),\n lambda: '',\n lambda: table.lookup(x) + a # pylint: disable=undefined-variable\n ),\n m,\n initializer=''\n ),\n trans_output,\n dtype=tf.string\n )\n\n # Calculate the total probability of the output string.\n trans_outprb = tf.transpose(prb_feed)\n trans_outprb = tf.gather(trans_outprb, tf.range(tf.size(trans_output)))\n trans_outprb = tf.map_fn(\n lambda m: tf.foldr(\n lambda a, x: tf.multiply(tf.cast(x, tf.float32), a),\n m,\n initializer=tf.cast(1, tf.float32)\n ),\n trans_outprb,\n dtype=tf.float32\n )\n\n self.prediction = tf.cond(\n tf.equal(tf.shape(trans_output)[0], 1),\n lambda: trans_output[0],\n lambda: trans_output,\n )\n self.probability = tf.cond(\n tf.equal(tf.shape(trans_outprb)[0], 1),\n lambda: trans_outprb[0],\n lambda: trans_outprb,\n )\n\n self.prediction = tf.identity(self.prediction, name='prediction')\n self.probability = tf.identity(self.probability, name='probability')\n\n if not self.forward_only: # train\n self.updates = []\n self.summaries_by_bucket = []\n\n params = tf.trainable_variables()\n opt = tf.train.AdadeltaOptimizer(learning_rate=initial_learning_rate)\n loss_op = self.attention_decoder_model.loss\n\n if self.reg_val > 0:\n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n logging.info('Adding %s regularization losses', len(reg_losses))\n logging.debug('REGULARIZATION_LOSSES: %s', reg_losses)\n loss_op = self.reg_val * tf.reduce_sum(reg_losses) + loss_op\n\n gradients, params = list(zip(*opt.compute_gradients(loss_op, params)))\n if self.clip_gradients:\n gradients, _ = tf.clip_by_global_norm(gradients, max_gradient_norm)\n\n # Summaries for loss, variables, gradients, 
gradient norms and total gradient norm.\n summaries = [\n tf.summary.scalar(\"loss\", loss_op),\n tf.summary.scalar(\"total_gradient_norm\", tf.global_norm(gradients))\n ]\n all_summaries = tf.summary.merge(summaries)\n self.summaries_by_bucket.append(all_summaries)\n\n # update op - apply gradients\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.updates.append(\n opt.apply_gradients(\n list(zip(gradients, params)),\n global_step=self.global_step\n )\n )\n\n self.saver_all = tf.train.Saver(tf.all_variables())\n self.checkpoint_path = os.path.join(self.model_dir, \"model.ckpt\")\n\n ckpt = tf.train.get_checkpoint_state(model_dir)\n if ckpt and load_model:\n # pylint: disable=no-member\n logging.info(\"Reading model parameters from %s\", ckpt.model_checkpoint_path)\n self.saver_all.restore(self.sess, ckpt.model_checkpoint_path)\n else:\n logging.info(\"Created model with fresh parameters.\")\n self.sess.run(tf.initialize_all_variables())\n\n def predict(self, image_file_data):\n input_feed = {}\n input_feed[self.img_pl.name] = image_file_data\n\n output_feed = [self.prediction, self.probability]\n outputs = self.sess.run(output_feed, input_feed)\n\n text = outputs[0]\n probability = outputs[1]\n if sys.version_info >= (3,):\n text = text.decode('iso-8859-1')\n\n return (text, probability)\n\n def test(self, data_path):\n current_step = 0\n num_correct = 0.0\n num_total = 0.0\n\n s_gen = DataGen(data_path, self.buckets, epochs=1, max_width=self.max_original_width)\n for batch in s_gen.gen(1):\n current_step += 1\n # Get a batch (one image) and make a step.\n start_time = time.time()\n result = self.step(batch, self.forward_only)\n curr_step_time = (time.time() - start_time)\n\n num_total += 1\n\n output = result['prediction']\n ground = batch['labels'][0]\n comment = batch['comments'][0]\n if sys.version_info >= (3,):\n output = output.decode('iso-8859-1')\n ground = ground.decode('iso-8859-1')\n comment = comment.decode('iso-8859-1')\n\n probability = result['probability']\n\n if self.use_distance:\n incorrect = distance.levenshtein(output, ground)\n if not ground:\n if not output:\n incorrect = 0\n else:\n incorrect = 1\n else:\n incorrect = float(incorrect) / len(ground)\n incorrect = min(1, incorrect)\n else:\n incorrect = 0 if output == ground else 1\n\n num_correct += 1. - incorrect\n\n if self.visualize:\n # Attention visualization.\n threshold = 0.5\n normalize = True\n binarize = True\n attns_list = [[a.tolist() for a in step_attn] for step_attn in result['attentions']]\n attns = np.array(attns_list).transpose([1, 0, 2])\n visualize_attention(batch['data'],\n 'out',\n attns,\n output,\n self.max_width,\n DataGen.IMAGE_HEIGHT,\n threshold=threshold,\n normalize=normalize,\n binarize=binarize,\n ground=ground,\n flag=None)\n\n step_accuracy = \"{:>4.0%}\".format(1. - incorrect)\n if incorrect:\n correctness = step_accuracy + \" ({} vs {}) {}\".format(output, ground, comment)\n else:\n correctness = step_accuracy + \" (\" + ground + \")\"\n\n logging.info('Step {:.0f} ({:.3f}s). 
'\n 'Accuracy: {:6.2%}, '\n 'loss: {:f}, perplexity: {:0<7.6}, probability: {:6.2%} {}'.format(\n current_step,\n curr_step_time,\n num_correct / num_total,\n result['loss'],\n math.exp(result['loss']) if result['loss'] < 300 else float('inf'),\n probability,\n correctness))\n\n def train(self, data_path, num_epoch):\n logging.info('num_epoch: %d', num_epoch)\n s_gen = DataGen(\n data_path, self.buckets,\n epochs=num_epoch, max_width=self.max_original_width\n )\n step_time = 0.0\n loss = 0.0\n current_step = 0\n skipped_counter = 0\n writer = tf.summary.FileWriter(self.model_dir, self.sess.graph)\n\n logging.info('Starting the training process.')\n for batch in s_gen.gen(self.batch_size):\n\n current_step += 1\n\n start_time = time.time()\n # result = self.step(batch, self.forward_only)\n result = None\n try:\n result = self.step(batch, self.forward_only)\n except Exception as e:\n skipped_counter += 1\n logging.info(\"Step {} failed, batch skipped.\" +\n \" Total skipped: {}\".format(current_step, skipped_counter))\n logging.error(\n \"Step {} failed. Exception details: {}\".format(current_step, str(e)))\n continue\n\n loss += result['loss'] / self.steps_per_checkpoint\n curr_step_time = (time.time() - start_time)\n step_time += curr_step_time / self.steps_per_checkpoint\n\n # num_correct = 0\n\n # step_outputs = result['prediction']\n # grounds = batch['labels']\n # for output, ground in zip(step_outputs, grounds):\n # if self.use_distance:\n # incorrect = distance.levenshtein(output, ground)\n # incorrect = float(incorrect) / len(ground)\n # incorrect = min(1.0, incorrect)\n # else:\n # incorrect = 0 if output == ground else 1\n # num_correct += 1. - incorrect\n\n writer.add_summary(result['summaries'], current_step)\n\n # precision = num_correct / len(batch['labels'])\n step_perplexity = math.exp(result['loss']) if result['loss'] < 300 else float('inf')\n\n # logging.info('Step %i: %.3fs, precision: %.2f, loss: %f, perplexity: %f.'\n # % (current_step, curr_step_time, precision*100,\n # result['loss'], step_perplexity))\n\n logging.info('Step %i: %.3fs, loss: %f, perplexity: %f.',\n current_step, curr_step_time, result['loss'], step_perplexity)\n\n # Once in a while, we save checkpoint, print statistics, and run evals.\n if current_step % self.steps_per_checkpoint == 0:\n perplexity = math.exp(loss) if loss < 300 else float('inf')\n # Print statistics for the previous epoch.\n logging.info(\"Global step %d. Time: %.3f, loss: %f, perplexity: %.2f.\",\n self.sess.run(self.global_step), step_time, loss, perplexity)\n # Save checkpoint and reset timer and loss.\n logging.info(\"Saving the model at step %d.\", current_step)\n self.saver_all.save(self.sess, self.checkpoint_path, global_step=self.global_step)\n step_time, loss = 0.0, 0.0\n\n # Print statistics for the previous epoch.\n perplexity = math.exp(loss) if loss < 300 else float('inf')\n logging.info(\"Global step %d. 
Time: %.3f, loss: %f, perplexity: %.2f.\",\n self.sess.run(self.global_step), step_time, loss, perplexity)\n\n if skipped_counter:\n logging.info(\"Skipped {} batches due to errors.\".format(skipped_counter))\n\n # Save checkpoint and reset timer and loss.\n logging.info(\"Finishing the training and saving the model at step %d.\", current_step)\n self.saver_all.save(self.sess, self.checkpoint_path, global_step=self.global_step)\n\n # step, read one batch, generate gradients\n def step(self, batch, forward_only):\n img_data = batch['data']\n decoder_inputs = batch['decoder_inputs']\n target_weights = batch['target_weights']\n\n # Input feed: encoder inputs, decoder inputs, target_weights, as provided.\n input_feed = {}\n input_feed[self.img_pl.name] = img_data\n\n for idx in xrange(self.decoder_size):\n input_feed[self.decoder_inputs[idx].name] = decoder_inputs[idx]\n input_feed[self.target_weights[idx].name] = target_weights[idx]\n\n # Since our targets are decoder inputs shifted by one, we need one more.\n last_target = self.decoder_inputs[self.decoder_size].name\n input_feed[last_target] = np.zeros([self.batch_size], dtype=np.int32)\n\n # Output feed: depends on whether we do a backward step or not.\n output_feed = [\n self.attention_decoder_model.loss, # Loss for this batch.\n ]\n\n if not forward_only:\n output_feed += [self.summaries_by_bucket[0],\n self.updates[0]]\n else:\n output_feed += [self.prediction]\n output_feed += [self.probability]\n if self.visualize:\n output_feed += self.attention_decoder_model.attentions\n\n outputs = self.sess.run(output_feed, input_feed)\n\n res = {\n 'loss': outputs[0],\n }\n\n if not forward_only:\n res['summaries'] = outputs[1]\n else:\n res['prediction'] = outputs[1]\n res['probability'] = outputs[2]\n if self.visualize:\n res['attentions'] = outputs[3:]\n\n return res\n\n def _prepare_image(self, image):\n \"\"\"Resize the image to a maximum height of `self.height` and maximum\n width of `self.width` while maintaining the aspect ratio. Pad the\n resized image to a fixed size of ``[self.height, self.width]``.\"\"\"\n img = tf.image.decode_png(image, channels=self.channels)\n dims = tf.shape(img)\n dims = tf.cast(x=dims, dtype=tf.float32)\n\n width = self.max_width\n\n max_width = tf.to_int32(tf.ceil(tf.truediv(dims[1], dims[0]) * self.height_float))\n\n max_height = tf.to_int32(tf.ceil(tf.truediv(tf.cast(x=width, dtype=tf.float32),\n tf.cast(x=max_width, dtype=tf.float32)) * self.height_float))\n\n resized = tf.cond(\n tf.greater_equal(width, max_width),\n lambda: tf.cond(\n tf.less_equal(tf.cast(x=dims[0], dtype=tf.int32), self.height),\n lambda: tf.to_float(img),\n lambda: tf.image.resize_images(img, [self.height, max_width],\n method=tf.image.ResizeMethod.BICUBIC),\n ),\n lambda: tf.image.resize_images(img, [max_height, width],\n method=tf.image.ResizeMethod.BICUBIC)\n )\n\n padded = tf.image.pad_to_bounding_box(resized, 0, 0, self.height, width)\n return padded\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.to_float", "tensorflow.control_dependencies", "tensorflow.tile", "tensorflow.nn.softmax", "tensorflow.identity", "tensorflow.cast", "tensorflow.rank", "tensorflow.trainable_variables", "tensorflow.shape", "tensorflow.argmax", "tensorflow.Variable", "tensorflow.transpose", "tensorflow.constant", "tensorflow.image.pad_to_bounding_box", "tensorflow.all_variables", "tensorflow.get_collection", "tensorflow.train.AdadeltaOptimizer", "numpy.array", "tensorflow.summary.merge", "tensorflow.initialize_all_variables", "numpy.zeros", "tensorflow.expand_dims", "tensorflow.summary.scalar", "tensorflow.map_fn", "tensorflow.contrib.lookup.MutableHashTable", "tensorflow.placeholder", "tensorflow.global_norm", "tensorflow.reduce_sum", "tensorflow.image.resize_images", "tensorflow.clip_by_global_norm", "tensorflow.size", "tensorflow.greater_equal", "tensorflow.equal", "tensorflow.truediv", "tensorflow.image.decode_png", "tensorflow.device", "tensorflow.summary.FileWriter" ] ]
hoppe93/sitsi
[ "bf69900e6a3fa0a89efff5ecb5be7a64d43fab9b" ]
[ "sitsi/Algorithms/Tikhonov.py" ]
[ "r\"\"\"\nThis module calculates the best fitting radial profile for a given set of input\ndata, using Tikhonov regularization. The general least-squares problem we\nconsider is\n\n.. math::\n \n \\mathrm{min} \\sum_i\\left\\lVert I_i^{\\rm exp} - I_i \\right\\rVert_2^2\n\nwhere :math:`I_i^{\\rm exp}` is the experimental data and :math:`I_i=I_i(x)` the\nsynthetic data resulting from the least-squares solution :math:`x`. In\n``sitsi``, we take :math:`I_i` to be\n\n.. math::\n \n I_i = \\sum_j G_{ij} x^j,\n\nwhere :math:`G_{ij}` is a `SOFT2 <https://github.com/hoppe93/SOFT2>`_ Green's\nfunction. We regularize this problem by adding a scaled matrix term\n:math:`\\alpha\\Gamma_{ij}`:\n\n.. math::\n\n \\mathrm{min}\\left[ \\sum_i\\left\\lVert I_i^{\\rm exp} - \\sum_j G_{ij} x^j \\right\\rVert_2^2 + \\left\\lVert \\sum_j \\alpha\\Gamma_{ij}x^j \\right\\rVert_2^2 \\right]\n\nThe simplest choice for :math:`\\Gamma_{ij}` is to use an identity matrix. We\nalso implement a finite difference operator in ``sitsi``. The scale factor\n:math:`alpha` is determined using the L-curve method\n(https://www.sintef.no/globalassets/project/evitameeting/2005/lcurve.pdf).\n\n\"\"\"\n\nimport numpy as np\nfrom .. InverterException import InverterException\n\n\nclass Tikhonov:\n \n\n def __init__(self, inp, method='standard', fitness=None):\n \"\"\"\n Constructor.\n\n method: Name of Tikhonov method to use. Either 'standard' (uses a\n constant times an identity matrix for regularization), or\n 'diff' (uses forward finite difference for regularization)\n fitness: Fitness function to use, taking two input arguments:\n (1) the input data, (2) the best fit output.\n The default is to take the sum of differences squared, i.e.\n sum(|a-b|^2)\n where a and b are the input and output vectors respectively.\n inp: List of tuples, with each tuple consisting of the input data\n as well as the Green's function which can be used to\n generate synthetic data for the input data.\n \"\"\"\n self.data = []\n self.green = []\n self.fitness = fitness\n\n if not self.checkMethod(method.lower()):\n raise InverterException(\"Unrecognized method specified: '{}'.\".format(method))\n self.method = method\n\n if self.fitness is None:\n self.fitness = lambda inp, synth : np.sum((inp - synth)**2)\n\n # Store input data and Green's functions\n for i in inp:\n self.data.append(i[0])\n self.green.append(i[1])\n\n self.data = np.concatenate(self.data)\n self.green = np.concatenate(self.green)\n\n if self.data.size != self.green.shape[1]:\n raise InverterException(\"Incompatible dimensions of input data and Green's function.\")\n\n\n def checkMethod(self, method):\n \"\"\"\n Checks if the specified Tikhonov method is valid.\n \"\"\"\n return (method in ['diff', 'standard', 'svd'])\n\n\n def invert(self):\n \"\"\"\n Solves for the optimum using a Tikhonov method.\n Returns a tuple consisting of the solution and the solution\n multiplied with the input Green's function.\n \"\"\"\n invfunc = None\n if self.method == 'diff':\n invfunc = self._invert_general\n self._invert_general_init('diff')\n elif self.method == 'standard':\n invfunc = self._invert_general\n self._invert_general_init('standard')\n elif self.method == 'svd':\n invfunc = self._invert_svd\n self._invert_svd_init()\n else:\n raise InverterException(\"Unrecognized method specified: '{}'.\".format(self.method))\n\n def evaluate(alpha):\n _, Ax = invfunc(alpha)\n return self.fitness(self.data, Ax)\n\n lower, upper = -100, 100\n minimum = evaluate(10.0 ** lower)\n maximum = evaluate(10.0 ** 
upper)\n\n tol = 1e-4\n tol_it = 0.1\n\n def is_good(alpha):\n fitness = evaluate(alpha)\n return ((fitness - minimum) / (maximum-minimum)) < tol\n\n # L-curve method\n while (upper - lower) > tol_it:\n mid = (upper + lower) / 2\n if is_good(10.0 ** mid):\n lower = mid\n else:\n upper = mid\n\n x, Ax = invfunc(10.0 ** lower)\n\n return x, Ax\n\n \n def _invert_general_init(self, method='standard'):\n \"\"\"\n Initializes the general Tikhonov methods.\n \"\"\"\n N = self.green.shape[0]\n\n # SELECT OPERATOR TO ADD\n if method == 'diff':\n # (Upwind) finite difference\n self.diff_D = (np.eye(N) - np.eye(N, k=1))[:-1]\n elif method == 'standard':\n # Scaled identity matrix\n self.diff_D = np.eye(N)\n else:\n raise InverterException(\"Unrecognized generalized Tikhonov method specified: '{}'.\".format(method))\n\n # Set up input vector\n self.diff_b = np.hstack((self.data, np.zeros(self.diff_D.shape[0])))\n\n\n def _invert_general(self, alpha):\n \"\"\"\n Solves for the optimum using a Tikhonov method, with a scaled term\n added to the equation. I.e., instead of solving the ill-posed problem\n\n min || A*x - b ||^2\n \n we solve\n\n min || A*x - b ||^2 + || alpha*D*x ||^2\n \"\"\"\n # Construct matrix to invert\n A = np.vstack((self.green.T, alpha * self.diff_D))\n\n x, _, _, _ = np.linalg.lstsq(A, self.diff_b, rcond=None)\n img = self.green.T.dot(x)\n\n return x, img\n\n\n def _invert_svd_init(self):\n \"\"\"\n Initializes the SVD method for Tikhonov regularization.\n \"\"\"\n self.svd_u, self.svd_s, self.svd_vt = np.linalg.svd(self.green.T, full_matrices=False)\n\n\n def _invert_svd(self, alpha):\n \"\"\"\n Solves the linear problem using Tikhonov regularization and\n SVD decomposition of the linear operator matrix.\n \"\"\"\n s = np.copy(self.svd_s)\n f = s**2 / (s**2 + alpha**2)\n s = np.divide(1, s, where=(s>0))\n s = s*f\n\n pinv = np.matmul(self.svd_vt.T, np.multiply(s[...,np.newaxis], self.svd_u.T))\n\n x = pinv.dot(self.data)\n img = self.green.T.dot(x)\n\n return x, img\n\n\n" ]
[ [ "numpy.concatenate", "numpy.divide", "numpy.zeros", "numpy.sum", "numpy.copy", "numpy.eye", "numpy.multiply", "numpy.linalg.lstsq", "numpy.linalg.svd", "numpy.vstack" ] ]
nonator/cp2018
[ "267950ce0fcb79c37da5b03e2cd5bb6278e9282f" ]
[ "00/src/pi_num.py" ]
[ "from numpy.random import rand\n\n\ndef main():\n N = 100000\n\n xy = rand(N, 2)\n\n r = xy[:,0] ** 2 + xy[:,1]**2\n\n n = sum(r < 1)\n\n pi_est = 4 * n / float(N)\n\n print(pi_est)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.random.rand" ] ]
PBibiloni/softcolor
[ "80194877d828e873a39a73b5936975436edd7017", "80194877d828e873a39a73b5936975436edd7017" ]
[ "softcolor/morphology.py", "examples/example_contrast_mapping.py" ]
[ "from math import ceil\nfrom warnings import warn\n\nfrom scipy.ndimage.filters import convolve\nfrom skimage import color\nimport numpy as np\nfrom skimage.morphology import disk\n\nfrom softcolor.aggregation_functions import conjunction_min, r_implication, implication_godel\nfrom softcolor.distance_between_images import euclidean_distance\nfrom softcolor.soft_color_operators import soft_color_erosion, soft_color_dilation\n\n\nclass BaseMorphology:\n\n def __init__(self, conjunction=None, fuzzy_implication_function=None,\n distance_multivariate_images=euclidean_distance,\n combine_multivariate_images=lambda x, y: 0.5*(x+y)):\n if conjunction is None:\n conjunction = conjunction_min\n self.conj = conjunction\n if fuzzy_implication_function is None:\n try:\n fuzzy_implication_function = r_implication(self.conj)\n except AttributeError:\n fuzzy_implication_function = implication_godel\n self.impl = fuzzy_implication_function\n self.dist = distance_multivariate_images\n self.combine = combine_multivariate_images\n\n def dilation(self, multivariate_image, structuring_element):\n return soft_color_dilation(multivariate_image=multivariate_image,\n structuring_element=structuring_element,\n fuzzy_conjunction=self.conj)\n\n def erosion(self, multivariate_image, structuring_element):\n return soft_color_erosion(multivariate_image=multivariate_image,\n structuring_element=structuring_element,\n fuzzy_implication_function=self.impl)\n\n def opening(self, multivariate_image, structuring_element):\n return self.dilation(\n self.erosion(\n multivariate_image,\n structuring_element=structuring_element),\n structuring_element=structuring_element[::-1, ::-1]\n )\n\n def closing(self, multivariate_image, structuring_element):\n return self.erosion(\n self.dilation(\n multivariate_image,\n structuring_element=structuring_element),\n structuring_element=structuring_element[::-1, ::-1]\n )\n\n def tophat_opening(self, multivariate_image, structuring_element):\n return self.dist(\n multivariate_image,\n self.opening(multivariate_image, structuring_element=structuring_element)\n )\n\n def tophat_closing(self, multivariate_image, structuring_element):\n return self.dist(\n multivariate_image,\n self.closing(multivariate_image, structuring_element=structuring_element)\n )\n\n def gradient(self, multivariate_image, structuring_element):\n \"\"\" Distance between erosion and dilation of the image. \"\"\"\n return self.dist(\n self.erosion(multivariate_image, structuring_element=structuring_element),\n self.dilation(multivariate_image, structuring_element=structuring_element)\n )\n\n def inpaint_with_steps(self, multivariate_image, structuring_element, max_iterations=10):\n \"\"\" Iteratively recover pixels given by 0.5 * (opening + closing). 
\"\"\"\n if not np.all(np.isnan(structuring_element) | (structuring_element == 1)):\n msg = \"\"\"\n Inpainting with structuring element which contains elements not being 1 or np.nan:\n It may give non-interpretable results, we strongly recommend using structuring\n elements that only contain 1 or np.nan.\"\"\"\n warn(msg)\n inpainted_image = multivariate_image.copy()\n num_channels = multivariate_image.shape[2]\n steps = [inpainted_image.copy()]\n mask_unknown = np.isnan(inpainted_image[:, :, 0])\n idx_it = 0\n while np.any(mask_unknown) and idx_it <= max_iterations:\n closing = self.closing(inpainted_image,\n structuring_element=structuring_element)\n opening = self.opening(inpainted_image,\n structuring_element=structuring_element)\n mask_recovered = mask_unknown & ~np.isnan(closing[:, :, 0]) & ~np.isnan(opening[:, :, 0])\n if not np.any(mask_recovered):\n break\n x = closing[mask_recovered, :].reshape(-1, 1, num_channels)\n y = opening[mask_recovered, :].reshape(-1, 1, num_channels)\n inpainted_image[mask_recovered] = self.combine(x, y).reshape(-1, num_channels)\n mask_unknown = np.isnan(inpainted_image[:, :, 0])\n idx_it += 1\n steps += [inpainted_image.copy()]\n return inpainted_image, steps\n\n def inpaint(self, multivariate_image, structuring_element, max_iterations=10):\n \"\"\" Iteratively recover pixels given by 0.5 * (opening + closing). \"\"\"\n inpainted_image, _ = self.inpaint_with_steps(multivariate_image,\n structuring_element=structuring_element,\n max_iterations=max_iterations)\n return inpainted_image\n\n def contrast_mapping_with_steps(self, multivariate_image, structuring_element, num_iterations=3):\n \"\"\" Iteratively change pixels as the most similar one between their dilation and their erosion. \"\"\"\n contrasted_image = multivariate_image.copy()\n idx_it = 0\n steps = [contrasted_image.copy()]\n while idx_it <= num_iterations:\n dilation = self.dilation(contrasted_image,\n structuring_element=structuring_element)\n erosion = self.erosion(contrasted_image,\n structuring_element=structuring_element)\n d_dilation = self.dist(contrasted_image, dilation)\n d_erosion = self.dist(contrasted_image, erosion)\n mask_dilation_is_closest = d_dilation < d_erosion\n mask_dilation_is_closest = np.tile(mask_dilation_is_closest[:, :, np.newaxis], (1, 1, 3))\n contrasted_image = erosion\n contrasted_image[mask_dilation_is_closest] = dilation[mask_dilation_is_closest]\n idx_it += 1\n steps += [contrasted_image.copy()]\n return contrasted_image, steps\n\n def contrast_mapping(self, multivariate_image, structuring_element, num_iterations=10):\n \"\"\" Iteratively change pixels as the most similar one between their dilation and their erosion. 
\"\"\"\n contrasted_image, _ = self.contrast_mapping_with_steps(\n multivariate_image,\n structuring_element=structuring_element,\n num_iterations=num_iterations,\n )\n return contrasted_image\n\n\nclass MorphologyInCIELab(BaseMorphology):\n\n def __init__(self, conjunction=None, fuzzy_implication_function=None):\n super().__init__(conjunction=conjunction, fuzzy_implication_function=fuzzy_implication_function,\n distance_multivariate_images=_perceptual_distance,\n combine_multivariate_images=_combine_in_lab)\n\n def dilation(self, image_as_rgb, structuring_element):\n lab_image = _rgb_to_lab(image_as_rgb)/100.0\n lab_dilation = super().dilation(multivariate_image=lab_image,\n structuring_element=structuring_element) * 100.0\n return _lab_to_rgb(lab_dilation)\n\n def erosion(self, image_as_rgb, structuring_element):\n lab_image = _rgb_to_lab(image_as_rgb)/100.0\n lab_erosion = super().erosion(multivariate_image=lab_image,\n structuring_element=structuring_element) * 100.0\n return _lab_to_rgb(lab_erosion)\n\n\ndef _rgb_to_lab(image_as_rgb):\n # Wrapper of skimage.color.rgb2lab to avoid computing on NaN values\n rgb_flat = image_as_rgb.reshape((-1, 1, image_as_rgb.shape[2]))\n nonnan_mask = ~np.isnan(rgb_flat[:, 0, 0])\n lab_flat = np.full(shape=rgb_flat.shape,\n dtype='float64',\n fill_value=np.nan)\n lab_flat[nonnan_mask, :, :] = color.rgb2lab(rgb_flat[nonnan_mask, :, :])\n lab = lab_flat.reshape(image_as_rgb.shape)\n return lab\n\n\ndef _lab_to_rgb(image_as_lab):\n # Wrapper of skimage.color.lab2rgb to avoid computing on NaN values\n lab_flat = image_as_lab.reshape((-1, 1, image_as_lab.shape[2]))\n nonnan_mask = ~np.isnan(lab_flat[:, 0, 0])\n rgb_flat = np.full(shape=lab_flat.shape,\n dtype='float64',\n fill_value=np.nan)\n rgb_flat[nonnan_mask, :, :] = color.lab2rgb(lab_flat[nonnan_mask, :, :])\n rgb = rgb_flat.reshape(image_as_lab.shape)\n return rgb\n\n\ndef _perceptual_distance(x_as_rgb, y_as_rgb):\n return euclidean_distance(\n x=_rgb_to_lab(x_as_rgb),\n y=_rgb_to_lab(y_as_rgb),\n )\n\n\ndef _combine_in_lab(x_as_rgb, y_as_rgb):\n return _lab_to_rgb(0.5 * (_rgb_to_lab(x_as_rgb) + _rgb_to_lab(y_as_rgb)))\n\n\ndef soften_structuring_element(structuring_element, sz_averaging_kernel_in_px=None, preserve_shape=True):\n if sz_averaging_kernel_in_px is not None:\n pad_px = sz_averaging_kernel_in_px//2\n else:\n pad_px = ceil(sum(structuring_element.shape)/10)\n pad_px = max(1, pad_px)\n\n if np.all(structuring_element <= 1):\n structuring_element = structuring_element.astype('float32')\n\n se_padded = np.pad(structuring_element,\n pad_width=((pad_px, pad_px), (pad_px, pad_px)),\n mode='constant', constant_values=0)\n\n kernel = disk(pad_px)\n kernel = convolve(input=kernel, weights=kernel, mode='constant', cval=0.0)\n kernel = kernel/np.sum(kernel)\n se_soft = convolve(input=se_padded, weights=kernel, mode='constant', cval=0.0)\n\n se_orig_center = tuple(e//2 for e in structuring_element.shape)\n se_soft_center = tuple(e//2 for e in se_soft.shape)\n if structuring_element[se_orig_center] == 1 and se_soft[se_soft_center] != 1:\n msg = (\"\"\"\n The original Structuring Element had a center equal to 1, but the softened one does not.\n This may produce unexpected behaviours.\"\"\")\n warn(RuntimeWarning(msg))\n\n if preserve_shape:\n se_soft = se_soft[pad_px:-pad_px, pad_px:-pad_px]\n\n return se_soft\n", "from math import ceil\n\nimport matplotlib.pyplot as plt\nfrom skimage import io\nfrom skimage.morphology import disk\n\nfrom examples.utils import Timer\nfrom softcolor.morphology import 
MorphologyInCIELab, soften_structuring_element\n\nif __name__ == \"__main__\":\n img = io.imread('images/lena-512.gif')\n img = img[100:200, 100:200, :]\n\n morphology = MorphologyInCIELab()\n se = disk(3)\n se = soften_structuring_element(se)\n\n with Timer() as t:\n img_contrasted, img_contrasted_steps = morphology.contrast_mapping_with_steps(img,\n structuring_element=se,\n num_iterations=3)\n print('Time elapsed contrast mapping: {} s'.format(t.interval))\n\n _, axs = plt.subplots(nrows=2, ncols=2)\n [a.axis('off') for a in axs.flat]\n axs[0, 1].imshow(se)\n axs[0, 0].imshow(img)\n axs[1, 0].imshow(img_contrasted)\n plt.show()\n\n _, axs = plt.subplots(nrows=3, ncols=ceil(len(img_contrasted_steps)/3))\n [a.axis('off') for a in axs.flat]\n for idx_step, step in enumerate(img_contrasted_steps):\n axs.flat[idx_step].imshow(step)\n plt.show()\n" ]
[ [ "numpy.full", "numpy.pad", "numpy.isnan", "scipy.ndimage.filters.convolve", "numpy.sum", "numpy.tile", "numpy.any", "numpy.all" ], [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
rachelxwang/intravideo_search
[ "09b6f4688be367762c332c4d24e4832c754b159b" ]
[ "test/test_model.py" ]
[ "# -*- coding: utf-8 -*-\nimport cv2\nfrom PIL import Image, ImageChops\nimport os\nimport sys\nimport pytest\nimport torch\nimport numpy as np\nimport pytest_check as check\nsys.path.append('src')\nsys.path.append('utils')\nfrom build_vocab import Vocabulary\nfrom view import * # nopep8\nfrom model import * # nopep8\nfrom seer_model import *\n\nexample_parameters1 = {\n 'settings': {\n 'conf': .9,\n 'poll': 5,\n 'anti': 5,\n 'search': [\"dog\"],\n 'runtime': 5.0\n },\n 'video': 'test/sampleVideo/SampleVideo_1280x720_1mb.mp4'\n}\n\nexample_job1 = Job(example_parameters1)\n\nexample_parameters2 = {\n 'settings': {\n 'conf': .9,\n 'poll': 4,\n 'anti': 5,\n 'search': [\"rabbit\"],\n 'runtime': 5.0\n },\n 'video': 'test/sampleVideo/SampleVideo_1280x720_1mb.mp4'\n}\n\nexample_job2 = Job(example_parameters2)\n\nexample_parameters3 = {\n 'settings': {\n 'conf': .9,\n 'poll': 1,\n 'anti': 3,\n 'search': [\"rock\"],\n 'runtime': 5.0\n },\n 'video': 'test/sampleVideo/SampleVideo_1280x720_1mb.mp4'\n}\n\nexample_job3 = Job(example_parameters3)\n\nexample_parameters4 = {\n 'settings': {\n 'conf': .9,\n 'poll': 8,\n 'anti': 6,\n 'search': [\"water\"],\n 'runtime': 25.0\n },\n 'video': 'test/sampleVideo/SampleVideoNature.mp4'\n}\n\nexample_job4 = Job(example_parameters4)\n\ndef test_save_clips():\n timestamps1 = [0, 5]\n timestamps2 = [4, 5]\n timestamps3 = [-1, 5]\n timestamps4 = [10, 5]\n timestamps5 = [1, -5]\n timestamps6 = [1, 1]\n\n check.is_false(example_job1.save_clips([]))\n\n with pytest.raises(Exception):\n example_job1.save_clips([timestamps3])\n example_job1.save_clips([timestamps4])\n example_job1.save_clips([timestamps5])\n example_job1.save_clips([timestamps6])\n\n check.is_true(example_job1.save_clips([timestamps1]))\n check.is_true(example_job1.save_clips([timestamps1, timestamps2]))\n path = os.path.splitext(example_job1.video_path)\n # form of filenames updated to match implementation\n check.is_true(\n os.path.isfile(\n path[0] + '_subclip(' + str(timestamps1[0]) + ',' + str(timestamps1[1]) + ')' + path[1]))\n check.is_true(\n os.path.isfile(\n path[0] + '_subclip(' + str(timestamps2[0]) + ',' + str(timestamps2[1]) + ')' + path[1]))\n\n\ndef test_classify_frames():\n frame_list1 = example_job2.classify_frames()\n frame_list = example_job1.classify_frames()\n check.equal(frame_list1[0][1], 0)\n check.less(frame_list1[0][0], 0.7)\n check.not_equal(frame_list1[1][1], 5)\n check.greater(frame_list1[1][0], 0.7)\n\n check.equal(frame_list[0][1], 0)\n check.less(frame_list[0][0], 0.7)\n check.not_equal(frame_list[1][1], 4)\n check.greater(frame_list[1][0], 0.7)\n\ndef test_score():\n j = Job(example_parameters1)\n api_results1 = {'dog': 0.9, 'cat': 0.7}\n api_results2 = {'cat': 0.7}\n check.equal(j.score(api_results1), 0.9)\n check.equal(j.score(api_results2), 0)\n with pytest.raises(Exception):\n j.score('a string')\n\n\ndef test_job_constructor():\n j = Job({'settings': {'conf': .9, 'poll': 5, 'anti': 5, 'search': ['dog'], 'runtime': 100.0},\n 'video': 'test/sampleVideo/SampleVideo_1280x720_1mb.mp4'})\n check.equal(j.video_path, 'test/sampleVideo/SampleVideo_1280x720_1mb.mp4')\n check.equal(j.settings, {'conf': .9, 'poll': 5, 'anti': 5, 'search': ['dog'], 'runtime': 100.0})\n # redundant tests removed from milestone 3a comments\n # runtime key added to test dict as per new specs of settings\n\ndef test_interpret_results_null_input():\n job = Job(example_parameters1)\n results = None\n with pytest.raises(Exception):\n ret = job.interpret_results(results)\n assert(isinstance(ret, 
type(None)))\n\n\ndef test_interpret_results_negative_time():\n job = Job(example_parameters1)\n results = [(-2.0, 0.1)]\n with pytest.raises(Exception):\n job.interpret_results(results)\n\n\ndef test_interpret_results_negative_score():\n job = Job(example_parameters1)\n results = [(3.0, -0.5)]\n with pytest.raises(Exception):\n job.interpret_results(results)\n\n\ndef test_interpret_results_unnormalized_score():\n job = Job(example_parameters1)\n resultsNonNorm = [(3.0, 1.2)]\n with pytest.raises(Exception):\n job.interpret_results(resultsNonNorm)\n\n\ndef test_interpret_results_duplicate_times():\n job = Job(example_parameters1)\n results = [(1.0, 0.1), (1.0, 0.03)]\n with pytest.raises(Exception):\n job.interpret_results(results)\n\n\ndef test_interpret_results_negative_cutoff():\n job = Job(example_parameters1)\n toyResults = [(1.0, 1)]\n with pytest.raises(Exception):\n job.interpret_results(toyResults, cutoff=-0.3)\n\n\ndef test_interpret_results_out_of_order():\n job = Job(example_parameters1)\n results = [(3.0, 0.6), (1.0, 0.03)]\n with pytest.raises(Exception):\n job.interpret_results(results)\n\ndef test_interpret_results_mid_clip():\n job = Job(example_parameters1)\n results = [(0.0, 0.1), (10.0, 0.6), (20.0, 0.3), (30.0, 0.2)]\n check.equal(job.interpret_results(results, cutoff=0.5), [(5.0, 15.0)])\n\n\ndef test_interpret_results_spanning_clip():\n job = Job(example_parameters1)\n results = [(0.0, 0.2), (10.0, 0.6), (20.0, 0.5), (30.0, 0.01)]\n\n check.is_true(stampListsAreEqual(job.interpret_results(results, cutoff=0.5),\n [(5.0, 25.0)]))\n\n\ndef test_interpret_results_multiple_seperate_clips():\n job = Job(example_parameters1)\n results = [(0.0, 0.2), (10.0, 0.6), (20.0, 0.5), (30.0, 0.1),\n (40.0, 0.7),\n (50.0, 0.8),\n (60.0, 0.01)]\n check.is_true(stampListsAreEqual(job.interpret_results(results, cutoff=0.5),\n [(5.0, 25.0), (35.0, 55.0)]))\n\n\ndef test_interpret_results_from_start():\n job = Job(example_parameters1)\n results = [(1.0, 0.6), (10.0, 0.2), (20.0, 0.1), (30.0, 0.08)]\n #print(job.interpret_results(results, cutoff=0.5))\n #exit(0)\n output = job.interpret_results(results, cutoff=0.5)\n check.is_true(stampListsAreEqual(output,[(0.0, 5.5)]))\n\n\ndef test_interpret_results_from_end():\n job = Job(example_parameters1)\n results = [(1.0, 0.2), (10.0, 0.2), (20.0, 0.1), (30.0, 0.8)]\n job.settings[\"runtime\"] = 40.0\n check.is_true(stampListsAreEqual(job.interpret_results(results, cutoff=0.5),\n [(25.0, 40.0)]))\n\n\ndef test_interpret_results_zero_cutoff():\n job = Job(example_parameters1)\n results = [(1.0, 0.2), (10.0, 0.2), (20.0, 0.1), (30.0, 0.8)]\n job.settings[\"runtime\"] = 40.0\n check.is_true(stampListsAreEqual(job.interpret_results(results, cutoff=0.0),\n [(0.0, 40.0)]))\n\n\ndef test_interpret_results_cutoff_morethan_1():\n job = Job(example_parameters1)\n results = [(0.0, 0.2), (10.0, 0.6), (20.0, 0.5), (30.0, 0.1),\n (40.0, 0.7),\n (50.0, 0.8),\n (60.0, 0.01)]\n check.is_true(stampListsAreEqual(\n job.interpret_results(results, cutoff=1.1), []))\n\n\ndef stampListsAreEqual(times1, times2):\n if not isinstance(times1, type([])) or \\\n not isinstance(times2, type([])) or \\\n not (len(times1) == len(times2)):\n return False\n\n for i in range(len(times1)):\n if not (times1[i] == times2[i]):\n return False\n\n return True\n\n# helper function to test get_frames()\ndef areImagesSame(im1, im2):\n arr1 = np.array(im1)\n arr2 = np.array(im2)\n\n if arr1.shape != arr2.shape:\n return False\n\n results = []\n\n for i, x in enumerate(arr1):\n for j, y in 
enumerate(x):\n for k, z in enumerate(y):\n px_val1 = int(arr1[i][j][k])\n px_val2 = int(arr2[i][j][k])\n\n # rgb val diff threshold +/-10\n if abs(px_val1 - px_val2) > 10:\n results.append(0)\n else:\n results.append(1)\n\n # make sure 95% of pixels fall within threshold\n return sum(results) / len(results) > 0.95\n\n# add tests for get_frames() based on comments from milestone 3a\n# now test with different videos and different settings\n# also test was changed to reflect change in get_frames() return value\n# from list of Images to list of tuples of Images and timestamps\ndef test_get_frames_poll_5():\n frames = example_job1.get_frames()\n frames = list(sorted(frames, key=lambda x: x[1]))\n check.equal(len(frames), 2)\n # frame at 0 seconds of sample video\n frame1 = Image.open('test/sampleVideo/settings_poll_5/frame0.jpg')\n # frame at 5 seconds of sample video\n frame2 = Image.open('test/sampleVideo/settings_poll_5/frame1.jpg')\n # these are same images, so should return true\n check.is_true(areImagesSame(frames[0][0], frame1))\n check.is_true(areImagesSame(frames[1][0], frame2))\n\n # these are diff images, so should return false\n check.is_false(areImagesSame(frames[1][0], frame1))\n check.is_false(areImagesSame(frames[0][0], frame2))\n\n check.equal(frames[0][1], 0)\n check.equal(frames[1][1], 5)\n\ndef test_get_frames_poll_1():\n frames = example_job3.get_frames()\n frames = list(sorted(frames, key=lambda x: x[1]))\n poll = example_job3.settings['poll']\n check.equal(len(frames), 6)\n # check frames against expected frame at each second (because poll = 1)\n for i in range(6):\n path = 'test/sampleVideo/settings_poll_1/frame%d.jpg' % i\n compare_img = Image.open(path)\n # same image, so should return true\n check.is_true(areImagesSame(frames[i][0], compare_img))\n # comparing test image w previous frame, so should be false\n if i != 0:\n check.is_false(areImagesSame(frames[i-1][0], compare_img))\n check.equal(frames[i][1], i * poll)\n\ndef test_get_frames_poll_8():\n frames = example_job4.get_frames()\n frames = list(sorted(frames, key=lambda x: x[1]))\n poll = example_job4.settings['poll']\n check.equal(len(frames), 4)\n # check frames against frame at 0,8,16,24 seconds (because poll = 8)\n for i in range(4):\n path = 'test/sampleVideo/settings_poll_8/frame%d.jpg' % i\n compare_img = Image.open(path)\n # same image, so should return true\n check.is_true(areImagesSame(frames[i][0], compare_img))\n # comparing test image w previous frame, so should be false\n if i != 0:\n check.is_false(areImagesSame(frames[i-1][0], compare_img))\n check.equal(frames[i][1], i * poll)\n\n# The following are tests for Seer.\n# There are a total of 4 methods in the Seer class, however two are entirely\n# internal and are incorporated into the initialization and the captioning\n# function, both of which are tested below.\n#\n# The initialization merely loads up the proper models (sourced from an\n# open source repository, so equivalent to an API) and so the only check\n# required is to ensure that the relevant attributes were initialized, and\n# that they are of the correct API type.\n#\n# As for the captioning method (tell_us_oh_wise_one), multiple image types\n# and invalid inputs are tested, as is usual for a unit test.\n\n# Device Config. 
Use GPU if available.\ndef test_seer_init():\n delphi = Seer()\n check.is_true(isinstance(delphi.encoder, type(EncoderCNN(1))))\n check.is_true(isinstance(delphi.decoder, type(DecoderRNN(1,1,1,1,1))))\n check.is_true(delphi.vocab_path == 'torchdata/vocab.pkl')\n check.is_true(delphi.encoder_path == 'torchdata/encoder-5-3000.pkl')\n check.is_true(delphi.decoder_path == 'torchdata/decoder-5-3000.pkl')\n check.is_true(delphi.embed_size == 256)\n check.is_true(delphi.hidden_size == 512)\n check.is_true(delphi.num_layers == 1)\n check.is_true(isinstance(delphi.vocab, type(Vocabulary())))\n\ndef test_seer_tell_us_oh_wise_one_non_image():\n delphi = Seer()\n notanimg = 5\n with pytest.raises(Exception):\n caption = delphi.tell_us_oh_wise_one(notanimg)\n\ndef test_seer_tell_us_oh_wise_one_nonetype():\n delphi = Seer()\n nonetype = None\n with pytest.raises(Exception):\n caption = delphi.tell_us_oh_wise_one(nonetype)\n\ndef test_seer_tell_us_oh_wise_one_jpg():\n delphi = Seer()\n img = Image.open(\"test/sampleImage/golden retriever.jpg\")\n caption = delphi.tell_us_oh_wise_one(img)\n true_caption = \"a dog is sitting on a couch with a frisbee\"\n check.is_true(caption == true_caption)\n\ndef test_seer_tell_us_oh_wise_one_png():\n delphi = Seer()\n img = Image.open(\"test/sampleImage/blindside.png\")\n caption = delphi.tell_us_oh_wise_one(img)\n true_caption = \"a living room with a couch and a television\"\n check.is_true(caption == true_caption)\n\ndef test_seer_tell_us_oh_wise_one_black_and_white():\n delphi = Seer()\n img = Image.open(\"test/sampleImage/bandw.jpg\")\n caption = delphi.tell_us_oh_wise_one(img)\n true_caption = \"a black and white photo of a train station\"\n check.is_true(caption == true_caption)\n\n# helper function to test get_from_yt()\ndef get_vid_duration(path):\n v=cv2.VideoCapture(path)\n fps = v.get(cv2.CAP_PROP_FPS)\n frame_count = int(v.get(cv2.CAP_PROP_FRAME_COUNT))\n duration = int(frame_count/fps)\n return duration\n\n@pytest.mark.skipif(os.environ.get('CI') == 'true',\n reason=\"Travis' IP is prob on a blocklist.\")\ndef test_get_from_yt():\n parameters = {\n 'settings': {\n 'conf': .9,'poll': 5,'anti': 5,'search': [''],'runtime':100.0},\n 'video': ''\n }\n invalid_url1 = 'https://www.youtube.com/watch?v=sVuG2i93notvalid'\n invalid_url2 = 'www.youtube.com'\n invalid_url3 = ''\n invalid_url4 = 'https://vimeo.com/66457941'\n invalid_url5 = 'www.yutub.com/watch?v=dQw4w9WgXQ'\n url1 = 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'\n url2 = 'www.youtube.com/watch?v=fJ9rUzIMcZQ'\n url3 = 'youtube.com/watch?v=VuNIsY6JdUw'\n expected_path1 = './test/Rick Astley - Never Gonna Give You Up (Video).mp4'\n expected_path2 = './test/Queen – Bohemian Rhapsody (Official Video Remastered).mp4'\n expected_path3 = './test/Taylor Swift - You Belong With Me.mp4'\n expected_duration1 = 212 # durations in seconds\n expected_duration2 = 359\n expected_duration3 = 228\n job0 = Job(parameters)\n job0.handle_vid()\n\n # test valid url1\n parameters['video'] = url1\n job1 = Job(parameters)\n job1.handle_vid()\n # get_from_yet is called in the initialization of job\n # if parameter video is a YouTube URL\n url1_path = job1.video_path\n check.equal(url1_path,expected_path1)\n check.equal(expected_duration1, get_vid_duration(url1_path))\n\n # test valid url2\n parameters['video'] = url2\n job2 = Job(parameters)\n job2.handle_vid()\n url2_path = job2.video_path\n check.equal(url2_path,expected_path2)\n check.equal(expected_duration2, get_vid_duration(url2_path))\n\n # test valid url3\n 
parameters['video'] = url3\n job3 = Job(parameters)\n job3.handle_vid()\n url3_path = job3.video_path\n check.equal(url3_path,expected_path3)\n check.equal(expected_duration3, get_vid_duration(url3_path))\n\n # test invalid inputs using arbitrary job to access get_from_yt() function\n with pytest.raises(Exception):\n job0.get_from_yt(invalid_url1)\n with pytest.raises(Exception):\n job0.get_from_yt(invalid_url2)\n with pytest.raises(Exception):\n job0.get_from_yt(invalid_url3)\n with pytest.raises(Exception):\n job0.get_from_yt(invalid_url4)\n with pytest.raises(Exception):\n job0.get_from_yt(invalid_url5)\n" ]
[ [ "numpy.array" ] ]
klusta-team/kwiklib
[ "617a6ceff55957728c3dc94109b64e4c427429c2", "617a6ceff55957728c3dc94109b64e4c427429c2" ]
[ "kwiklib/dataio/tools.py", "kwiklib/dataio/kwikloader.py" ]
[ "\"\"\"Utility functions for loading/saving files.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport os.path\nimport re\nimport cPickle\n\nimport numpy as np\nimport pandas as pd\n\n\n# -----------------------------------------------------------------------------\n# Utility data functions\n# -----------------------------------------------------------------------------\ndef check_dtype(data, dtype):\n if hasattr(data, 'dtype'):\n return data.dtype == dtype\n elif hasattr(data, 'dtypes'):\n return (data.dtypes == dtype).all()\n\ndef check_shape(data, shape):\n return tuple(data.shape) == shape\n\ndef get_array(data, copy=False, dosort=False):\n \"\"\"Get a NumPy array from a NumPy array or a Pandas data object (Series,\n DataFrame or Panel).\"\"\"\n if data is None:\n return None\n if type(data) == np.ndarray:\n if copy:\n return data.copy()\n else:\n return data\n elif isinstance(data, (pd.DataFrame, pd.Panel)):\n if dosort:\n return np.array(data.sort_index().values)\n else:\n return data.values\n elif isinstance(data, (pd.Int64Index, pd.Index)):\n if dosort:\n return np.sort(data.values)\n else:\n return data.values\n else:\n if dosort:\n return np.array(data.sort_index().values)\n else:\n return data.values\n\n\n# -----------------------------------------------------------------------------\n# Text files related functions\n# -----------------------------------------------------------------------------\ndef first_row(filepath):\n with open(filepath, 'r') as f:\n n = f.readline().strip()\n return int(n)\n\n# Faster load_text version if Pandas is installed.\n# if HAS_PANDAS:\ndef load_text(filepath, dtype, skiprows=0, delimiter=' '):\n if not filepath:\n raise IOError(\"The filepath is empty.\")\n with open(filepath, 'r') as f:\n for _ in xrange(skiprows):\n f.readline()\n x = pd.read_csv(f, header=None,\n sep=delimiter).values.astype(dtype).squeeze()\n return x\n\ndef save_text(filepath, data, header=None, fmt='%d', delimiter=' '):\n if isinstance(data, basestring):\n with open(filepath, 'w') as f:\n f.write(data)\n else:\n np.savetxt(filepath, data, fmt=fmt, newline='\\n', delimiter=delimiter)\n # Write a header.\n if header is not None:\n with open(filepath, 'r') as f:\n contents = f.read()\n contents_updated = str(header) + '\\n' + contents\n with open(filepath, 'w') as f:\n f.write(contents_updated)\n\n\n# -----------------------------------------------------------------------------\n# Binary files functions\n# -----------------------------------------------------------------------------\ndef load_binary(file, dtype=None, count=None):\n if dtype is None:\n dtype = np.dtype(np.int16)\n if count is None:\n X = np.fromfile(file, dtype=dtype)\n else:\n X = np.fromfile(file, dtype=dtype, count=count)\n return X\n\ndef save_binary(file, data):\n data.tofile(file)\n\ndef save_pickle(file, obj):\n with open(file, 'wb') as f:\n cPickle.dump(obj, f)\n\ndef load_pickle(file):\n with open(file, 'rb') as f:\n obj = cPickle.load(f)\n return obj\n\n\n# -----------------------------------------------------------------------------\n# Memory mapping\n# -----------------------------------------------------------------------------\ndef load_binary_memmap(file, dtype=None, shape=None):\n return np.memmap(file, dtype=dtype, shape=shape)\n\ndef get_chunk(f, dtype, start, stop, offset=0):\n itemsize = np.dtype(dtype).itemsize\n count = (stop - start)\n f.seek(offset + 
itemsize * start, os.SEEK_SET)\n return np.fromfile(f, dtype=dtype, count=count)\n\ndef get_chunk_line(f, dtype):\n return np.fromstring(f.readline(), dtype=dtype, sep=' ')\n\nclass MemMappedArray(object):\n def __init__(self, filename, dtype, header_size=0):\n self.filename = filename\n self.header_size = header_size\n self.dtype = dtype\n self.itemsize = np.dtype(self.dtype).itemsize\n self.f = open(filename, 'rb')\n\n def __getitem__(self, key):\n if isinstance(key, (int, long)):\n return get_chunk(self.f, self.dtype, key, key + 1,\n offset=self.header_size)[0]\n elif isinstance(key, slice):\n return get_chunk(self.f, self.dtype, key.start, key.stop,\n offset=self.header_size)\n\n def __del__(self):\n self.f.close()\n\nclass MemMappedBinary(object):\n def __init__(self, filename, dtype, rowsize=None):\n self.filename = filename\n self.dtype = dtype\n\n # Number of bytes of each item.\n self.itemsize = np.dtype(self.dtype).itemsize\n # Number of items in each row.\n self.rowsize = rowsize\n # Number of bytes in each row.\n self.rowsize_bytes = self.rowsize * self.itemsize\n # Current row.\n self.row = 0\n\n # Open the file in binary mode, even for text files.\n self.f = open(filename, 'rb')\n\n def next(self):\n \"\"\"Return the values in the next row.\"\"\"\n self.f.seek(self.rowsize_bytes * self.row, os.SEEK_SET)\n values = np.fromfile(self.f, dtype=self.dtype, count=self.rowsize)\n self.row += 1\n return values\n\n def close(self):\n self.f.close()\n\n def __del__(self):\n self.close()\n\nclass MemMappedText(object):\n BUFFER_SIZE = 10000\n\n def __init__(self, filename, dtype, skiprows=0):\n self.filename = filename\n self.dtype = dtype\n\n # Open the file in binary mode, even for text files.\n self.f = open(filename, 'rb')\n # Skip rows in non-binary mode.\n for _ in xrange(skiprows):\n self.f.readline()\n\n self._buffer_size = self.BUFFER_SIZE\n self._next_lines()\n\n def _next_lines(self):\n \"\"\"Read several lines at once as it's faster than f.readline().\"\"\"\n self._lines = self.f.readlines(self._buffer_size)\n self._nlines = len(self._lines)\n self._index = 0\n\n def _next_line(self):\n if self._index >= self._nlines:\n self._next_lines()\n if self._index < self._nlines:\n line = self._lines[self._index]\n self._index += 1\n else:\n line = ''\n return line\n\n def next(self):\n \"\"\"Return the values in the next row.\"\"\"\n # HACK: remove the double spaces.\n l = self._next_line()\n if not l:\n return None\n l = l.replace(' ', ' ')\n values = np.fromstring(l, dtype=self.dtype, sep=' ')\n return values\n\n def close(self):\n self.f.close()\n\n def __del__(self):\n self.close()\n\n\n# -----------------------------------------------------------------------------\n# Preprocessing functions\n# -----------------------------------------------------------------------------\ndef normalize(data, range=(-1., 1.), symmetric=False):\n \"\"\"Normalize an array so that all values fit in a given range.\n\n Symmetrical normalization means that after normalization, values equal to\n 0 stay equal to 0.\n\n \"\"\"\n m = data.min()\n M = data.max()\n\n if symmetric:\n vx = max(np.abs(m), np.abs(M))\n m, M = -vx, vx\n\n data = range[0] + (range[1] - range[0]) * (data - m) * (1. 
/ (M - m))\n\n return data\n\n\n\n", "\"\"\"This module provides utility classes and functions to load spike sorting\ndata sets.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport os\nimport os.path\nimport shutil\nimport re\nfrom collections import Counter\n\nimport numpy as np\nimport pandas as pd\nimport tables as tb\n\nfrom loader import (Loader, default_group_info, reorder, renumber_clusters,\n default_cluster_info)\nfrom klustersloader import (find_filenames, save_clusters, convert_to_clu,\n find_filename, find_filename_or_new)\nfrom tools import (load_text, normalize,\n load_binary, load_pickle, save_text, get_array,\n first_row, load_binary_memmap)\nfrom selection import (select, select_pairs, get_spikes_in_clusters,\n get_some_spikes_in_clusters, get_some_spikes, get_indices, pandaize)\nfrom kwiklib.utils.logger import (debug, info, warn, exception, FileLogger,\n register, unregister)\nfrom kwiklib.utils.colors import COLORS_COUNT, generate_colors\nfrom kwiklib.dataio.kwik import add_cluster\nfrom kwiklib.dataio.klusterskwik import klusters_to_kwik\nfrom .experiment import Experiment\n\n\ndef add_missing_clusters(exp):\n\n shanks = sorted(exp.channel_groups.keys())\n\n for shank in shanks:\n cg = exp.channel_groups[shank]\n clusters = cg.clusters.main.keys()\n clusters_unique = np.unique(cg.spikes.clusters.main[:])\n # Find missing clusters in the kwik file.\n missing = sorted(set(clusters_unique)-set(clusters))\n\n # Add all missing clusters with a default color and \"Unsorted\" cluster group (group #3).\n for idx in missing:\n info(\"Adding missing cluster %d in shank %d.\" % (idx, shank))\n add_cluster(exp._files, channel_group_id='%d' % shank,\n id=str(idx),\n clustering='main',\n cluster_group=3)\n\n\n# -----------------------------------------------------------------------------\n# HDF5 Loader\n# -----------------------------------------------------------------------------\nclass KwikLoader(Loader):\n # TODO: change the clustering ('main' by default)\n\n def __init__(self, parent=None, filename=None, userpref=None):\n self.experiment = None\n super(KwikLoader, self).__init__(parent=parent, filename=filename, userpref=userpref)\n\n # Read functions.\n # ---------------\n def _report_progress_open(self, spike, nspikes, shank, nshanks):\n i = shank * 100 + float(spike)/nspikes*100\n n = nshanks * 100\n self.report_progress(i, n)\n\n def _consistency_check(self):\n exp = self.experiment\n chgrp = self.shank\n\n cg = exp.channel_groups[chgrp]\n clusters = cg.clusters.main.keys()\n clusters_unique = np.unique(cg.spikes.clusters.main[:])\n\n # Find missing clusters in the kwik file.\n missing = sorted(set(clusters_unique)-set(clusters))\n\n # Add all missing clusters with a default color and \"Unsorted\" cluster group (group #3).\n for idx in missing:\n warn(\"Consistency check: adding cluster %d in the kwik file\" % idx)\n add_cluster(exp._files, channel_group_id='%d' % chgrp,\n id=idx,\n clustering='main',\n cluster_group=3)\n\n def open(self, filename=None, shank=None):\n \"\"\"Open everything.\"\"\"\n if filename is None:\n filename = self.filename\n else:\n self.filename = filename\n dir, basename = os.path.split(filename)\n\n # Converting to kwik if needed\n # kwik = find_filename(basename, 'kwik', dir=dir)\n # xml = find_filename(basename, 'xml', dir=dir)\n # self.filename_clu = find_filename(basename, 'clu', dir=dir)\n self._filenames = 
find_filenames(filename)\n kwik = find_filename(basename, 'kwik', dir=dir)\n xml = self._filenames['xml']\n clu = self._filenames['clu']\n\n self.log_filename = find_filename_or_new(filename, 'kvlog', dir=dir)\n\n\n # Backup the .clu file.\n clu_original = find_filename_or_new(filename, 'clu_original')\n if os.path.exists(clu) and not os.path.exists(clu_original):\n shutil.copyfile(clu, clu_original)\n\n if not kwik:\n assert xml, ValueError(\"I need a valid .kwik file\")\n return\n\n self.experiment = Experiment(basename, dir=dir, mode='a')\n\n # CONSISTENCY CHECK\n # add missing clusters\n add_missing_clusters(self.experiment)\n\n # TODO\n # self.initialize_logfile()\n # Load the similarity measure chosen by the user in the preferences\n # file: 'gaussian' or 'kl'.\n # Refresh the preferences file when a new file is opened.\n # USERPREF.refresh()\n self.similarity_measure = self.userpref['similarity_measure'] or 'gaussian'\n debug(\"Similarity measure: {0:s}.\".format(self.similarity_measure))\n info(\"Opening {0:s}.\".format(self.experiment.name))\n self.shanks = sorted(self.experiment.channel_groups.keys())\n\n self.freq = self.experiment.application_data.spikedetekt.sample_rate\n\n self.fetdim = self.experiment.application_data.spikedetekt.n_features_per_channel\n self.nsamples = self.experiment.application_data.spikedetekt.extract_s_before + self.experiment.application_data.spikedetekt.extract_s_after\n\n self.set_shank(shank or self.shanks[0])\n\n # Shank functions.\n # ----------------\n def get_shanks(self):\n \"\"\"Return the list of shanks available in the file.\"\"\"\n return self.shanks\n\n def set_shank(self, shank):\n \"\"\"Change the current shank and read the corresponding tables.\"\"\"\n if not shank in self.shanks:\n warn(\"Shank {0:d} is not in the list of shanks: {1:s}\".format(\n shank, str(self.shanks)))\n return\n self.shank = shank\n\n # CONSISTENCY CHECK\n # self._consistency_check()\n\n self.nchannels = len(self.experiment.channel_groups[self.shank].channels)\n\n clusters = self.experiment.channel_groups[self.shank].spikes.clusters.main[:]\n self.clusters = pd.Series(clusters, dtype=np.int32)\n self.nspikes = len(self.clusters)\n\n self.features = self.experiment.channel_groups[self.shank].spikes.features\n self.masks = self.experiment.channel_groups[self.shank].spikes.masks\n self.waveforms = self.experiment.channel_groups[self.shank].spikes.waveforms_filtered\n\n if self.features is not None:\n nfet = self.features.shape[1]\n self.nextrafet = (nfet - self.nchannels * self.fetdim)\n else:\n self.nextrafet = 0\n\n # Load concatenated time samples: those are the time samples +\n # the start time of the corresponding recordings.\n spiketimes = self.experiment.channel_groups[self.shank].spikes.concatenated_time_samples[:] * (1. 
/ self.freq)\n self.spiketimes = pd.Series(spiketimes, dtype=np.float64)\n self.duration = spiketimes[-1]\n\n self._update_data()\n\n self.read_clusters()\n\n def copy_clustering(self, clustering_from='original',\n clustering_to='main'):\n clusters = self.experiment.channel_groups[self.shank].spikes.clusters\n clusters.copy(clustering_from, clustering_to)\n\n\n # Read contents.\n # ---------------------\n def get_probe_geometry(self):\n return np.array([c.position\n for c in self.experiment.channel_groups[self.shank].channels])\n\n def read_clusters(self):\n # Read the cluster info.\n clusters = self.experiment.channel_groups[self.shank].clusters.main.keys()\n cluster_groups = [c.cluster_group or 0 for c in self.experiment.channel_groups[self.shank].clusters.main.values()]\n\n # cluster_colors = [c.application_data.klustaviewa.color\n # if c.application_data.klustaviewa.color is not None\n # else 1\n # for c in self.experiment.channel_groups[self.shank].clusters.main.values()]\n\n groups = self.experiment.channel_groups[self.shank].cluster_groups.main.keys()\n group_names = [g.name or 'Group' for g in self.experiment.channel_groups[self.shank].cluster_groups.main.values()]\n # group_colors = [g.application_data.klustaviewa.color or 1 for g in self.experiment.channel_groups[self.shank].cluster_groups.main.values()]\n\n # Create the cluster_info DataFrame.\n self.cluster_info = pd.DataFrame(dict(\n # color=cluster_colors,\n group=cluster_groups,\n ), index=clusters)\n # self.cluster_colors = self.cluster_info['color'].astype(np.int32)\n self.cluster_groups = self.cluster_info['group'].astype(np.int32)\n\n # Create the group_info DataFrame.\n self.group_info = pd.DataFrame(dict(\n # color=group_colors,\n name=group_names,\n ), index=groups)\n # self.group_colors = self.group_info['color'].astype(np.int32)\n self.group_names = self.group_info['name']\n\n\n # Writing capabilities.\n # ---------------------\n def set_cluster(self, spikes, cluster):\n if not hasattr(spikes, '__len__'):\n spikes = [spikes]\n\n self.experiment.channel_groups[self.shank].spikes.clusters.main[spikes] = cluster\n clusters = self.experiment.channel_groups[self.shank].spikes.clusters.main[:]\n self.clusters = pd.Series(clusters, dtype=np.int32)\n\n self._update_data()\n\n def set_cluster_groups(self, clusters, group):\n # self.cluster_groups.ix[clusters] = group\n if not hasattr(clusters, '__len__'):\n clusters = [clusters]\n\n clusters_gr = self.experiment.channel_groups[self.shank].clusters.main\n for cl in clusters:\n clusters_gr[cl].cluster_group = group\n\n self.read_clusters()\n\n def set_cluster_colors(self, clusters, color):\n # self.cluster_colors.ix[clusters] = color\n if not hasattr(clusters, '__len__'):\n clusters = [clusters]\n clusters_gr = self.experiment.channel_groups[self.shank].clusters.main\n for cl in clusters:\n clusters_gr[cl].application_data.klustaviewa.color = color\n\n self.read_clusters()\n\n def set_group_names(self, groups, name):\n # self.group_names.ix[groups] = name\n if not hasattr(groups, '__len__'):\n groups = [groups]\n groups_gr = self.experiment.channel_groups[self.shank].cluster_groups.main\n for gr in groups:\n groups_gr[gr].name = name\n\n self.read_clusters()\n\n def set_group_colors(self, groups, color):\n # self.group_colors.ix[groups] = color\n if not hasattr(groups, '__len__'):\n groups = [groups]\n\n groups_gr = self.experiment.channel_groups[self.shank].cluster_groups.main\n # for gr in groups:\n # groups_gr[gr].application_data.klustaviewa.color = color\n\n 
self.read_clusters()\n\n\n # Add.\n def add_cluster(self, cluster, group, color):\n # if cluster not in self.cluster_groups.index:\n # self.cluster_groups = self.cluster_groups.append(\n # pd.Series([group], index=[cluster])).sort_index()\n # if cluster not in self.cluster_colors.index:\n # self.cluster_colors = self.cluster_colors.append(\n # pd.Series([color], index=[cluster])).sort_index()\n\n self.experiment.channel_groups[self.shank].clusters.main.add_cluster(\n id=cluster,\n # color=color,\n cluster_group=group)\n\n self.read_clusters()\n\n def add_clusters(self, clusters, groups):\n # if cluster not in self.cluster_groups.index:\n # self.cluster_groups = self.cluster_groups.append(\n # pd.Series([group], index=[cluster])).sort_index()\n # if cluster not in self.cluster_colors.index:\n # self.cluster_colors = self.cluster_colors.append(\n # pd.Series([color], index=[cluster])).sort_index()\n for cluster, group in zip(clusters, groups):\n self.experiment.channel_groups[self.shank].clusters.main.add_cluster(\n id=cluster, cluster_group=group)\n self.read_clusters()\n\n def add_group(self, group, name):\n # if group not in self.group_colors.index:\n # self.group_colors = self.group_colors.append(\n # pd.Series([color], index=[group])).sort_index()\n # if group not in self.group_names.index:\n # self.group_names = self.group_names.append(\n # pd.Series([name], index=[group])).sort_index()\n\n groups = self.experiment.channel_groups[self.shank].cluster_groups.main\n groups.add_group(id=group, name=name,)\n\n self.read_clusters()\n\n # Remove.\n def remove_cluster(self, cluster):\n if np.any(np.in1d(cluster, self.clusters)):\n raise ValueError((\"Cluster {0:d} is not empty and cannot \"\n \"be removed.\").format(cluster))\n\n self.experiment.channel_groups[self.shank].clusters.main.remove_cluster(\n id=cluster,)\n\n self.read_clusters()\n\n def remove_group(self, group):\n if np.any(np.in1d(group, self.cluster_groups)):\n raise ValueError((\"Group {0:d} is not empty and cannot \"\n \"be removed.\").format(group))\n\n self.experiment.channel_groups[self.shank].cluster_groups.main.remove_group(\n id=group,)\n\n self.read_clusters()\n\n # Access to the data: spikes\n # --------------------------\n def select(self, spikes=None, clusters=None):\n if clusters is not None:\n if not hasattr(clusters, '__len__'):\n clusters = [clusters]\n spikes = get_spikes_in_clusters(clusters, self.clusters)\n self.spikes_selected = spikes\n self.clusters_selected = clusters\n\n # Log file.\n # ---------\n def initialize_logfile(self):\n self.logfile = FileLogger(self.filename_log, name='datafile',\n level=self.userpref['loglevel_file'])\n # Register log file.\n register(self.logfile)\n\n # Save.\n # -----\n def save(self, renumber=False):\n self.report_progress_save(1, 4)\n\n if renumber:\n self.renumber()\n self.clusters = self.clusters_renumbered\n self.cluster_info = self.cluster_info_renumbered\n self._update_data()\n\n # Save the clusters in the .clu file.\n clu = self._filenames['clu']\n clu_split = clu.split('.')\n clu_split[-1] = str(self.shank)\n clu = '.'.join(clu_split)\n save_clusters(clu,\n convert_to_clu(self.clusters, self.cluster_info['group']))\n\n self.report_progress_save(2, 4)\n\n # self.close()\n self.report_progress_save(3, 4)\n\n # self.open()\n self.report_progress_save(4, 4)\n\n\n # Close functions.\n # ----------------\n def close(self):\n \"\"\"Close the kwik HDF5 file.\"\"\"\n # if hasattr(self, 'kwik') and self.kwik.isopen:\n # self.kwik.flush()\n # self.kwik.close()\n if 
self.experiment is not None:\n self.experiment.close()\n self.experiment = None\n if hasattr(self, 'logfile'):\n unregister(self.logfile)\n\n\n" ]
[ [ "numpy.savetxt", "numpy.memmap", "numpy.sort", "numpy.fromfile", "numpy.abs", "pandas.read_csv", "numpy.fromstring", "numpy.dtype" ], [ "numpy.in1d", "numpy.array", "numpy.unique", "pandas.Series" ] ]
MinCiencia/ECQQ
[ "f93a01ce2dd140d073bd81afb9b4733c1d8a34c3" ]
[ "contributions_rnn/core/data.py" ]
[ "import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport os\n\ndef _bytes_feature(value):\n \"\"\"Returns a bytes_list from a string / byte.\"\"\"\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef _float_feature(list_of_floats): # float32\n return tf.train.Feature(float_list=tf.train.FloatList(value=list_of_floats))\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\ndef label_frame(frame, labels):\n label_list = []\n\n for _, c in frame.iterrows():\n m1 = labels[labels['text'] == c['text']]\n m2 = labels[labels['tokens'] == c['tokens']]\n m = pd.concat([m1,m2]).drop_duplicates()\n\n y = m[m['label']!='NR']['label']\n if y.shape[0] != 0:\n label_list.append(y.values[0])\n else:\n label_list.append('')\n\n frame['label'] = label_list\n return frame\n\ndef train_val_test(frame, outdir, train_ptg, val_ptg, save=False):\n train_sets, val_sets, test_sets = [], [], []\n category_samples = frame.groupby('label')\n for label, subframe in category_samples:\n subframe = subframe.sample(frac=1)\n\n n_train = int(train_ptg*subframe.shape[0])\n n_val = int(val_ptg/2*subframe.shape[0])\n\n train = subframe.iloc[:n_train]\n val = subframe.iloc[n_train:n_train+n_val]\n test = subframe.iloc[n_train+n_val:]\n\n train_sets.append(train)\n val_sets.append(val)\n test_sets.append(test)\n\n train = pd.concat(train_sets)\n train['subset'] = ['train']*train.shape[0]\n val = pd.concat(val_sets)\n val['subset'] = ['val']*val.shape[0]\n test = pd.concat(test_sets)\n test['subset'] = ['test']*test.shape[0]\n\n training_set = pd.concat([train, val, test])\n if save:\n training_set.to_csv(os.path.join(outdir, 'samples.csv'), index=False)\n return training_set\n\ndef write_records(frame, label, folder, embedding):\n file = os.path.join(folder, '{}.record'.format(label))\n with tf.io.TFRecordWriter(file) as writer:\n for _, row in frame.iterrows():\n # Text encoding\n encoding = []\n for word in row['text'].split():\n try:\n encoding.append(embedding[word])\n except:\n continue\n \n encoding = np.array(encoding)\n if encoding.shape[0] > 0:\n dict_sequence = dict()\n for dim in range(encoding.shape[-1]):\n seqfeat = _float_feature(encoding[:, dim])\n seqfeat = tf.train.FeatureList(feature = [seqfeat])\n dict_sequence['dim_tok_{}'.format(dim)] = seqfeat\n element_lists = tf.train.FeatureLists(feature_list=dict_sequence)\n\n dict_features={\n 'text': _bytes_feature(str(row['text']).encode()),\n 'category': _bytes_feature(str(row['label']).encode()),\n 'label': _int64_feature(label),\n 'id': _int64_feature(int(row['id'])),\n 'length': _int64_feature(int(encoding.shape[0]))\n }\n element_context = tf.train.Features(feature = dict_features)\n ex = tf.train.SequenceExample(context = element_context,\n feature_lists= element_lists)\n writer.write(ex.SerializeToString())\n\ndef create_records(frame, embedding, outdir, train_ptg=0.5, val_ptg=0.5):\n os.makedirs(outdir, exist_ok=True)\n subset_frame = train_val_test(frame, outdir, train_ptg, val_ptg, save=True)\n for subset in ['train', 'val', 'test']:\n partial = subset_frame[subset_frame['subset'] == subset]\n classes = partial.groupby('label')\n\n for k, (_, samples) in enumerate(classes):\n folder = os.path.join(outdir, subset)\n os.makedirs(folder, exist_ok=True)\n write_records(samples, k, folder, embedding)\n\ndef create_prediction_record(frame, 
embedding, outdir):\n folder = os.path.join(outdir, 'prediction')\n os.makedirs(folder, exist_ok=True)\n write_records(frame, 0, folder, embedding)\n\n\ndef _parse(sample, n_cls):\n\n\n context_features = {'label': tf.io.FixedLenFeature([],dtype=tf.int64),\n 'length': tf.io.FixedLenFeature([],dtype=tf.int64),\n 'id': tf.io.FixedLenFeature([],dtype=tf.int64),\n 'category': tf.io.FixedLenFeature([], dtype=tf.string),\n 'text': tf.io.FixedLenFeature([], dtype=tf.string)}\n\n sequence_features = dict()\n for i in range(300):\n sequence_features['dim_tok_{}'.format(i)] = tf.io.VarLenFeature(dtype=tf.float32)\n\n context, sequence = tf.io.parse_single_sequence_example(\n serialized=sample,\n context_features=context_features,\n sequence_features=sequence_features\n )\n\n input_dict = dict()\n input_dict['id'] = tf.cast(context['id'], tf.int32)\n input_dict['category'] = tf.cast(context['category'], tf.string)\n input_dict['text'] = tf.cast(context['text'], tf.string)\n input_dict['length'] = tf.cast(context['length'], tf.int32)\n input_dict['label'] = tf.one_hot(tf.cast(context['label'], tf.int32), n_cls)\n\n casted_inp_parameters = []\n for i in range(300):\n seq_dim = sequence['dim_tok_{}'.format(i)]\n seq_dim = tf.sparse.to_dense(seq_dim)\n seq_dim = tf.cast(seq_dim, tf.float32)\n casted_inp_parameters.append(seq_dim)\n\n input_dict['input'] = tf.stack(casted_inp_parameters, axis=2)[0]\n\n return input_dict\n\ndef load_records(source, batch_size, return_cls=False, return_all=False):\n\n if return_all:\n datasets = [os.path.join(source, x) for x in os.listdir(source)]\n n_cls = len(datasets)\n dataset = tf.data.TFRecordDataset(datasets)\n dataset = dataset.map(lambda x: _parse(x, n_cls), num_parallel_calls=8)\n else:\n datasets = [tf.data.TFRecordDataset(os.path.join(source, x)) for x in os.listdir(source)]\n n_cls = len(datasets)\n datasets = [\n dataset.map(\n lambda x: _parse(x, n_cls), num_parallel_calls=8) for dataset in datasets\n ]\n datasets = [dataset.repeat() for dataset in datasets]\n datasets = [dataset.shuffle(5000, reshuffle_each_iteration=True) for dataset in datasets]\n dataset = tf.data.experimental.sample_from_datasets(datasets)\n\n dataset = dataset.padded_batch(batch_size).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)\n\n if return_cls:\n return dataset, n_cls\n return dataset\n" ]
[ [ "tensorflow.data.experimental.sample_from_datasets", "tensorflow.data.TFRecordDataset", "tensorflow.train.Int64List", "tensorflow.train.Features", "tensorflow.stack", "pandas.concat", "tensorflow.cast", "tensorflow.train.FloatList", "tensorflow.io.FixedLenFeature", "tensorflow.constant", "tensorflow.train.FeatureLists", "numpy.array", "tensorflow.train.FeatureList", "tensorflow.io.parse_single_sequence_example", "tensorflow.io.VarLenFeature", "tensorflow.train.SequenceExample", "tensorflow.train.BytesList", "tensorflow.sparse.to_dense", "tensorflow.io.TFRecordWriter" ] ]
GeorgeAdamson23/StormDataAnalysis
[ "27aea327ea17d7b4bcea4b99ec5b0b9d82c70c64" ]
[ "analyze_events.py" ]
[ "# Created by George Adamson on 08/02/2019\r\n# This script will analyze the storm event data that has been loaded into a SQL database using the load_events.py script\r\n# By design, this script focuses on the Southeast Michigan region (DTX) and Washtenaw County, Michigan\r\n\r\nimport sqlite3\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n# Open up the SQL database with storm event data\r\nconn = sqlite3.connect('StormEvents.sqlite')\r\ncur = conn.cursor()\r\n\r\n\r\n\r\n# Define a function to create the bar graphs for standard numStorms vs month\r\ndef createBarGraph(sqlstr,plotTitle):\r\n monthCount = {'January' : 0, 'February' : 0, 'March' : 0, 'April' : 0, 'May' : 0, 'June' : 0, 'July' : 0, 'August' : 0, 'September' : 0, 'October' : 0, 'November' : 0, 'December' : 0}\r\n\r\n for row in cur.execute(sqlstr):\r\n monthCount[row[1]] = monthCount[row[1]] + 1\r\n\r\n # Print to console to show num of occurrences\r\n print(plotTitle)\r\n print(monthCount)\r\n\r\n # Create a bar graph of the num of storm occurrences vs month\r\n plt.bar(range(len(monthCount)), list(monthCount.values()), align='center')\r\n plt.xticks(range(len(monthCount)), list(monthCount.keys()))\r\n plt.suptitle(plotTitle)\r\n fig = plt.gcf()\r\n fig.set_size_inches(13, 6)\r\n plt.show()\r\n\r\n# Research Question 1: What is the probability of a heavy snow or winter storm event in each month in Southeast Michigan (DTX)?\r\n# To ensure a single storm does not cause multiple storm events in multiple counties in DTX, I ensure that a unique episode_id is present. This way a single storm affecting\r\n# multiple counties will register as a single event for that month.\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Heavy Snow' OR event_type = 'Winter Storm') AND wfo = 'DTX') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Heavy Snow or Winter Storm Events in Southeast Michigan (DTX) by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 2: What is the probability of a heavy snow or winter storm event in each month in Washtenaw County, Michigan?\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Heavy Snow' OR event_type = 'Winter Storm') AND cz_name = 'WASHTENAW') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Heavy Snow or Winter Storm Events in Washtenaw County, Michigan by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 3: Probability of Ice Storms in Southeast Michigan (DTX) by Month\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Ice Storm') AND wfo = 'DTX') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Ice Storm Events in Southeast Michigan (DTX) by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 4: Probability of Ice Storms in Washtenaw County, Michigan by Month\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Ice Storm') AND cz_name = 'WASHTENAW') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Ice Storm Events in Washtenaw County, Michigan by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 5: Probability of Blizzards in Southeast Michigan (DTX) by Month\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Blizzard') AND wfo = 'DTX') ORDER BY 
month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Blizzard Events in Southeast Michigan (DTX) by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 6: Probability of Thunderstorm Winds in Southeast Michigan (DTX) by Month\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Thunderstorm Wind') AND wfo = 'DTX') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Thunderstorm Wind Events in Southeast Michigan (DTX) by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 7: Probability of Thunderstorm Winds in Washtenaw County, Michigan by Month\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Thunderstorm Wind') AND cz_name = 'WASHTENAW') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Thunderstorm Wind Events in Washtenaw County, Michigan by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 8: Probability of Hail in Southeast Michigan (DTX) by Month\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Hail') AND wfo = 'DTX') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Hail Events in Southeast Michigan (DTX) by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 9: Probability of Hail in Washtenaw County, Michigan by Month\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Hail') AND cz_name = 'WASHTENAW') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Hail Events in Washtenaw County, Michigan by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 10: Probability of Tornados in Southeast Michigan (DTX) by Month\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Tornado') AND wfo = 'DTX') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Tornado Events in Southeast Michigan (DTX) by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 11: Probability of Tornados in Washtenaw County, Michigan by Month\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Tornado') AND cz_name = 'WASHTENAW') ORDER BY month_name'''\r\ncreateBarGraph(sqlstr,\"Number of Tornado Events in Washtenaw County, Michigan by Month From 2000-2018\")\r\n\r\n\r\n\r\n# Research Question 12: Combined Seasonal Storm Climatology for Southeast Michigan (DTX)\r\n# This bar graph is a bit more involved, so I opted not to use the predefined function\r\nsqlstr = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Heavy Snow' OR event_type = 'Winter Storm') AND wfo = 'DTX') ORDER BY month_name'''\r\nmonthCountWinter = {'January' : 0, 'February' : 0, 'March' : 0, 'April' : 0, 'May' : 0, 'June' : 0, 'July' : 0, 'August' : 0, 'September' : 0, 'October' : 0, 'November' : 0, 'December' : 0}\r\n\r\nfor row in cur.execute(sqlstr):\r\n monthCountWinter[row[1]] = monthCountWinter[row[1]] + 1\r\n\r\nsqlstr2 = '''SELECT DISTINCT episode_id,month_name FROM Storm_Events WHERE (state = 'MICHIGAN' AND (event_type = 'Thunderstorm Wind') AND wfo = 'DTX') ORDER BY month_name'''\r\nmonthCountSummer = {'January' : 0, 'February' : 0, 'March' : 0, 'April' : 0, 'May' : 0, 'June' : 0, 'July' : 0, 'August' : 0, 'September' : 0, 'October' : 0, 'November' : 0, 'December' : 0}\r\n\r\nfor row in cur.execute(sqlstr2):\r\n 
monthCountSummer[row[1]] = monthCountSummer[row[1]] + 1\r\n\r\n# Create a bar graph to show both summer and winter storm data\r\nX = np.arange(len(monthCountWinter))\r\nax = plt.subplot(111)\r\nax.bar(X, monthCountWinter.values(), width=0.2, color='b', align='center')\r\nax.bar(X-0.2, monthCountSummer.values(), width=0.2, color='r', align='center')\r\nax.legend(('Heavy Snow/Winter Storms','Thunderstorm Wind Events'))\r\nplt.xticks(X, monthCountWinter.keys())\r\nplt.title(\"Combined Seasonal Storm Climatology for Southeast Michigan (DTX) by Month 2000-2018\", fontsize=17)\r\nfig = plt.gcf()\r\nfig.set_size_inches(13, 6)\r\nplt.show()\r\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.gcf", "matplotlib.pyplot.show", "matplotlib.pyplot.subplot" ] ]
vita-epfl/rock-pytorch
[ "6f4c86d3fec7fe3b0ce65d2687d144e9698e964f" ]
[ "rock/detect.py" ]
[ "import os\nimport time\nfrom pathlib import Path\nfrom typing import Tuple, Optional\n\nimport torch\nimport torch.utils.data\n\nimport rock.ssd.prior_boxes\nimport rock.ssd.encoder\nimport rock.datasets.transforms\nimport rock.datasets.image_folder\nimport rock.model.network\nimport rock.utils.load\nimport rock.utils.draw\n\n\ndef object_detection(model_path: str,\n image_folder_path: str = 'data/detection/images',\n detection_output_path: str = 'data/detection/output',\n scene_output_path: Optional[str] = None,\n depth_output_path: Optional[str] = None,\n normals_output_path: Optional[str] = None,\n device: torch.device = torch.device(\"cuda\"),\n aux: bool = True,\n aux_tasks: Tuple[str, ...] = ('scene', 'depth', 'normals'),\n conf_threshold: float = 0.4,\n throughput: bool = False,\n verbose: bool = True) -> None:\n \"\"\" Loads a model and detects images at a given path\n \"\"\"\n if detection_output_path:\n Path(detection_output_path).mkdir(parents=True, exist_ok=True)\n if scene_output_path:\n Path(scene_output_path).mkdir(parents=True, exist_ok=True)\n if depth_output_path:\n Path(depth_output_path).mkdir(parents=True, exist_ok=True)\n if normals_output_path:\n Path(normals_output_path).mkdir(parents=True, exist_ok=True)\n\n if verbose and not throughput:\n print(\"Running object detection with model: {}\".format(model_path))\n\n if throughput:\n print(\"Calculating throughput disables saving detection output to folder\")\n pboxes = rock.ssd.prior_boxes.pboxes_rock()\n encoder = rock.ssd.encoder.Encoder(pboxes)\n image_data = rock.datasets.image_folder.ImageFolder(image_folder_path)\n\n model = rock.model.network.rock_network(aux_tasks) if aux else rock.model.network.baseline_ssd()\n model = model.to(device)\n rock.utils.load.load_from_checkpoint(model_path, model, verbose=verbose)\n\n predict(model=model, dataset=image_data, encoder=encoder, device=device,\n conf_threshold=conf_threshold, detection_output_path=detection_output_path,\n scene_output_path=scene_output_path, depth_output_path=depth_output_path,\n normals_output_path=normals_output_path, aux=aux, aux_tasks=aux_tasks, throughput=throughput,\n verbose=verbose)\n\n if verbose and not throughput:\n print(\"Detections saved to: {}\".format(detection_output_path))\n print(\"Done!\")\n\n\ndef predict(model: torch.nn.Module,\n dataset: torch.utils.data.Dataset,\n encoder: rock.ssd.encoder.Encoder,\n detection_output_path: str,\n scene_output_path: str,\n depth_output_path: str,\n normals_output_path: str,\n device: torch.device,\n aux: bool,\n aux_tasks: Tuple[str, ...],\n conf_threshold: float,\n throughput: bool,\n verbose: bool) -> float:\n \"\"\" Performs object detection for a given model\n\n Returns the number of images evaluated per sec (forward pass) if show_images_per_sec is False, otherwise,\n prints the number of images evaluated per sec\n \"\"\"\n model.eval()\n model.to(device)\n\n batch_size = 1 if throughput else 8\n loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=2, drop_last=False)\n\n total_images = len(dataset)\n total_time = 0\n\n for i, (imgs, filenames) in enumerate(loader):\n tic = time.time()\n with torch.no_grad():\n imgs = imgs.to(device)\n ploc, plabel, *aux_out = model(imgs)\n\n toc = time.time()\n total_time += (toc - tic)\n\n # Save images only if we are not checking the throughput\n if not throughput:\n for j in range(imgs.shape[0]):\n save_path = os.path.join(detection_output_path, filenames[j])\n 
rock.utils.draw.draw_predictions(img=rock.utils.draw.inv_norm(imgs[j]),\n encoder=encoder, ploc=ploc, plabel=plabel, idx=j,\n label_map=rock.utils.draw.rock_label_map(), show=False,\n save_path=save_path, conf_threshold=conf_threshold)\n\n if aux:\n if 'scene' in aux_tasks and scene_output_path:\n scene = aux_out[0]\n scene_save_path = os.path.join(scene_output_path, filenames[j])\n scene_save_path = os.path.splitext(scene_save_path)[0] + '.txt'\n rock.utils.draw.write_scenes(scene[j], scene_save_path, log=True)\n\n if 'depth' in aux_tasks and depth_output_path:\n depth = aux_out[1]\n depth_save_path = os.path.join(depth_output_path, filenames[j])\n rock.utils.draw.draw_depth(depth[j], depth_save_path, log=True)\n\n if 'normals' in aux_tasks and normals_output_path:\n normals = aux_out[2]\n normals_save_path = os.path.join(normals_output_path, filenames[j])\n rock.utils.draw.draw_normals(normals[j], normals_save_path)\n\n if verbose or throughput:\n print(\"{}/{} images detected\".format((i+1) * batch_size, total_images), end='\\r')\n\n model.train()\n\n images_per_sec = total_images / total_time\n\n if throughput:\n print()\n print(\"Throughput: {:.2f} images/sec\".format(images_per_sec))\n elif verbose:\n print(\"{}/{} images detected\".format(total_images, total_images))\n\n return images_per_sec\n" ]
[ [ "torch.device", "torch.no_grad", "torch.utils.data.DataLoader" ] ]
PuzeLiu/mushroom-rl
[ "99942b425e66b4ddcc26009d7105dde23841e95d", "99942b425e66b4ddcc26009d7105dde23841e95d", "2625ee7f64d5613b3b9fba00f0b7a39fece88ca5" ]
[ "mushroom_rl/algorithms/policy_search/black_box_optimization/reps.py", "mushroom_rl/algorithms/value/td/double_q_learning.py", "mushroom_rl/features/features.py" ]
[ "import numpy as np\n\nfrom scipy.optimize import minimize\n\nfrom mushroom_rl.algorithms.policy_search.black_box_optimization import BlackBoxOptimization\nfrom mushroom_rl.utils.parameters import to_parameter\n\n\nclass REPS(BlackBoxOptimization):\n \"\"\"\n Episodic Relative Entropy Policy Search algorithm.\n \"A Survey on Policy Search for Robotics\", Deisenroth M. P., Neumann G.,\n Peters J.. 2013.\n\n \"\"\"\n def __init__(self, mdp_info, distribution, policy, eps, features=None):\n \"\"\"\n Constructor.\n\n Args:\n eps ([float, Parameter]): the maximum admissible value for the Kullback-Leibler\n divergence between the new distribution and the\n previous one at each update step.\n\n \"\"\"\n self._eps = to_parameter(eps)\n\n self._add_save_attr(_eps='mushroom')\n\n super().__init__(mdp_info, distribution, policy, features)\n\n def _update(self, Jep, theta):\n eta_start = np.ones(1)\n\n res = minimize(REPS._dual_function, eta_start,\n jac=REPS._dual_function_diff,\n bounds=((np.finfo(np.float32).eps, np.inf),),\n args=(self._eps(), Jep, theta))\n\n eta_opt = res.x.item()\n\n Jep -= np.max(Jep)\n\n d = np.exp(Jep / eta_opt)\n\n self.distribution.mle(theta, d)\n\n @staticmethod\n def _dual_function(eta_array, *args):\n eta = eta_array.item()\n eps, Jep, theta = args\n\n max_J = np.max(Jep)\n\n r = Jep - max_J\n sum1 = np.mean(np.exp(r / eta))\n\n return eta * eps + eta * np.log(sum1) + max_J\n\n @staticmethod\n def _dual_function_diff(eta_array, *args):\n eta = eta_array.item()\n eps, Jep, theta = args\n\n max_J = np.max(Jep)\n\n r = Jep - max_J\n\n sum1 = np.mean(np.exp(r / eta))\n sum2 = np.mean(np.exp(r / eta) * r)\n\n gradient = eps + np.log(sum1) - sum2 / (eta * sum1)\n\n return np.array([gradient])\n", "import numpy as np\nfrom copy import deepcopy\n\nfrom mushroom_rl.algorithms.value.td import TD\nfrom mushroom_rl.utils.table import EnsembleTable\n\n\nclass DoubleQLearning(TD):\n \"\"\"\n Double Q-Learning algorithm.\n \"Double Q-Learning\". Hasselt H. V.. 2010.\n\n \"\"\"\n def __init__(self, mdp_info, policy, learning_rate):\n Q = EnsembleTable(2, mdp_info.size)\n\n super().__init__(mdp_info, policy, Q, learning_rate)\n\n self._alpha_double = [deepcopy(self._alpha), deepcopy(self._alpha)]\n\n self._add_save_attr(\n _alpha_double='primitive'\n )\n\n assert len(self.Q) == 2, 'The regressor ensemble must' \\\n ' have exactly 2 models.'\n\n def _update(self, state, action, reward, next_state, absorbing):\n approximator_idx = 0 if np.random.uniform() < .5 else 1\n\n q_current = self.Q[approximator_idx][state, action]\n\n if not absorbing:\n q_ss = self.Q[approximator_idx][next_state, :]\n max_q = np.max(q_ss)\n a_n = np.array(\n [np.random.choice(np.argwhere(q_ss == max_q).ravel())])\n q_next = self.Q[1 - approximator_idx][next_state, a_n]\n else:\n q_next = 0.\n\n q = q_current + self._alpha_double[approximator_idx](state, action) * (\n reward + self.mdp_info.gamma * q_next - q_current)\n\n self.Q[approximator_idx][state, action] = q\n", "import numpy as np\n\nfrom ._implementations.basis_features import BasisFeatures\nfrom ._implementations.functional_features import FunctionalFeatures\nfrom ._implementations.tiles_features import TilesFeatures\nfrom ._implementations.torch_features import TorchFeatures\n\n\ndef Features(basis_list=None, tilings=None, tensor_list=None,\n n_outputs=None, function=None):\n \"\"\"\n Factory method to build the requested type of features. 
The types are\n mutually exclusive.\n\n Possible features are tilings (``tilings``), basis functions\n (``basis_list``), tensor basis (``tensor_list``), and functional mappings\n (``n_outputs`` and ``function``).\n\n The difference between ``basis_list`` and ``tensor_list`` is that the\n former is a list of Python classes, each evaluating a single element of\n the feature vector, while the latter consists of a list of PyTorch modules\n that can be used to build a PyTorch network. The use of ``tensor_list`` is\n a faster way to compute features than ``basis_list`` and is suggested when\n the computation of the requested features is slow (see the Gaussian radial\n basis function implementation as an example). A functional mapping applies\n a function to the input, computing an ``n_outputs``-dimensional vector,\n where the mapping is expressed by ``function``. If ``function`` is not\n provided, the identity is used.\n\n Args:\n basis_list (list, None): list of basis functions;\n tilings ([object, list], None): single object or list of tilings;\n tensor_list (list, None): list of dictionaries containing the\n instructions to build the requested tensors;\n n_outputs (int, None): dimensionality of the feature mapping;\n function (object, None): a callable function to be used as feature\n mapping. Only needed when using a functional mapping.\n\n Returns:\n The class implementing the requested type of features.\n\n \"\"\"\n if basis_list is not None and tilings is None and tensor_list is None and n_outputs is None:\n return BasisFeatures(basis_list)\n elif basis_list is None and tilings is not None and tensor_list is None and n_outputs is None:\n return TilesFeatures(tilings)\n elif basis_list is None and tilings is None and tensor_list is not None and n_outputs is None:\n return TorchFeatures(tensor_list)\n elif basis_list is None and tilings is None and tensor_list is None and n_outputs is not None:\n return FunctionalFeatures(n_outputs, function)\n else:\n raise ValueError('You must specify either: a list of basis functions, a list of tilings, '\n 'a list of tensors or the number of outputs '\n '(and optionally the functional mapping to use).')\n\n\ndef get_action_features(phi_state, action, n_actions):\n \"\"\"\n Compute an array of size ``len(phi_state)`` * ``n_actions`` filled with\n zeros, except for elements from ``len(phi_state)`` * ``action`` to\n ``len(phi_state)`` * (``action`` + 1) that are filled with `phi_state`. This\n is used to compute state-action features.\n\n Args:\n phi_state (np.ndarray): the feature of the state;\n action (np.ndarray): the action whose features have to be computed;\n n_actions (int): the number of actions.\n\n Returns:\n The state-action features.\n\n \"\"\"\n if len(phi_state.shape) > 1:\n assert phi_state.shape[0] == action.shape[0]\n\n phi = np.ones((phi_state.shape[0], n_actions * phi_state[0].size))\n i = 0\n for s, a in zip(phi_state, action):\n start = s.size * int(a[0])\n stop = start + s.size\n\n phi_sa = np.zeros(n_actions * s.size)\n phi_sa[start:stop] = s\n\n phi[i] = phi_sa\n\n i += 1\n else:\n start = phi_state.size * action[0]\n stop = start + phi_state.size\n\n phi = np.zeros(n_actions * phi_state.size)\n phi[start:stop] = phi_state\n\n return phi\n" ]
[ [ "numpy.max", "numpy.array", "numpy.log", "numpy.ones", "numpy.exp", "numpy.finfo" ], [ "numpy.max", "numpy.random.uniform", "numpy.argwhere" ], [ "numpy.ones", "numpy.zeros" ] ]
RakeshJarupula/keras
[ "2ac6638e91d5aff77c22b45e9c8c84fb05a9e477", "2ac6638e91d5aff77c22b45e9c8c84fb05a9e477", "2ac6638e91d5aff77c22b45e9c8c84fb05a9e477" ]
[ "keras/layers/regularization/spatial_dropout3d.py", "keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py", "keras/layers/merging/concatenate.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains the SpatialDropout3D layer.\"\"\"\n# pylint: disable=g-classes-have-attributes,g-direct-tensorflow-import\n\nfrom keras import backend\nfrom keras.engine.input_spec import InputSpec\nfrom keras.layers.regularization.dropout import Dropout\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.SpatialDropout3D')\nclass SpatialDropout3D(Dropout):\n \"\"\"Spatial 3D version of Dropout.\n\n This version performs the same function as Dropout, however, it drops\n entire 3D feature maps instead of individual elements. If adjacent voxels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout3D will help promote independence\n between feature maps and should be used instead.\n\n Args:\n rate: Float between 0 and 1. Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode,\n the channels dimension (the depth) is at index 1, in 'channels_last' mode\n is it at index 4. It defaults to the `image_data_format` value found in\n your Keras config file at `~/.keras/keras.json`. If you never set it, then\n it will be \"channels_last\".\n Call arguments:\n inputs: A 5D tensor.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (doing nothing).\n Input shape:\n 5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if\n data_format='channels_first'\n or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if\n data_format='channels_last'.\n Output shape: Same as input.\n References: - [Efficient Object Localization Using Convolutional\n Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n def __init__(self, rate, data_format=None, **kwargs):\n super().__init__(rate, **kwargs)\n if data_format is None:\n data_format = backend.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError(\n f'`data_format` must be \"channels_last\" or \"channels_first\". '\n f'Received: data_format={data_format}.')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=5)\n\n def _get_noise_shape(self, inputs):\n input_shape = tf.shape(inputs)\n if self.data_format == 'channels_first':\n return (input_shape[0], input_shape[1], 1, 1, 1)\n elif self.data_format == 'channels_last':\n return (input_shape[0], 1, 1, 1, input_shape[4])\n", "# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Benchmarks using custom training loop on MNIST dataset.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport timeit\nimport numpy as np\n\nfrom keras.benchmarks import benchmark_util\nfrom keras.benchmarks import distribution_util\n\n\nclass CustomMnistBenchmark(tf.test.Benchmark):\n \"\"\"Benchmarks for custom training loop using `tf.test.Benchmark`.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.num_classes = 10\n self.input_shape = (28, 28, 1)\n self.epochs = 15\n (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()\n x_train = x_train.astype('float32') / 255\n x_train = np.expand_dims(x_train, -1)\n y_train = tf.keras.utils.to_categorical(y_train, self.num_classes)\n self.num_examples = x_train.shape[0]\n # Use `tf.data.Dataset` for custom training loop.\n self.train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n\n def _build_model(self):\n \"\"\"Model from https://keras.io/examples/vision/mnist_convnet/.\"\"\"\n model = tf.keras.Sequential([\n tf.keras.Input(shape=self.input_shape),\n tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(self.num_classes, activation='softmax'),\n ])\n\n return model\n\n def compute_loss(self, targets, predictions, loss_fn, batch_size):\n \"\"\"Compute average loss.\"\"\"\n per_example_loss = loss_fn(targets, predictions)\n return tf.nn.compute_average_loss(\n per_example_loss, global_batch_size=batch_size)\n\n @tf.function(reduce_retracing=True)\n def train_step(self, inputs, model, loss_fn, optimizer, batch_size):\n \"\"\"Compute loss and optimize model by optimizer.\n\n Args:\n inputs: `tf.data`.\n model: See `model` in `train_function()` method.\n loss_fn: See `loss_fn` in `train_function()` method.\n optimizer: See `optimizer` in `train_function()` method.\n batch_size: See `batch_size` in `train_function()` method.\n\n Returns:\n Loss value.\n \"\"\"\n train_x, train_y = inputs\n with tf.GradientTape() as tape:\n predictions = model(train_x, training=True)\n loss = self.compute_loss(train_y, predictions, loss_fn, batch_size)\n grads = tape.gradient(loss, model.trainable_weights)\n optimizer.apply_gradients(zip(grads, model.trainable_weights))\n return loss\n\n @tf.function(reduce_retracing=True)\n def distributed_train_step(self, batch_dataset, model, loss_fn, optimizer,\n batch_size, distribution_strategy):\n \"\"\"Train step in distribution strategy setting.\n\n Args:\n batch_dataset: `tf.data`.\n model: See `model` in `train_function()` method.\n loss_fn: See `loss_fn` in 
`train_function()` method.\n optimizer: See `optimizer` in `train_function()` method.\n batch_size: See `batch_size` in `train_function()` method.\n distribution_strategy: See `distribution_strategy` in `train_function()`\n method.\n\n Returns:\n Sum of per_replica_losses.\n \"\"\"\n per_replica_losses = distribution_strategy.run(\n self.train_step,\n args=(\n batch_dataset,\n model,\n loss_fn,\n optimizer,\n batch_size,\n ))\n return distribution_strategy.reduce(\n tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)\n\n def train_function(self,\n model,\n train_dataset,\n loss_fn,\n optimizer,\n epochs=2,\n distribution_strategy=None,\n batch_size=256):\n \"\"\"Train model in custom training loop and return the average\n train_step_time.\n\n Args:\n model: Model to be benchmarked.\n train_dataset: `tf.data` dataset. Should return a tuple of either (inputs,\n targets) or (inputs, targets, sample_weights).\n loss_fn: `tf.keras.losses.Loss` instance.\n optimizer: `tf.keras.optimizers` instance.\n epochs: Integer. Number of epochs to train the model. If unspecified,\n `epochs` will default to 2.\n distribution_strategy: Distribution strategies. It could be\n `multi_worker_mirrored`, `one_device`, `mirrored`. If unspecified,\n `distribution_strategy` will default to 'off'. Note that, `TPU` and\n `parameter_server` are not supported yet.\n batch_size: Integer. Number of samples per gradient update. If\n unspecified, `batch_size` will default to 256.\n\n Returns:\n Average train_step_time.\n \"\"\"\n train_step_time_list = []\n timer = timeit.default_timer\n\n total_loss = 0.0\n num_batches = 0\n for _ in range(epochs):\n # Iterate over the batches of the dataset.\n for batch_dataset in train_dataset:\n\n start_time = timer()\n\n if distribution_strategy is not None:\n total_loss += self.distributed_train_step(batch_dataset, model,\n loss_fn, optimizer,\n batch_size,\n distribution_strategy)\n else:\n total_loss += self.train_step(batch_dataset, model, loss_fn,\n optimizer, batch_size)\n num_batches += 1\n\n end_time = timer()\n train_step_time_list.append(end_time - start_time)\n\n return np.mean(train_step_time_list)\n\n def measure_performance(self,\n model,\n dataset,\n loss_fn,\n optimizer,\n batch_size=32,\n run_iters=4,\n epochs=10,\n distribution_strategy=None):\n \"\"\"Run models and measure the performance.\n\n Args:\n model: Model to be benchmarked.\n dataset: `tf.data` dataset. Should return a tuple of either (inputs,\n targets) or (inputs, targets, sample_weights).\n loss_fn: `tf.keras.losses.Loss` instance.\n optimizer: `tf.keras.optimizers` instance.\n batch_size: Integer. Number of samples per gradient update. If\n unspecified, `batch_size` will default to 32.\n run_iters: Integer. Number of iterations to run the performance\n measurement. If unspecified, `run_iters` will default to 4.\n epochs: Integer. Number of epochs to train the model. If unspecified,\n `epochs` will default to 10.\n distribution_strategy: Distribution strategies. It could be\n `multi_worker_mirrored`, `one_device`, `mirrored`. If unspecified,\n `distribution_strategy` will default to 'off'. 
Note that, `TPU` and\n `parameter_server` are not supported yet.\n\n Returns:\n Performance summary, which contains build_time, avg_epoch_time,\n wall_time, exp_per_sec, epochs, warmup_time, train_step_time.\n\n Raise:\n ValueError: if `dataset` is None or if `optimizer` instance is\n not provided or if `loss_fn` instance is not provided.\n \"\"\"\n if distribution_strategy is not None and \\\n not isinstance(dataset, tf.distribute.DistributedDataset):\n raise ValueError('tf.distribute.DistributedDataset'\n ' required in distribution strategy.')\n\n if distribution_strategy is None and \\\n not isinstance(dataset, tf.data.Dataset):\n raise ValueError('`tf.data` is required.')\n\n if not isinstance(loss_fn, tf.keras.losses.Loss):\n raise ValueError('`tf.keras.losses.Loss` instance '\n 'for loss_fn is required.')\n\n if not isinstance(optimizer, tf.keras.optimizers.Optimizer):\n raise ValueError('`tf.keras.optimizers` instance '\n 'for optimizer is required.')\n\n avg_epoch_time_list, train_step_time_list = [], []\n wall_time_list, exp_per_sec_list, warmup_time_list = [], [], []\n\n total_num_examples = epochs * self.num_examples\n\n for _ in range(run_iters):\n timer = timeit.default_timer\n start_time = timer()\n t1 = timer()\n self.train_function(model, dataset, loss_fn, optimizer, 1,\n distribution_strategy, batch_size)\n warmup_time = timer() - t1\n\n t2 = timer()\n train_step_time = self.train_function(model, dataset, loss_fn, optimizer,\n epochs, distribution_strategy,\n batch_size)\n end_time = timer()\n\n train_step_time_list.append(train_step_time)\n warmup_time_list.append(warmup_time)\n wall_time_list.append(end_time - start_time)\n exp_per_sec_list.append(total_num_examples / (end_time - t2))\n avg_epoch_time_list.append((end_time - t2) / epochs)\n\n metrics = []\n metrics.append({\n 'name': 'avg_epoch_time',\n 'value': np.mean(avg_epoch_time_list)\n })\n metrics.append({'name': 'exp_per_sec', 'value': np.mean(exp_per_sec_list)})\n metrics.append({'name': 'warmup_time', 'value': np.mean(warmup_time_list)})\n metrics.append({\n 'name': 'train_step_time',\n 'value': np.mean(train_step_time_list)\n })\n metrics.append({'name': 'epochs', 'value': epochs})\n\n wall_time = np.mean(wall_time_list)\n\n return metrics, wall_time\n\n def benchmark_custom_training_mnist_bs_128(self):\n \"\"\"Measure performance with batch_size=128 and run_iters=5.\"\"\"\n batch_size = 128\n run_iters = 5\n train_dataset = self.train_dataset.shuffle(\n buffer_size=1024).batch(batch_size)\n\n # Instantiate a loss function.\n loss_fn = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE)\n # Instantiate an optimizer to train the model.\n optimizer = tf.keras.optimizers.Adam()\n model = self._build_model()\n\n metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,\n optimizer, batch_size,\n run_iters, self.epochs)\n extras = benchmark_util.get_keras_examples_metadata('conv', batch_size,\n '.keras.ctl_graph')\n self.report_benchmark(\n iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)\n\n def benchmark_custom_training_mnist_bs_256(self):\n \"\"\"Measure performance with batch_size=256 and run_iters=5.\"\"\"\n batch_size = 256\n run_iters = 5\n train_dataset = self.train_dataset.shuffle(\n buffer_size=1024).batch(batch_size)\n\n # Instantiate a loss function.\n loss_fn = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE)\n # Instantiate an optimizer to train the model.\n optimizer = 
tf.keras.optimizers.Adam()\n model = self._build_model()\n\n metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,\n optimizer, batch_size,\n run_iters, self.epochs)\n extras = benchmark_util.get_keras_examples_metadata('conv', batch_size,\n '.keras.ctl_graph')\n self.report_benchmark(\n iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)\n\n def benchmark_custom_training_mnist_bs_512(self):\n \"\"\"Measure performance with batch_size=512 and run_iters=5.\"\"\"\n batch_size = 512\n run_iters = 5\n train_dataset = self.train_dataset.shuffle(\n buffer_size=1024).batch(batch_size)\n\n # Instantiate a loss function.\n loss_fn = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE)\n # Instantiate an optimizer to train the model.\n optimizer = tf.keras.optimizers.Adam()\n model = self._build_model()\n\n metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,\n optimizer, batch_size,\n run_iters, self.epochs)\n extras = benchmark_util.get_keras_examples_metadata('conv', batch_size,\n '.keras.ctl_graph')\n self.report_benchmark(\n iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)\n\n def benchmark_custom_training_mnist_bs_512_gpu_2(self):\n \"\"\"Measure performance with batch_size=512, run_iters=10, gpu=2 and\n distribution_strategy='mirrored'.\n \"\"\"\n batch_size = 512\n run_iters = 10\n train_dataset = self.train_dataset.shuffle(\n buffer_size=1024).batch(batch_size)\n\n distribution_strategy = 'mirrored'\n\n strategy = distribution_util.get_distribution_strategy(\n distribution_strategy=distribution_strategy, num_gpus=2)\n\n if distribution_strategy != 'off':\n train_dataset = strategy.experimental_distribute_dataset(train_dataset)\n\n strategy_scope = distribution_util.get_strategy_scope(strategy)\n\n with strategy_scope:\n # Instantiate a loss function.\n loss_fn = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE)\n # Instantiate an optimizer to train the model.\n optimizer = tf.keras.optimizers.Adam()\n model = self._build_model()\n\n metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,\n optimizer, batch_size,\n run_iters, self.epochs,\n strategy)\n extras = benchmark_util.get_keras_examples_metadata('conv', batch_size,\n '.keras.ctl_graph')\n self.report_benchmark(\n iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Layer that concatenates several inputs.\"\"\"\n\n\nfrom keras import backend\nfrom keras.layers.merging.base_merge import _Merge\nfrom keras.utils import tf_utils\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.util.tf_export import keras_export\n\n\n@keras_export('keras.layers.Concatenate')\nclass Concatenate(_Merge):\n \"\"\"Layer that concatenates a list of inputs.\n\n It takes as input a list of tensors, all of the same shape except\n for the concatenation axis, and returns a single tensor that is the\n concatenation of all inputs.\n\n >>> x = np.arange(20).reshape(2, 2, 5)\n >>> print(x)\n [[[ 0 1 2 3 4]\n [ 5 6 7 8 9]]\n [[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> y = np.arange(20, 30).reshape(2, 1, 5)\n >>> print(y)\n [[[20 21 22 23 24]]\n [[25 26 27 28 29]]]\n >>> tf.keras.layers.Concatenate(axis=1)([x, y])\n <tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=\n array([[[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [20, 21, 22, 23, 24]],\n [[10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [25, 26, 27, 28, 29]]])>\n\n >>> x1 = tf.keras.layers.Dense(8)(np.arange(10).reshape(5, 2))\n >>> x2 = tf.keras.layers.Dense(8)(np.arange(10, 20).reshape(5, 2))\n >>> concatted = tf.keras.layers.Concatenate()([x1, x2])\n >>> concatted.shape\n TensorShape([5, 16])\n\n \"\"\"\n\n def __init__(self, axis=-1, **kwargs):\n \"\"\"Instantiates a Concatenate layer.\n\n >>> x = np.arange(20).reshape(2, 2, 5)\n >>> print(x)\n [[[ 0 1 2 3 4]\n [ 5 6 7 8 9]]\n [[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> y = np.arange(20, 30).reshape(2, 1, 5)\n >>> print(y)\n [[[20 21 22 23 24]]\n [[25 26 27 28 29]]]\n >>> tf.keras.layers.Concatenate(axis=1)([x, y])\n <tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=\n array([[[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [20, 21, 22, 23, 24]],\n [[10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [25, 26, 27, 28, 29]]])>\n\n Args:\n axis: Axis along which to concatenate.\n **kwargs: standard layer keyword arguments.\n \"\"\"\n super().__init__(**kwargs)\n self.axis = axis\n self.supports_masking = True\n self._reshape_required = False\n\n @tf_utils.shape_type_conversion\n def build(self, input_shape):\n # Used purely for shape validation.\n if len(input_shape) < 1 or not isinstance(input_shape[0], tuple):\n raise ValueError(\n 'A `Concatenate` layer should be called on a list of '\n f'at least 1 input. Received: input_shape={input_shape}')\n if all(shape is None for shape in input_shape):\n return\n reduced_inputs_shapes = [list(shape) for shape in input_shape]\n shape_set = set()\n for i in range(len(reduced_inputs_shapes)):\n del reduced_inputs_shapes[i][self.axis]\n shape_set.add(tuple(reduced_inputs_shapes[i]))\n\n if len(shape_set) != 1:\n err_msg = ('A `Concatenate` layer requires inputs with matching shapes '\n 'except for the concatenation axis. 
'\n f'Received: input_shape={input_shape}')\n # Make sure all the shapes have same ranks.\n ranks = set(len(shape) for shape in shape_set)\n if len(ranks) != 1:\n raise ValueError(err_msg)\n # Get the only rank for the set.\n (rank,) = ranks\n for axis in range(rank):\n # Skip the Nones in the shape since they are dynamic, also the axis for\n # concat has been removed above.\n unique_dims = set(\n shape[axis] for shape in shape_set if shape[axis] is not None)\n if len(unique_dims) > 1:\n raise ValueError(err_msg)\n\n def _merge_function(self, inputs):\n return backend.concatenate(inputs, axis=self.axis)\n\n @tf_utils.shape_type_conversion\n def compute_output_shape(self, input_shape):\n if ((not isinstance(input_shape, (tuple, list))) or\n (not isinstance(input_shape[0], (tuple, list)))):\n # The tf_utils.shape_type_conversion decorator turns tensorshapes\n # into tuples, so we need to verify that `input_shape` is a list/tuple,\n # *and* that the individual elements are themselves shape tuples.\n raise ValueError(\n 'A `Concatenate` layer should be called on a list of inputs. '\n f'Received: input_shape={input_shape}')\n input_shapes = input_shape\n output_shape = list(input_shapes[0])\n for shape in input_shapes[1:]:\n if output_shape[self.axis] is None or shape[self.axis] is None:\n output_shape[self.axis] = None\n break\n output_shape[self.axis] += shape[self.axis]\n return tuple(output_shape)\n\n def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, (tuple, list)):\n raise ValueError(f'`mask` should be a list. Received mask={mask}')\n if not isinstance(inputs, (tuple, list)):\n raise ValueError(f'`inputs` should be a list. Received: inputs={inputs}')\n if len(mask) != len(inputs):\n raise ValueError(\n 'The lists `inputs` and `mask` should have the same length. '\n f'Received: inputs={inputs} of length {len(inputs)}, and '\n f'mask={mask} of length {len(mask)}')\n if all(m is None for m in mask):\n return None\n # Make a list of masks while making sure\n # the dimensionality of each mask\n # is the same as the corresponding input.\n masks = []\n for input_i, mask_i in zip(inputs, mask):\n if mask_i is None:\n # Input is unmasked. Append all 1s to masks,\n masks.append(tf.ones_like(input_i, dtype='bool'))\n elif backend.ndim(mask_i) < backend.ndim(input_i):\n # Mask is smaller than the input, expand it\n masks.append(tf.expand_dims(mask_i, axis=-1))\n else:\n masks.append(mask_i)\n concatenated = backend.concatenate(masks, axis=self.axis)\n return backend.all(concatenated, axis=-1, keepdims=False)\n\n def get_config(self):\n config = {\n 'axis': self.axis,\n }\n base_config = super().get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n@keras_export('keras.layers.concatenate')\ndef concatenate(inputs, axis=-1, **kwargs):\n \"\"\"Functional interface to the `Concatenate` layer.\n\n >>> x = np.arange(20).reshape(2, 2, 5)\n >>> print(x)\n [[[ 0 1 2 3 4]\n [ 5 6 7 8 9]]\n [[10 11 12 13 14]\n [15 16 17 18 19]]]\n >>> y = np.arange(20, 30).reshape(2, 1, 5)\n >>> print(y)\n [[[20 21 22 23 24]]\n [[25 26 27 28 29]]]\n >>> tf.keras.layers.concatenate([x, y],\n ... 
axis=1)\n <tf.Tensor: shape=(2, 3, 5), dtype=int64, numpy=\n array([[[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [20, 21, 22, 23, 24]],\n [[10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [25, 26, 27, 28, 29]]])>\n\n Args:\n inputs: A list of input tensors.\n axis: Concatenation axis.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the concatenation of the inputs alongside axis `axis`.\n \"\"\"\n return Concatenate(axis=axis, **kwargs)(inputs)\n" ]
[ [ "tensorflow.python.util.tf_export.keras_export", "tensorflow.compat.v2.shape" ], [ "tensorflow.compat.v2.keras.Input", "tensorflow.compat.v2.keras.utils.to_categorical", "tensorflow.compat.v2.keras.layers.Dropout", "tensorflow.compat.v2.keras.layers.MaxPooling2D", "tensorflow.compat.v2.data.Dataset.from_tensor_slices", "tensorflow.compat.v2.keras.layers.Dense", "tensorflow.compat.v2.keras.layers.Conv2D", "numpy.mean", "tensorflow.compat.v2.keras.optimizers.Adam", "tensorflow.compat.v2.function", "tensorflow.compat.v2.GradientTape", "tensorflow.compat.v2.keras.layers.Flatten", "tensorflow.compat.v2.keras.losses.CategoricalCrossentropy", "tensorflow.compat.v2.keras.datasets.mnist.load_data", "tensorflow.compat.v2.nn.compute_average_loss", "numpy.expand_dims", "tensorflow.compat.v2.test.main" ], [ "tensorflow.compat.v2.ones_like", "tensorflow.compat.v2.expand_dims", "tensorflow.python.util.tf_export.keras_export" ] ]
Bodhis4ttva/LHC_Net
[ "8b47dff5117b078a99183afd1d103da06f37361c" ]
[ "Lib/Utils.py" ]
[ "from abc import ABC\r\n\r\nimport os\r\nimport random\r\nimport tensorflow as tf\r\nimport tensorflow_addons as tfa\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom shutil import copyfile\r\nimport csv\r\nfrom classification_models.tfkeras import Classifiers\r\nimport gc\r\n\r\n\r\ndef get_data(filename):\r\n csvfile = open(filename)\r\n reader = csv.reader(csvfile, delimiter=';')\r\n next(reader)\r\n data = []\r\n for row in reader:\r\n item = [row[0], row[2:]]\r\n data.append(item)\r\n images = np.zeros((len(data), 48, 48, 1), dtype='float32')\r\n labels = np.zeros((len(data)), dtype='float32')\r\n labels_full = np.zeros(shape=(len(data), 7), dtype='float32')\r\n for i in range(len(data)):\r\n images[i, :, :, :] = np.array(data[i][1]).reshape((48, 48, 1))\r\n labels[i] = np.array(data[i][0]).astype('float32')\r\n labels_full[i, int(labels[i])] = 1\r\n return images, labels_full\r\n\r\n\r\ndef etl_data(path):\r\n images, labels = get_data(path)\r\n images = tf.image.resize(images=images, size=(224, 224), method='bilinear').numpy()\r\n imagesRGB = np.zeros(shape=(images.shape[0], 224, 224, 3), dtype='float32')\r\n for i in range(images.shape[0]):\r\n imagesRGB[i, :, :, :] = tf.image.grayscale_to_rgb(tf.convert_to_tensor(images[i, :, :, :])).numpy()\r\n return imagesRGB, labels\r\n\r\n\r\nclass cb3(tf.keras.callbacks.Callback):\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n self.reports = []\r\n\r\n def on_epoch_end(self, epoch, logs={}):\r\n report = tf.keras.metrics.CategoricalAccuracy()(self.y, self.model.predict(self.x)).numpy()\r\n self.reports.append(report)\r\n print(\"Test Accuracy\", report)\r\n print(\"\")\r\n return\r\n\r\n\r\ndef augment(images, params):\r\n\r\n y = images\r\n\r\n if params['flip']:\r\n y = tf.image.flip_left_right(image=y)\r\n\r\n if params['zoom'] > 0 and params['zoom'] < 1:\r\n y = tf.image.central_crop(image=y,\r\n central_fraction=params['zoom'])\r\n y = tf.image.resize(images=y,\r\n size=[images.shape[1], images.shape[2]],\r\n method='bilinear',\r\n preserve_aspect_ratio=False)\r\n\r\n if params['shift_h'] != 0 or params['shift_v'] != 0:\r\n y = tfa.image.translate(images=y,\r\n translations=[params['shift_h'], params['shift_v']],\r\n interpolation='bilinear',\r\n fill_mode='nearest')\r\n if params['rot'] != 0:\r\n y = tfa.image.rotate(images=y,\r\n angles=params['rot'],\r\n interpolation='bilinear',\r\n fill_mode='nearest')\r\n\r\n return y\r\n\r\n\r\ndef TTA_Inference(model, x):\r\n pred_test = model.predict(x)\r\n zooms = [1] # 2\r\n rotations = [0, 0.4, -0.4] # 5\r\n shifts_h = [0, 10, -10] # 3\r\n shifts_v = [0, 10, -10] # 3\r\n flips = [False, True] # 2\r\n\r\n default_prediction_weight = 3\r\n count = default_prediction_weight\r\n predictions = default_prediction_weight*pred_test\r\n\r\n for i1 in range(len(zooms)):\r\n for i2 in range(len(rotations)):\r\n for i3 in range(len(shifts_h)):\r\n for i4 in range(len(shifts_v)):\r\n for i5 in range(len(flips)):\r\n params = {'zoom': zooms[i1],\r\n 'rot': rotations[i2],\r\n 'shift_h': shifts_h[i3],\r\n 'shift_v': shifts_v[i4],\r\n 'flip': flips[i5]}\r\n if params['zoom'] < 1 or params['rot'] != 0 or params['shift_h'] != 0 or params['shift_v'] != 0 or params['flip']:\r\n\r\n count = count + 1\r\n d = augment(x, params)\r\n preds = model.predict(d, batch_size=128)\r\n predictions = predictions + preds\r\n del d\r\n del preds\r\n del params\r\n gc.collect()\r\n gc.collect()\r\n gc.collect()\r\n\r\n Params = [[0.9, 0, 0, 0, False],\r\n [0.9, 0, 0, 0, True],\r\n [0.9, 0.15, 
0, 0, False],\r\n [0.9, 0.15, 0, 0, True],\r\n [0.9, -0.15, 0, 0, False],\r\n [0.9, -0.15, 0, 0, True]]\r\n\r\n for i in range(len(Params)):\r\n params = {'zoom': Params[i][0],\r\n 'rot': Params[i][1],\r\n 'shift_h': Params[i][2],\r\n 'shift_v': Params[i][3],\r\n 'flip': Params[i][4]}\r\n count = count + 1\r\n d = augment(x, params)\r\n preds = model.predict(d, batch_size=128)\r\n predictions = predictions + preds\r\n\r\n del d\r\n del preds\r\n del params\r\n gc.collect()\r\n gc.collect()\r\n gc.collect()\r\n\r\n predictions = predictions / count\r\n return predictions\r\n\r\n\r\ndef Check_Unique(x):\r\n lose = 0\r\n for i in range(x.shape[0]):\r\n if sum(x[i, :] == x[i, :].max()) > 1:\r\n lose = lose + 1\r\n return lose\r\n\r\n" ]
[ [ "tensorflow.image.central_crop", "numpy.array", "tensorflow.convert_to_tensor", "numpy.zeros", "tensorflow.image.resize", "tensorflow.keras.metrics.CategoricalAccuracy", "tensorflow.image.flip_left_right" ] ]
EngineerFreak/Proj_01
[ "c7c670b861ff4a5e56c333ad741a22860bebcd8d" ]
[ "main.py" ]
[ "##################################################################################\n# Project: Interface de usuário para classificadores\n# Author: Rafael Alves (EngineerFreak)\n# Created: Rafael Alves (EngineerFreak) - 14.02.2021\n# Edited :\n##################################################################################\n\nimport streamlit as st\nfrom sklearn import datasets\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#importar os classificadores\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.decomposition import PCA\n\n########## Functions ##########\n\n# definir os datasets desejados para a UI\ndef get_dataset(dataset_name):\n if dataset_name == \"Iris\":\n data = datasets.load_iris()\n elif dataset_name == \"Cancer de mama\":\n data = datasets.load_breast_cancer()\n else:\n data = datasets.load_wine()\n x = data.data\n y = data.target\n return x, y\n\n# para parametrizar os diferentes tipos de classificadores conferir a documentacao do scikit-learn.org\n# e preparar os dados importantes para o classificador para a interface gráfica\ndef add_parameter_ui(classifer_name):\n params = dict()\n if classifer_name == \"KNN\":\n K = st.sidebar.slider(\"K\", 1, 15)\n params[\"K\"] = K\n elif classifer_name == \"SVM\":\n C = st.sidebar.slider(\"C\", 0.01, 10.0)\n params[\"C\"] = C\n else: # random forest\n max_depth = st.sidebar.slider(\"max_depth\", 2, 15)\n n_estimators = st.sidebar.slider(\"n_estimators\", 1, 100)\n params[\"max_depth\"] = max_depth\n params[\"n_estimators\"] = n_estimators\n return params\n\n# Definir os classificadores\ndef get_classifier(classifier_name, params):\n if classifier_name == \"KNN\":\n classifier = KNeighborsClassifier(n_neighbors=params[\"K\"])\n elif classifier_name == \"SVM\":\n classifier = SVC(C=params[\"C\"])\n else: # random forest\n classifier = RandomForestClassifier(n_estimators=params[\"n_estimators\"],\n max_depth=params[\"max_depth\"],\n random_state= 1234)\n return classifier\n\n# Classificacao\n\n########## End Functions ##########\n\n\n# para rodar este programa colocar na linha de comando: streamlit run [endereco_do_programa]/main.py\nif __name__ == '__main__':\n st.title(\"\"\"\n Interface de usuário (Streamlit) \n ### by Rafael Alves \n #### Instagram: @iamanengineerfreak\n \"\"\")\n\n st.write(\"\"\"\n # Explorando os diferentes tipos de classificadores\n Qual é o melhor?\n \"\"\")\n\n dataset_name = st.sidebar.selectbox(\"Selecione o dataset\", (\"Iris\", \"Cancer de mama\", \"Vinho\"))\n # st.sidebar.write(dataset_name)\n\n classifier_name = st.sidebar.selectbox(\"Selecione o classificador\", (\"KNN\", \"SVM\", \"Random Forest\"))\n # st.sidebar.write(classifier_name)\n\n x, y = get_dataset(dataset_name)\n st.write(\"formato do dataset\", x.shape)\n st.write(\"número de classes\", len(np.unique(y)))\n\n params = add_parameter_ui(classifier_name)\n classifier = get_classifier(classifier_name, params)\n\n # Precesso de Classificacao\n x_treino, x_teste, y_treino, y_teste = train_test_split(x,y, test_size=0.2, random_state=5678)\n\n classifier.fit(x_treino, y_treino)\n y_predict = classifier.predict(x_teste)\n\n accuracy = accuracy_score(y_teste, y_predict)\n st.write(f\"classificador = {classifier_name}\")\n st.write(f\"accurácia = {accuracy}\")\n\n # Plot do resultado\n pca = PCA(2) # 2D\n x_projected = pca.fit_transform(x)\n 
x1 = x_projected[:, 0]\n x2 = x_projected[:, 1]\n\n figure = plt.figure()\n plt.scatter(x1, x2, c=y, alpha=0.8, cmap=\"viridis\")\n plt.xlabel(\"Componente principal 1\")\n plt.ylabel(\"Componente principal 2\")\n plt.colorbar()\n\n #plt.show()\n st.pyplot(figure)\n\n #TODO\n # 0) Visao: Criar uma plataforma para o curso onde se poderá testar os diferentes algoritmos aprendidos.\n # 1) Adcionar mais parametros para os classificadores\n # 2) adcionar mais classificadores\n # 3) preparar o algoritmo para entrada de dados externos\n # 4) aumentar a quantidade de dados para classificacao\n # 5) Mais possibilidades de visualizacao de indicadores\n # 6) criar sistema interno para outros tipos de algoritmos dentro desta plataforma\n" ]
[ [ "matplotlib.pyplot.colorbar", "sklearn.datasets.load_breast_cancer", "sklearn.ensemble.RandomForestClassifier", "matplotlib.pyplot.xlabel", "sklearn.neighbors.KNeighborsClassifier", "matplotlib.pyplot.figure", "sklearn.metrics.accuracy_score", "sklearn.svm.SVC", "matplotlib.pyplot.ylabel", "numpy.unique", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.scatter", "sklearn.datasets.load_wine", "sklearn.decomposition.PCA", "sklearn.datasets.load_iris" ] ]
Delviet/3D-ResNets-PyTorch
[ "f6bf9ce3f41527916e4cd9b856f1dc50fdb52b6e" ]
[ "datasets/kinetics.py" ]
[ "import torch\r\nimport torch.utils.data as data\r\nfrom PIL import Image\r\nimport os\r\nimport math\r\nimport functools\r\nimport json\r\nimport copy\r\n\r\nfrom utils import load_value_file\r\n\r\n\r\ndef pil_loader(path):\r\n # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)\r\n with open(path, 'rb') as f:\r\n with Image.open(f) as img:\r\n return img.convert('RGB')\r\n\r\n\r\ndef accimage_loader(path):\r\n try:\r\n import accimage\r\n return accimage.Image(path)\r\n except IOError:\r\n # Potentially a decoding problem, fall back to PIL.Image\r\n return pil_loader(path)\r\n\r\n\r\ndef get_default_image_loader():\r\n from torchvision import get_image_backend\r\n if get_image_backend() == 'accimage':\r\n return accimage_loader\r\n else:\r\n return pil_loader\r\n\r\n\r\ndef video_loader(video_dir_path, frame_indices, image_loader):\r\n video = []\r\n for i in frame_indices:\r\n image_path = os.path.join(video_dir_path, 'image_{:05d}.jpg'.format(i))\r\n if os.path.exists(image_path):\r\n video.append(image_loader(image_path))\r\n else:\r\n return video\r\n\r\n return video\r\n\r\n\r\ndef get_default_video_loader():\r\n image_loader = get_default_image_loader()\r\n return functools.partial(video_loader, image_loader=image_loader)\r\n\r\n\r\ndef load_annotation_data(data_file_path):\r\n with open(data_file_path, 'r') as data_file:\r\n return json.load(data_file)\r\n\r\n\r\ndef get_class_labels(data):\r\n class_labels_map = {}\r\n index = 0\r\n for class_label in data['labels']:\r\n class_labels_map[class_label] = index\r\n index += 1\r\n return class_labels_map\r\n\r\n\r\ndef get_video_names_and_annotations(data, subset):\r\n video_names = []\r\n annotations = []\r\n\r\n for key, value in data['database'].items():\r\n this_subset = value['subset']\r\n if this_subset == subset:\r\n if subset == 'testing':\r\n video_names.append('test/{}'.format(key))\r\n else:\r\n label = value['annotations']['label']\r\n video_names.append('{}/{}'.format(label, key))\r\n annotations.append(value['annotations'])\r\n\r\n return video_names, annotations\r\n\r\n\r\ndef make_dataset(root_path, annotation_path, subset, n_samples_for_each_video,\r\n sample_duration):\r\n data = load_annotation_data(annotation_path)\r\n video_names, annotations = get_video_names_and_annotations(data, subset)\r\n class_to_idx = get_class_labels(data)\r\n idx_to_class = {}\r\n for name, label in class_to_idx.items():\r\n idx_to_class[label] = name\r\n\r\n dataset = []\r\n for i in range(len(video_names)):\r\n if i % 1000 == 0:\r\n print('dataset loading [{}/{}]'.format(i, len(video_names)))\r\n\r\n video_path = os.path.join(root_path, video_names[i])\r\n if not os.path.exists(video_path):\r\n continue\r\n\r\n n_frames_file_path = os.path.join(video_path, 'n_frames')\r\n n_frames = int(load_value_file(n_frames_file_path))\r\n if n_frames <= 0:\r\n continue\r\n\r\n begin_t = 1\r\n end_t = n_frames\r\n sample = {\r\n 'video': video_path,\r\n 'segment': [begin_t, end_t],\r\n 'n_frames': n_frames,\r\n 'video_id': video_names[i][:-14].split('/')[1]\r\n }\r\n if len(annotations) != 0:\r\n sample['label'] = class_to_idx[annotations[i]['label']]\r\n else:\r\n sample['label'] = -1\r\n\r\n if n_samples_for_each_video == 1:\r\n sample['frame_indices'] = list(range(1, n_frames + 1))\r\n dataset.append(sample)\r\n else:\r\n if n_samples_for_each_video > 1:\r\n step = max(1,\r\n math.ceil((n_frames - 1 - sample_duration) /\r\n (n_samples_for_each_video - 1)))\r\n else:\r\n step = sample_duration\r\n 
for j in range(1, n_frames, step):\r\n                sample_j = copy.deepcopy(sample)\r\n                sample_j['frame_indices'] = list(\r\n                    range(j, min(n_frames + 1, j + sample_duration)))\r\n                dataset.append(sample_j)\r\n\r\n    return dataset, idx_to_class\r\n\r\n\r\nclass Kinetics(data.Dataset):\r\n    \"\"\"\r\n    Args:\r\n        root (string): Root directory path.\r\n        spatial_transform (callable, optional): A function/transform that takes in a PIL image\r\n            and returns a transformed version. E.g., ``transforms.RandomCrop``\r\n        temporal_transform (callable, optional): A function/transform that takes in a list of frame indices\r\n            and returns a transformed version\r\n        target_transform (callable, optional): A function/transform that takes in the\r\n            target and transforms it.\r\n        loader (callable, optional): A function to load a video given its path and frame indices.\r\n    Attributes:\r\n        classes (list): List of the class names.\r\n        class_to_idx (dict): Dict with items (class_name, class_index).\r\n        imgs (list): List of (image path, class_index) tuples\r\n    \"\"\"\r\n\r\n    def __init__(self,\r\n                 root_path,\r\n                 annotation_path,\r\n                 subset,\r\n                 n_samples_for_each_video=1,\r\n                 spatial_transform=None,\r\n                 temporal_transform=None,\r\n                 target_transform=None,\r\n                 sample_duration=16,\r\n                 get_loader=get_default_video_loader):\r\n        self.data, self.class_names = make_dataset(\r\n            root_path, annotation_path, subset, n_samples_for_each_video,\r\n            sample_duration)\r\n\r\n        self.spatial_transform = spatial_transform\r\n        self.temporal_transform = temporal_transform\r\n        self.target_transform = target_transform\r\n        self.loader = get_loader()\r\n\r\n    def __getitem__(self, index):\r\n        \"\"\"\r\n        Args:\r\n            index (int): Index\r\n        Returns:\r\n            tuple: (image, target) where target is class_index of the target class.\r\n        \"\"\"\r\n        path = self.data[index]['video']\r\n\r\n        frame_indices = self.data[index]['frame_indices']\r\n        if self.temporal_transform is not None:\r\n            frame_indices = self.temporal_transform(frame_indices)\r\n        clip = self.loader(path, frame_indices)\r\n        if self.spatial_transform is not None:\r\n            self.spatial_transform.randomize_parameters()\r\n            clip = [self.spatial_transform(img) for img in clip]\r\n        clip = torch.stack(clip, 0).permute(1, 0, 2, 3)\r\n\r\n        target = self.data[index]\r\n        if self.target_transform is not None:\r\n            target = self.target_transform(target)\r\n\r\n        return clip, target\r\n\r\n    def __len__(self):\r\n        return len(self.data)\r\n" ]
[ [ "torch.stack" ] ]
jloveric/high-order-implicit-representation
[ "0ab5118d62abd3990d63fcd600558c70d044df06" ]
[ "language_interpolation.py" ]
[ "from typing import List\n\nimport os\nfrom omegaconf import DictConfig, OmegaConf\nimport hydra\nfrom pytorch_lightning.metrics.functional import accuracy\nfrom high_order_layers_torch.layers import *\nfrom pytorch_lightning import LightningModule, Trainer\nimport torch.optim as optim\nimport torch\nfrom high_order_layers_torch.networks import *\nfrom single_text_dataset import SingleTextDataset\nfrom torchsummary import summary\nfrom single_text_dataset import dataset_from_file, encode_input_from_text, decode_output_to_text, ascii_to_float\nimport random\nfrom pytorch_lightning.metrics import Accuracy\n\n\n\nclass Net(LightningModule):\n def __init__(self, cfg: DictConfig):\n super().__init__()\n self.save_hyperparameters(cfg)\n self.cfg = cfg\n self.model = HighOrderMLP(\n layer_type=cfg.mlp.layer_type,\n n=cfg.mlp.n,\n n_in=cfg.mlp.n_in,\n n_hidden=cfg.mlp.n_in,\n n_out=cfg.mlp.n_out,\n in_width=cfg.mlp.input.width,\n in_segments=cfg.mlp.input.segments,\n out_width=128, # ascii has 128 characters\n out_segments=cfg.mlp.output.segments,\n hidden_width=cfg.mlp.hidden.width,\n hidden_layers=cfg.mlp.hidden.layers,\n hidden_segments=cfg.mlp.hidden.segments,\n )\n self.root_dir = f\"{hydra.utils.get_original_cwd()}\"\n self.loss = torch.nn.CrossEntropyLoss()\n self.accuracy = Accuracy(top_k=2)\n \n\n def forward(self, x):\n return self.model(x)\n\n def setup(self, stage):\n\n full_path = [f\"{self.root_dir}/{path}\" for path in self.cfg.filenames]\n self.train_dataset = SingleTextDataset(\n filenames=full_path, features=self.cfg.mlp.features, max_size=self.cfg.data.max_size)\n self.test_dataset = SingleTextDataset(\n filenames=full_path, features=self.cfg.mlp.features, max_size=self.cfg.data.max_size)\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n loss = self.loss(y_hat, y.flatten())\n\n diff = torch.argmax(y_hat,dim=1)-y.flatten()\n accuracy = torch.where(diff==0,1,0).sum()/len(diff)\n \n self.log(f'train_loss', loss, prog_bar=True)\n self.log(f'acc', accuracy, prog_bar=True)\n return loss\n\n def test_step(self, batch, batch_idx):\n return self.training_step(batch, batch_idx)\n\n def train_dataloader(self):\n trainloader = torch.utils.data.DataLoader(\n self.train_dataset, batch_size=self.cfg.batch_size, shuffle=True, num_workers=10)\n return trainloader\n\n def test_dataloader(self):\n\n testloader = torch.utils.data.DataLoader(\n self.test_dataset, batch_size=self.cfg.batch_size, shuffle=False, num_workers=10)\n return testloader\n\n def configure_optimizers(self):\n return optim.Adam(self.parameters(), lr=self.cfg.lr)\n\n\n@hydra.main(config_path=\"./config\", config_name=\"language_config\")\ndef run_language_interpolation(cfg: DictConfig):\n print(OmegaConf.to_yaml(cfg))\n print(\"Working directory : {}\".format(os.getcwd()))\n print(f\"Orig working directory : {hydra.utils.get_original_cwd()}\")\n\n if cfg.train is True:\n trainer = Trainer(max_epochs=cfg.max_epochs, gpus=cfg.gpus)\n model = Net(cfg)\n trainer.fit(model)\n print('testing')\n result = trainer.test(model)\n print('result', result)\n print('finished testing')\n print('best check_point', trainer.checkpoint_callback.best_model_path)\n print('loss', result[0]['train_loss'])\n return result[0]['train_loss']\n else:\n # plot some data\n print('evaluating result')\n print('cfg.checkpoint', cfg.checkpoint)\n checkpoint_path = f\"{hydra.utils.get_original_cwd()}/{cfg.checkpoint}\"\n print('checkpoint_path', checkpoint_path)\n model = Net.load_from_checkpoint(checkpoint_path)\n 
model.eval()\n\n text_in = cfg.text\n features = cfg.mlp.input.width\n\n # Make sure the prompt text is long enough. The network is expecting a prompt\n # of size features. It will take the last \"features\" characters from the\n # provided string and ignore the rest.\n text_in = text_in.rjust(features)\n\n for i in range(cfg.num_predict):\n encoding, text_used = encode_input_from_text(\n text_in=text_in, features=features)\n encoding = ascii_to_float(encoding).unsqueeze(dim=0)\n model.eval()\n output = model(encoding)\n values, indices, ascii = decode_output_to_text(\n encoding=output[0], topk=cfg.topk)\n\n # pick the next character weighted by probabilities of each character\n # prevents the same response for every query.\n actual = random.choices(ascii, values.tolist())\n text_in = text_in+actual[0]\n\n print('output:', text_in.replace('\\n', ' '))\n\n\nif __name__ == \"__main__\":\n run_language_interpolation()\n" ]
[ [ "torch.argmax", "torch.nn.CrossEntropyLoss", "torch.where", "torch.utils.data.DataLoader" ] ]
B0BBB/seq2seq.pytorch
[ "54bb0e9f3e5c7db7f257841ed652e8ff447b8ee4" ]
[ "seq2seq/models/transformer.py" ]
[ "import math\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .modules.normalization import LayerNorm1d\nfrom .modules.attention import MultiHeadAttention\nfrom .seq2seq_base import Seq2Seq\nfrom seq2seq.tools.config import PAD\nfrom .modules.state import State\nfrom .modules.weight_norm import weight_norm as wn\n\n\ndef positional_embedding(x, min_timescale=1.0, max_timescale=1.0e4):\n batch, length, channels = list(x.size())\n assert (channels % 2 == 0)\n num_timescales = channels // 2\n log_timescale_increment = (\n math.log(float(max_timescale) / float(min_timescale)) /\n (float(num_timescales) - 1.))\n position = torch.arange(0, length).float()\n inv_timescales = torch.arange(0, num_timescales).float()\n if x.is_cuda:\n position = position.cuda()\n inv_timescales = inv_timescales.cuda()\n\n inv_timescales.mul_(-log_timescale_increment).exp_().mul_(min_timescale)\n scaled_time = position.unsqueeze(1).expand(\n length, num_timescales) * inv_timescales.unsqueeze(0).expand(length, num_timescales)\n # scaled time is now length x num_timescales\n # length x channels\n signal = torch.cat([scaled_time.sin(), scaled_time.cos()], 1)\n return signal.unsqueeze(0).expand(batch, length, channels)\n\n\nclass EncoderBlock(nn.Module):\n\n def __init__(self, hidden_size=512, num_heads=8, inner_linear=1024, layer_norm=True, weight_norm=False, dropout=0):\n\n super(EncoderBlock, self).__init__()\n wn_func = wn if weight_norm else lambda x: x\n if layer_norm:\n self.lnorm1 = LayerNorm1d(hidden_size)\n self.lnorm2 = LayerNorm1d(hidden_size)\n self.dropout = nn.Dropout(dropout)\n self.attention = MultiHeadAttention(\n hidden_size, hidden_size, num_heads, dropout=dropout, causal=False, weight_norm=weight_norm)\n self.fc = nn.Sequential(wn_func(nn.Linear(hidden_size, inner_linear)),\n nn.ReLU(inplace=True),\n nn.Dropout(dropout),\n wn_func(nn.Linear(inner_linear, hidden_size)))\n\n def set_mask(self, mask):\n self.attention.set_mask_q(mask)\n self.attention.set_mask_k(mask)\n\n def forward(self, inputs):\n x = inputs\n res = x\n x, _ = self.attention(x, x, x)\n x = self.dropout(x).add_(res)\n x = self.lnorm1(x) if hasattr(self, 'lnorm1') else x\n res = x\n x = self.fc(x)\n x = self.dropout(x).add_(res)\n x = self.lnorm2(x) if hasattr(self, 'lnorm2') else x\n return x\n\n\nclass DecoderBlock(nn.Module):\n\n def __init__(self, hidden_size=512, num_heads=8, inner_linear=1024, layer_norm=True, weight_norm=False, dropout=0):\n\n super(DecoderBlock, self).__init__()\n wn_func = wn if weight_norm else lambda x: x\n if layer_norm:\n self.lnorm1 = LayerNorm1d(hidden_size)\n self.lnorm2 = LayerNorm1d(hidden_size)\n self.lnorm3 = LayerNorm1d(hidden_size)\n self.dropout = nn.Dropout(dropout)\n self.weight_norm = weight_norm\n self.attention = MultiHeadAttention(\n hidden_size, hidden_size, num_heads, dropout=dropout, causal=False, weight_norm=weight_norm)\n self.masked_attention = MultiHeadAttention(\n hidden_size, hidden_size, num_heads, dropout=dropout, causal=True, weight_norm=weight_norm)\n self.fc = nn.Sequential(wn_func(nn.Linear(hidden_size, inner_linear)),\n nn.ReLU(inplace=True),\n nn.Dropout(dropout),\n wn_func(nn.Linear(inner_linear, hidden_size)))\n\n def set_mask(self, mask, context_mask=None):\n if context_mask is not None:\n self.attention.set_mask_k(context_mask)\n self.masked_attention.set_mask_q(mask)\n self.masked_attention.set_mask_k(mask)\n\n def forward(self, inputs, context):\n x = inputs\n res = x\n x, _ = self.masked_attention(x, 
x, x)\n x = self.dropout(x).add_(res)\n x = self.lnorm1(x) if hasattr(self, 'lnorm1') else x\n res = x\n x, attn_enc = self.attention(x, context, context)\n x = self.dropout(x).add_(res)\n x = self.lnorm2(x) if hasattr(self, 'lnorm2') else x\n res = x\n x = self.fc(x)\n x = self.dropout(x).add_(res)\n x = self.lnorm3(x) if hasattr(self, 'lnorm3') else x\n\n return x, attn_enc\n\n\nclass TransformerAttentionEncoder(nn.Module):\n\n def __init__(self, vocab_size, hidden_size=512, embedding_size=None,\n num_layers=6, num_heads=8, inner_linear=1024,\n mask_symbol=PAD, layer_norm=True, weight_norm=False, dropout=0):\n\n super(TransformerAttentionEncoder, self).__init__()\n embedding_size = embedding_size or hidden_size\n self.hidden_size = hidden_size\n self.batch_first = True\n self.mask_symbol = mask_symbol\n self.embedder = nn.Embedding(\n vocab_size, embedding_size, padding_idx=PAD)\n self.scale_embedding = hidden_size ** 0.5\n self.dropout = nn.Dropout(dropout, inplace=True)\n self.blocks = nn.ModuleList([EncoderBlock(hidden_size, num_heads, inner_linear, layer_norm, weight_norm, dropout)\n for _ in range(num_layers)\n ])\n\n def forward(self, inputs, hidden=None):\n if self.mask_symbol is not None:\n padding_mask = inputs.eq(self.mask_symbol)\n else:\n padding_mask = None\n x = self.embedder(inputs).mul_(self.scale_embedding)\n x.add_(Variable(positional_embedding(x), requires_grad=False))\n x = self.dropout(x)\n\n for block in self.blocks:\n block.set_mask(padding_mask)\n x = block(x)\n\n return State(outputs=x, mask=padding_mask, batch_first=True)\n\n\nclass TransformerAttentionDecoder(nn.Module):\n\n def __init__(self, vocab_size, hidden_size=512, embedding_size=None,\n num_layers=6, num_heads=8, dropout=0, inner_linear=1024,\n mask_symbol=PAD, tie_embedding=True, layer_norm=True, weight_norm=False):\n\n super(TransformerAttentionDecoder, self).__init__()\n embedding_size = embedding_size or hidden_size\n self.batch_first = True\n self.mask_symbol = mask_symbol\n self.embedder = nn.Embedding(\n vocab_size, embedding_size, padding_idx=PAD)\n self.scale_embedding = hidden_size ** 0.5\n self.dropout = nn.Dropout(dropout, inplace=True)\n self.blocks = nn.ModuleList([DecoderBlock(hidden_size, num_heads, inner_linear, layer_norm, weight_norm, dropout)\n for _ in range(num_layers)\n ])\n self.classifier = nn.Linear(hidden_size, vocab_size)\n if tie_embedding:\n self.embedder.weight = self.classifier.weight\n\n def forward(self, inputs, state, get_attention=False):\n context = state.context\n if self.mask_symbol is not None:\n padding_mask = inputs.eq(self.mask_symbol)\n else:\n padding_mask = None\n x = self.embedder(inputs).mul_(self.scale_embedding)\n x.add_(Variable(positional_embedding(x), requires_grad=False))\n x = self.dropout(x)\n\n attention_scores = []\n for block in self.blocks:\n block.set_mask(padding_mask, context.mask)\n x, attn_enc = block(x, context.outputs)\n if get_attention:\n attention_scores.append(attn_enc)\n else:\n del attn_enc\n x = self.classifier(x)\n if get_attention:\n state.attention_score = attention_scores\n return x, state\n\n\nclass Transformer(Seq2Seq):\n\n def __init__(self, vocab_size, hidden_size=512, embedding_size=None, num_layers=6,\n num_heads=8, inner_linear=2048, dropout=0.1, tie_embedding=True,\n encoder=None, decoder=None, layer_norm=True, weight_norm=False):\n super(Transformer, self).__init__()\n embedding_size = embedding_size or hidden_size\n # keeping encoder, decoder None will result with default configuration\n encoder = encoder or {}\n 
decoder = decoder or {}\n encoder.setdefault('embedding_size', embedding_size)\n encoder.setdefault('hidden_size', hidden_size)\n encoder.setdefault('num_layers', num_layers)\n encoder.setdefault('num_heads', num_heads)\n encoder.setdefault('vocab_size', vocab_size)\n encoder.setdefault('layer_norm', layer_norm)\n encoder.setdefault('weight_norm', weight_norm)\n encoder.setdefault('dropout', dropout)\n encoder.setdefault('inner_linear', inner_linear)\n\n decoder.setdefault('embedding_size', embedding_size)\n decoder.setdefault('hidden_size', hidden_size)\n decoder.setdefault('num_layers', num_layers)\n decoder.setdefault('num_heads', num_heads)\n decoder.setdefault('tie_embedding', tie_embedding)\n decoder.setdefault('vocab_size', vocab_size)\n decoder.setdefault('layer_norm', layer_norm)\n decoder.setdefault('weight_norm', weight_norm)\n decoder.setdefault('dropout', dropout)\n decoder.setdefault('inner_linear', inner_linear)\n\n self.batch_first = True\n self.encoder = TransformerAttentionEncoder(**encoder)\n self.decoder = TransformerAttentionDecoder(**decoder)\n\n if tie_embedding:\n self.encoder.embedder.weight = self.decoder.classifier.weight\n\n def generate(self, input_list, state_list, k=1, feed_all_timesteps=True, get_attention=False):\n # TODO cache computation, not inputs\n return super(Transformer, self).generate(input_list, state_list, k=k,\n feed_all_timesteps=feed_all_timesteps,\n get_attention=get_attention)\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.arange", "torch.nn.ReLU", "torch.nn.Embedding" ] ]
duncanmmacleod/cpnest
[ "5b3cbcfb236f7350e7272dc5fc7f14990f34cdff" ]
[ "cpnest/cpnest.py" ]
[ "#! /usr/bin/env python\n# coding: utf-8\n\nimport multiprocessing as mp\nfrom ctypes import c_double, c_int\nimport numpy as np\nimport os\nimport sys\nimport signal\n\nfrom multiprocessing.sharedctypes import Value, Array\nfrom multiprocessing import Lock\nfrom multiprocessing.managers import SyncManager\n\nimport cProfile\n\n\nclass CheckPoint(Exception):\n print(\"Checkpoint exception raise\")\n pass\n\n\ndef sighandler(signal, frame):\n print(\"Handling signal {}\".format(signal))\n raise CheckPoint()\n\n\nclass CPNest(object):\n \"\"\"\n Class to control CPNest sampler\n cp = CPNest(usermodel,nlive=100,output='./',verbose=0,seed=None,maxmcmc=100,nthreads=None,balanced_sampling = True)\n \n Input variables:\n usermodel : an object inheriting cpnest.model.Model that defines the user's problem\n nlive : Number of live points (100)\n poolsize: Number of objects in the sampler pool (100)\n output : output directory (./)\n verbose: Verbosity, 0=silent, 1=progress, 2=diagnostic, 3=detailed diagnostic\n seed: random seed (default: 1234)\n maxmcmc: maximum MCMC points for sampling chains (100)\n nthreads: number of parallel samplers. Default (None) uses mp.cpu_count() to autodetermine\n nhamiltomnian: number of sampler threads using an hamiltonian samplers. Default: 0\n resume: determines whether cpnest will resume a run or run from scratch. Default: False.\n proposal: dictionary/list with custom jump proposals. key 'mhs' for the\n Metropolis-Hastings sampler, 'hmc' for the Hamiltonian Monte-Carlo sampler. Default: None\n n_periodic_checkpoint: int\n checkpoint the sampler every n_periodic_checkpoint iterations\n Default: None (disabled)\n \n \"\"\"\n def __init__(self,\n usermodel,\n nlive = 100,\n poolsize = 100,\n output = './',\n verbose = 0,\n seed = None,\n maxmcmc = 100,\n nthreads = None,\n nhamiltonian = 0,\n resume = False,\n proposals = None,\n n_periodic_checkpoint = None):\n if nthreads is None:\n self.nthreads = mp.cpu_count()\n else:\n self.nthreads = nthreads\n\n print('Running with {0} parallel threads'.format(self.nthreads))\n from .sampler import HamiltonianMonteCarloSampler, MetropolisHastingsSampler\n from .NestedSampling import NestedSampler\n from .proposal import DefaultProposalCycle, HamiltonianProposalCycle\n if proposals is None:\n proposals = dict(mhs=DefaultProposalCycle,\n hmc=HamiltonianProposalCycle)\n elif type(proposals) == list:\n proposals = dict(mhs=proposals[0],\n hmc=proposals[1])\n self.nlive = nlive\n self.verbose = verbose\n self.output = output\n self.poolsize = poolsize\n self.posterior_samples = None\n self.manager = RunManager(nthreads=self.nthreads)\n self.manager.start()\n self.user = usermodel\n self.resume = resume\n\n if seed is None: self.seed=1234\n else:\n self.seed=seed\n \n self.process_pool = []\n \n # instantiate the nested sampler class\n resume_file = os.path.join(output, \"nested_sampler_resume.pkl\")\n if not os.path.exists(resume_file) or resume == False:\n self.NS = NestedSampler(self.user,\n nlive = nlive,\n output = output,\n verbose = verbose,\n seed = self.seed,\n prior_sampling = False,\n manager = self.manager,\n n_periodic_checkpoint = n_periodic_checkpoint)\n else:\n self.NS = NestedSampler.resume(resume_file, self.manager, self.user)\n\n # instantiate the sampler class\n for i in range(self.nthreads-nhamiltonian):\n resume_file = os.path.join(output, \"sampler_{0:d}.pkl\".format(i))\n if not os.path.exists(resume_file) or resume == False:\n sampler = MetropolisHastingsSampler(self.user,\n maxmcmc,\n verbose = 
verbose,\n output = output,\n poolsize = poolsize,\n seed = self.seed+i,\n proposal = proposals['mhs'](),\n resume_file = resume_file,\n manager = self.manager\n )\n else:\n sampler = MetropolisHastingsSampler.resume(resume_file,\n self.manager,\n self.user)\n\n p = mp.Process(target=sampler.produce_sample)\n self.process_pool.append(p)\n \n for i in range(self.nthreads-nhamiltonian,self.nthreads):\n resume_file = os.path.join(output, \"sampler_{0:d}.pkl\".format(i))\n if not os.path.exists(resume_file) or resume == False:\n sampler = HamiltonianMonteCarloSampler(self.user,\n maxmcmc,\n verbose = verbose,\n output = output,\n poolsize = poolsize,\n seed = self.seed+i,\n proposal = proposals['hmc'](model=self.user),\n resume_file = resume_file,\n manager = self.manager\n )\n else:\n sampler = HamiltonianMonteCarloSampler.resume(resume_file,\n self.manager,\n self.user)\n p = mp.Process(target=sampler.produce_sample)\n self.process_pool.append(p)\n\n def run(self):\n \"\"\"\n Run the sampler\n \"\"\"\n if self.resume:\n signal.signal(signal.SIGTERM, sighandler)\n signal.signal(signal.SIGALRM, sighandler)\n signal.signal(signal.SIGQUIT, sighandler)\n signal.signal(signal.SIGINT, sighandler)\n signal.signal(signal.SIGUSR1, sighandler)\n signal.signal(signal.SIGUSR2, sighandler)\n \n #self.p_ns.start()\n for each in self.process_pool:\n each.start()\n try:\n self.NS.nested_sampling_loop()\n for each in self.process_pool:\n each.join()\n except CheckPoint:\n self.checkpoint()\n sys.exit(130)\n\n self.posterior_samples = self.get_posterior_samples(filename=None)\n if self.verbose>1: self.plot()\n \n #TODO: Clean up the resume pickles\n\n def get_nested_samples(self, filename='nested_samples.dat'):\n \"\"\"\n returns nested sampling chain\n Parameters\n ----------\n filename : string\n If given, file to save nested samples to\n\n Returns\n -------\n pos : :obj:`numpy.ndarray`\n \"\"\"\n import numpy.lib.recfunctions as rfn\n self.nested_samples = rfn.stack_arrays(\n [s.asnparray()\n for s in self.NS.nested_samples]\n ,usemask=False)\n if filename:\n np.savetxt(os.path.join(\n self.NS.output_folder,'nested_samples.dat'),\n self.nested_samples.ravel(),\n header=' '.join(self.nested_samples.dtype.names),\n newline='\\n',delimiter=' ')\n return self.nested_samples\n\n def get_posterior_samples(self, filename='posterior.dat'):\n \"\"\"\n Returns posterior samples\n\n Parameters\n ----------\n filename : string\n If given, file to save posterior samples to\n\n Returns\n -------\n pos : :obj:`numpy.ndarray`\n \"\"\"\n import numpy as np\n import os\n from .nest2pos import draw_posterior_many\n nested_samples = self.get_nested_samples()\n posterior_samples = draw_posterior_many([nested_samples],[self.nlive],verbose=self.verbose)\n posterior_samples = np.array(posterior_samples)\n # TODO: Replace with something to output samples in whatever format\n if filename:\n np.savetxt(os.path.join(\n self.NS.output_folder,'posterior.dat'),\n self.posterior_samples.ravel(),\n header=' '.join(posterior_samples.dtype.names),\n newline='\\n',delimiter=' ')\n return posterior_samples\n\n def plot(self, corner = True):\n \"\"\"\n Make diagnostic plots of the posterior and nested samples\n \"\"\"\n pos = self.posterior_samples\n from . 
import plot\n for n in pos.dtype.names:\n plot.plot_hist(pos[n].ravel(),name=n,filename=os.path.join(self.output,'posterior_{0}.png'.format(n)))\n for n in self.nested_samples.dtype.names:\n plot.plot_chain(self.nested_samples[n],name=n,filename=os.path.join(self.output,'nschain_{0}.png'.format(n)))\n import numpy as np\n plotting_posteriors = np.squeeze(pos.view((pos.dtype[0], len(pos.dtype.names))))\n if corner: plot.plot_corner(plotting_posteriors,labels=pos.dtype.names,filename=os.path.join(self.output,'corner.png'))\n\n def worker_sampler(self, producer_pipe, logLmin):\n cProfile.runctx('self.sampler.produce_sample(producer_pipe, logLmin)', globals(), locals(), 'prof_sampler.prof')\n \n def worker_ns(self):\n cProfile.runctx('self.NS.nested_sampling_loop(self.consumer_pipes)', globals(), locals(), 'prof_nested_sampling.prof')\n\n def profile(self):\n for i in range(0,self.NUMBER_OF_PRODUCER_PROCESSES):\n p = mp.Process(target=self.worker_sampler, args=(self.queues[i%len(self.queues)], self.NS.logLmin ))\n self.process_pool.append(p)\n for i in range(0,self.NUMBER_OF_CONSUMER_PROCESSES):\n p = mp.Process(target=self.worker_ns, args=(self.queues, self.port, self.authkey))\n self.process_pool.append(p)\n for each in self.process_pool:\n each.start()\n\n def checkpoint(self):\n self.manager.checkpoint_flag=1\n\n\nclass RunManager(SyncManager):\n def __init__(self, nthreads=None, **kwargs):\n super(RunManager,self).__init__(**kwargs)\n self.nconnected=mp.Value(c_int,0)\n self.producer_pipes = list()\n self.consumer_pipes = list()\n for i in range(nthreads):\n consumer, producer = mp.Pipe(duplex=True)\n self.producer_pipes.append(producer)\n self.consumer_pipes.append(consumer)\n self.logLmin=None\n self.nthreads=nthreads\n\n def start(self):\n super(RunManager, self).start()\n self.logLmin = mp.Value(c_double,-np.inf)\n self.checkpoint_flag=mp.Value(c_int,0)\n\n def connect_producer(self):\n \"\"\"\n Returns the producer's end of the pipe\n \"\"\"\n with self.nconnected.get_lock():\n n = self.nconnected.value\n pipe = self.producer_pipes[n]\n self.nconnected.value+=1\n return pipe, n\n" ]
[ [ "numpy.array" ] ]
tomstitt/PyMFEM
[ "b00199ec0d7a5fba891f656575e91a64d3e35eb5" ]
[ "mfem/_par/pmesh.py" ]
[ "# This file was automatically generated by SWIG (http://www.swig.org).\n# Version 4.0.2\n#\n# Do not make changes to this file unless you know what you are doing--modify\n# the SWIG interface file instead.\n\nfrom sys import version_info as _swig_python_version_info\nif _swig_python_version_info < (2, 7, 0):\n raise RuntimeError(\"Python 2.7 or later required\")\n\n# Import the low-level C/C++ module\nif __package__ or \".\" in __name__:\n from . import _pmesh\nelse:\n import _pmesh\n\ntry:\n import builtins as __builtin__\nexcept ImportError:\n import __builtin__\n\n_swig_new_instance_method = _pmesh.SWIG_PyInstanceMethod_New\n_swig_new_static_method = _pmesh.SWIG_PyStaticMethod_New\n\ndef _swig_repr(self):\n try:\n strthis = \"proxy of \" + self.this.__repr__()\n except __builtin__.Exception:\n strthis = \"\"\n return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,)\n\n\ndef _swig_setattr_nondynamic_instance_variable(set):\n def set_instance_attr(self, name, value):\n if name == \"thisown\":\n self.this.own(value)\n elif name == \"this\":\n set(self, name, value)\n elif hasattr(self, name) and isinstance(getattr(type(self), name), property):\n set(self, name, value)\n else:\n raise AttributeError(\"You cannot add instance attributes to %s\" % self)\n return set_instance_attr\n\n\ndef _swig_setattr_nondynamic_class_variable(set):\n def set_class_attr(cls, name, value):\n if hasattr(cls, name) and not isinstance(getattr(cls, name), property):\n set(cls, name, value)\n else:\n raise AttributeError(\"You cannot add class attributes to %s\" % cls)\n return set_class_attr\n\n\ndef _swig_add_metaclass(metaclass):\n \"\"\"Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass\"\"\"\n def wrapper(cls):\n return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())\n return wrapper\n\n\nclass _SwigNonDynamicMeta(type):\n \"\"\"Meta class to enforce nondynamic attributes (no new attributes) for a class\"\"\"\n __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)\n\n\nimport weakref\n\nMFEM_VERSION = _pmesh.MFEM_VERSION\n\nMFEM_VERSION_STRING = _pmesh.MFEM_VERSION_STRING\n\nMFEM_VERSION_TYPE = _pmesh.MFEM_VERSION_TYPE\n\nMFEM_VERSION_TYPE_RELEASE = _pmesh.MFEM_VERSION_TYPE_RELEASE\n\nMFEM_VERSION_TYPE_DEVELOPMENT = _pmesh.MFEM_VERSION_TYPE_DEVELOPMENT\n\nMFEM_VERSION_MAJOR = _pmesh.MFEM_VERSION_MAJOR\n\nMFEM_VERSION_MINOR = _pmesh.MFEM_VERSION_MINOR\n\nMFEM_VERSION_PATCH = _pmesh.MFEM_VERSION_PATCH\n\nMFEM_HYPRE_VERSION = _pmesh.MFEM_HYPRE_VERSION\n\nimport mfem._par.mesh\nimport mfem._par.matrix\nimport mfem._par.vector\nimport mfem._par.array\nimport mfem._par.mem_manager\nimport mfem._par.operators\nimport mfem._par.sort_pairs\nimport mfem._par.ncmesh\nimport mfem._par.vtk\nimport mfem._par.element\nimport mfem._par.globals\nimport mfem._par.densemat\nimport mfem._par.geom\nimport mfem._par.intrules\nimport mfem._par.table\nimport mfem._par.hash\nimport mfem._par.vertex\nimport mfem._par.gridfunc\nimport mfem._par.coefficient\nimport mfem._par.sparsemat\nimport mfem._par.eltrans\nimport mfem._par.fe\nimport mfem._par.fespace\nimport mfem._par.fe_coll\nimport mfem._par.lininteg\nimport mfem._par.handle\nimport mfem._par.hypre\nimport mfem._par.restriction\nimport mfem._par.bilininteg\nimport mfem._par.linearform\nimport mfem._par.pncmesh\nimport mfem._par.communication\nimport mfem._par.sets\nclass ParMesh(mfem._par.mesh.Mesh):\n r\"\"\"Proxy of C++ mfem::ParMesh class.\"\"\"\n\n 
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc=\"The membership flag\")\n __repr__ = _swig_repr\n\n def Finalize(self, refine=False, fix_orientation=False):\n r\"\"\"Finalize(ParMesh self, bool refine=False, bool fix_orientation=False)\"\"\"\n return _pmesh.ParMesh_Finalize(self, refine, fix_orientation)\n Finalize = _swig_new_instance_method(_pmesh.ParMesh_Finalize)\n\n def SetAttributes(self):\n r\"\"\"SetAttributes(ParMesh self)\"\"\"\n return _pmesh.ParMesh_SetAttributes(self)\n SetAttributes = _swig_new_instance_method(_pmesh.ParMesh_SetAttributes)\n\n def GetComm(self):\n r\"\"\"GetComm(ParMesh self) -> MPI_Comm\"\"\"\n return _pmesh.ParMesh_GetComm(self)\n GetComm = _swig_new_instance_method(_pmesh.ParMesh_GetComm)\n\n def GetNRanks(self):\n r\"\"\"GetNRanks(ParMesh self) -> int\"\"\"\n return _pmesh.ParMesh_GetNRanks(self)\n GetNRanks = _swig_new_instance_method(_pmesh.ParMesh_GetNRanks)\n\n def GetMyRank(self):\n r\"\"\"GetMyRank(ParMesh self) -> int\"\"\"\n return _pmesh.ParMesh_GetMyRank(self)\n GetMyRank = _swig_new_instance_method(_pmesh.ParMesh_GetMyRank)\n\n def GetLocalElementNum(self, global_element_num):\n r\"\"\"GetLocalElementNum(ParMesh self, long global_element_num) -> int\"\"\"\n return _pmesh.ParMesh_GetLocalElementNum(self, global_element_num)\n GetLocalElementNum = _swig_new_instance_method(_pmesh.ParMesh_GetLocalElementNum)\n\n def GetGlobalElementNum(self, local_element_num):\n r\"\"\"GetGlobalElementNum(ParMesh self, int local_element_num) -> long\"\"\"\n return _pmesh.ParMesh_GetGlobalElementNum(self, local_element_num)\n GetGlobalElementNum = _swig_new_instance_method(_pmesh.ParMesh_GetGlobalElementNum)\n gtopo = property(_pmesh.ParMesh_gtopo_get, doc=r\"\"\"gtopo : mfem::GroupTopology\"\"\")\n have_face_nbr_data = property(_pmesh.ParMesh_have_face_nbr_data_get, _pmesh.ParMesh_have_face_nbr_data_set, doc=r\"\"\"have_face_nbr_data : bool\"\"\")\n face_nbr_group = property(_pmesh.ParMesh_face_nbr_group_get, _pmesh.ParMesh_face_nbr_group_set, doc=r\"\"\"face_nbr_group : mfem::Array<(int)>\"\"\")\n face_nbr_elements_offset = property(_pmesh.ParMesh_face_nbr_elements_offset_get, _pmesh.ParMesh_face_nbr_elements_offset_set, doc=r\"\"\"face_nbr_elements_offset : mfem::Array<(int)>\"\"\")\n face_nbr_vertices_offset = property(_pmesh.ParMesh_face_nbr_vertices_offset_get, _pmesh.ParMesh_face_nbr_vertices_offset_set, doc=r\"\"\"face_nbr_vertices_offset : mfem::Array<(int)>\"\"\")\n face_nbr_elements = property(_pmesh.ParMesh_face_nbr_elements_get, doc=r\"\"\"face_nbr_elements : mfem::Array<(p.mfem::Element)>\"\"\")\n face_nbr_vertices = property(_pmesh.ParMesh_face_nbr_vertices_get, doc=r\"\"\"face_nbr_vertices : mfem::Array<(mfem::Vertex)>\"\"\")\n send_face_nbr_elements = property(_pmesh.ParMesh_send_face_nbr_elements_get, _pmesh.ParMesh_send_face_nbr_elements_set, doc=r\"\"\"send_face_nbr_elements : mfem::Table\"\"\")\n send_face_nbr_vertices = property(_pmesh.ParMesh_send_face_nbr_vertices_get, _pmesh.ParMesh_send_face_nbr_vertices_set, doc=r\"\"\"send_face_nbr_vertices : mfem::Table\"\"\")\n pncmesh = property(_pmesh.ParMesh_pncmesh_get, _pmesh.ParMesh_pncmesh_set, doc=r\"\"\"pncmesh : p.mfem::ParNCMesh\"\"\")\n\n def GetNGroups(self):\n r\"\"\"GetNGroups(ParMesh self) -> int\"\"\"\n return _pmesh.ParMesh_GetNGroups(self)\n GetNGroups = _swig_new_instance_method(_pmesh.ParMesh_GetNGroups)\n\n def GroupNVertices(self, group):\n r\"\"\"GroupNVertices(ParMesh self, int group) -> int\"\"\"\n return _pmesh.ParMesh_GroupNVertices(self, 
group)\n GroupNVertices = _swig_new_instance_method(_pmesh.ParMesh_GroupNVertices)\n\n def GroupNEdges(self, group):\n r\"\"\"GroupNEdges(ParMesh self, int group) -> int\"\"\"\n return _pmesh.ParMesh_GroupNEdges(self, group)\n GroupNEdges = _swig_new_instance_method(_pmesh.ParMesh_GroupNEdges)\n\n def GroupNTriangles(self, group):\n r\"\"\"GroupNTriangles(ParMesh self, int group) -> int\"\"\"\n return _pmesh.ParMesh_GroupNTriangles(self, group)\n GroupNTriangles = _swig_new_instance_method(_pmesh.ParMesh_GroupNTriangles)\n\n def GroupNQuadrilaterals(self, group):\n r\"\"\"GroupNQuadrilaterals(ParMesh self, int group) -> int\"\"\"\n return _pmesh.ParMesh_GroupNQuadrilaterals(self, group)\n GroupNQuadrilaterals = _swig_new_instance_method(_pmesh.ParMesh_GroupNQuadrilaterals)\n\n def GroupVertex(self, group, i):\n r\"\"\"GroupVertex(ParMesh self, int group, int i) -> int\"\"\"\n return _pmesh.ParMesh_GroupVertex(self, group, i)\n GroupVertex = _swig_new_instance_method(_pmesh.ParMesh_GroupVertex)\n\n def GroupEdge(self, group, i, *args):\n if len(args) == 0:\n from mfem.par import intp \n edge = intp()\n o = intp() \n _pmesh.ParMesh_GroupEdge(self, group, i, edge, o)\n return edge.value(), o.value()\n else:\n return _pmesh.ParMesh_GroupEdge(self, group, i, *args) \n\n\n\n def GroupTriangle(self, group, i, face, o):\n r\"\"\"GroupTriangle(ParMesh self, int group, int i, int & face, int & o)\"\"\"\n return _pmesh.ParMesh_GroupTriangle(self, group, i, face, o)\n GroupTriangle = _swig_new_instance_method(_pmesh.ParMesh_GroupTriangle)\n\n def GroupQuadrilateral(self, group, i, face, o):\n r\"\"\"GroupQuadrilateral(ParMesh self, int group, int i, int & face, int & o)\"\"\"\n return _pmesh.ParMesh_GroupQuadrilateral(self, group, i, face, o)\n GroupQuadrilateral = _swig_new_instance_method(_pmesh.ParMesh_GroupQuadrilateral)\n\n def GenerateOffsets(self, N, loc_sizes, offsets):\n r\"\"\"GenerateOffsets(ParMesh self, int N, HYPRE_Int [] loc_sizes, mfem::Array< HYPRE_Int > *[] offsets)\"\"\"\n return _pmesh.ParMesh_GenerateOffsets(self, N, loc_sizes, offsets)\n GenerateOffsets = _swig_new_instance_method(_pmesh.ParMesh_GenerateOffsets)\n\n def ExchangeFaceNbrData(self):\n r\"\"\"ExchangeFaceNbrData(ParMesh self)\"\"\"\n return _pmesh.ParMesh_ExchangeFaceNbrData(self)\n ExchangeFaceNbrData = _swig_new_instance_method(_pmesh.ParMesh_ExchangeFaceNbrData)\n\n def ExchangeFaceNbrNodes(self):\n r\"\"\"ExchangeFaceNbrNodes(ParMesh self)\"\"\"\n return _pmesh.ParMesh_ExchangeFaceNbrNodes(self)\n ExchangeFaceNbrNodes = _swig_new_instance_method(_pmesh.ParMesh_ExchangeFaceNbrNodes)\n\n def SetCurvature(self, order, discont=False, space_dim=-1, ordering=1):\n r\"\"\"SetCurvature(ParMesh self, int order, bool discont=False, int space_dim=-1, int ordering=1)\"\"\"\n return _pmesh.ParMesh_SetCurvature(self, order, discont, space_dim, ordering)\n SetCurvature = _swig_new_instance_method(_pmesh.ParMesh_SetCurvature)\n\n def GetNFaceNeighbors(self):\n r\"\"\"GetNFaceNeighbors(ParMesh self) -> int\"\"\"\n return _pmesh.ParMesh_GetNFaceNeighbors(self)\n GetNFaceNeighbors = _swig_new_instance_method(_pmesh.ParMesh_GetNFaceNeighbors)\n\n def GetFaceNbrGroup(self, fn):\n r\"\"\"GetFaceNbrGroup(ParMesh self, int fn) -> int\"\"\"\n return _pmesh.ParMesh_GetFaceNbrGroup(self, fn)\n GetFaceNbrGroup = _swig_new_instance_method(_pmesh.ParMesh_GetFaceNbrGroup)\n\n def GetFaceNbrRank(self, fn):\n r\"\"\"GetFaceNbrRank(ParMesh self, int fn) -> int\"\"\"\n return _pmesh.ParMesh_GetFaceNbrRank(self, fn)\n GetFaceNbrRank = 
_swig_new_instance_method(_pmesh.ParMesh_GetFaceNbrRank)\n\n def GetFaceToAllElementTable(self):\n r\"\"\"GetFaceToAllElementTable(ParMesh self) -> Table\"\"\"\n return _pmesh.ParMesh_GetFaceToAllElementTable(self)\n GetFaceToAllElementTable = _swig_new_instance_method(_pmesh.ParMesh_GetFaceToAllElementTable)\n\n def GetSharedFaceTransformations(self, sf, fill2=True):\n r\"\"\"GetSharedFaceTransformations(ParMesh self, int sf, bool fill2=True) -> FaceElementTransformations\"\"\"\n return _pmesh.ParMesh_GetSharedFaceTransformations(self, sf, fill2)\n GetSharedFaceTransformations = _swig_new_instance_method(_pmesh.ParMesh_GetSharedFaceTransformations)\n\n def GetFaceNbrElementTransformation(self, i):\n r\"\"\"GetFaceNbrElementTransformation(ParMesh self, int i) -> ElementTransformation\"\"\"\n return _pmesh.ParMesh_GetFaceNbrElementTransformation(self, i)\n GetFaceNbrElementTransformation = _swig_new_instance_method(_pmesh.ParMesh_GetFaceNbrElementTransformation)\n\n def GetNSharedFaces(self):\n r\"\"\"GetNSharedFaces(ParMesh self) -> int\"\"\"\n return _pmesh.ParMesh_GetNSharedFaces(self)\n GetNSharedFaces = _swig_new_instance_method(_pmesh.ParMesh_GetNSharedFaces)\n\n def GetSharedFace(self, sface):\n r\"\"\"GetSharedFace(ParMesh self, int sface) -> int\"\"\"\n return _pmesh.ParMesh_GetSharedFace(self, sface)\n GetSharedFace = _swig_new_instance_method(_pmesh.ParMesh_GetSharedFace)\n\n def ReorientTetMesh(self):\n r\"\"\"ReorientTetMesh(ParMesh self)\"\"\"\n return _pmesh.ParMesh_ReorientTetMesh(self)\n ReorientTetMesh = _swig_new_instance_method(_pmesh.ParMesh_ReorientTetMesh)\n\n def ReduceInt(self, value):\n r\"\"\"ReduceInt(ParMesh self, int value) -> long\"\"\"\n return _pmesh.ParMesh_ReduceInt(self, value)\n ReduceInt = _swig_new_instance_method(_pmesh.ParMesh_ReduceInt)\n\n def Rebalance(self, *args):\n r\"\"\"\n Rebalance(ParMesh self)\n Rebalance(ParMesh self, intArray partition)\n \"\"\"\n return _pmesh.ParMesh_Rebalance(self, *args)\n Rebalance = _swig_new_instance_method(_pmesh.ParMesh_Rebalance)\n\n def GetBoundingBox(self, ref = 2):\n from .vector import Vector\n min = Vector()\n max = Vector() \n _mesh.Mesh_GetBoundingBox(self, min, max, ref) \n return min.GetDataArray().copy(), max.GetDataArray().copy()\n\n\n\n def GetCharacteristics(self, h_min, h_max, kappa_min, kappa_max):\n r\"\"\"GetCharacteristics(ParMesh self, double & h_min, double & h_max, double & kappa_min, double & kappa_max)\"\"\"\n return _pmesh.ParMesh_GetCharacteristics(self, h_min, h_max, kappa_min, kappa_max)\n GetCharacteristics = _swig_new_instance_method(_pmesh.ParMesh_GetCharacteristics)\n\n def PrintVTU(self, *args, **kwargs):\n r\"\"\"PrintVTU(ParMesh self, std::string pathname, mfem::VTKFormat format=ASCII, bool high_order_output=False, int compression_level=0, bool bdr=False)\"\"\"\n return _pmesh.ParMesh_PrintVTU(self, *args, **kwargs)\n PrintVTU = _swig_new_instance_method(_pmesh.ParMesh_PrintVTU)\n\n def FindPoints(self, pp, warn=True, inv_trans=None): \n r\"\"\"count, element_id, integration_points = FindPoints(points, warn=True, int_trans=None)\"\"\"\n import numpy as np\n import mfem.par as mfem\n\n pp = np.array(pp, copy=False, dtype=float).transpose() \n M = mfem.DenseMatrix(pp.shape[0], pp.shape[1])\n M.Assign(pp)\n elem_ids = mfem.intArray()\n int_points = mfem.IntegrationPointArray()\n count = _mesh.Mesh_FindPoints(self, M, elem_ids, int_points, warn, inv_trans) \n elem_ids = elem_ids.ToList()\n return count, elem_ids, int_points\n\n\n\n def PrintSharedEntities(self, fname_prefix):\n 
r\"\"\"PrintSharedEntities(ParMesh self, char const * fname_prefix)\"\"\"\n return _pmesh.ParMesh_PrintSharedEntities(self, fname_prefix)\n PrintSharedEntities = _swig_new_instance_method(_pmesh.ParMesh_PrintSharedEntities)\n __swig_destroy__ = _pmesh.delete_ParMesh\n\n def __init__(self, *args):\n r\"\"\"\n __init__(ParMesh self, ParMesh pmesh, bool copy_nodes=True) -> ParMesh\n __init__(ParMesh self, MPI_Comm comm, Mesh mesh, int * partitioning_=None, int part_method=1) -> ParMesh\n __init__(ParMesh self, MPI_Comm comm, std::istream & input, bool refine=True) -> ParMesh\n __init__(ParMesh self, ParMesh orig_mesh, int ref_factor, int ref_type) -> ParMesh\n __init__(ParMesh self, MPI_Comm comm, char const * mesh_file) -> ParMesh\n \"\"\"\n _pmesh.ParMesh_swiginit(self, _pmesh.new_ParMesh(*args))\n\n def ParPrintToFile(self, mesh_file, precision):\n r\"\"\"ParPrintToFile(ParMesh self, char const * mesh_file, int const precision)\"\"\"\n return _pmesh.ParMesh_ParPrintToFile(self, mesh_file, precision)\n ParPrintToFile = _swig_new_instance_method(_pmesh.ParMesh_ParPrintToFile)\n\n def Print(self, *args):\n r\"\"\"\n Print(ParMesh self, std::ostream & out=out)\n Print(ParMesh self, char const * file, int precision=8)\n \"\"\"\n return _pmesh.ParMesh_Print(self, *args)\n Print = _swig_new_instance_method(_pmesh.ParMesh_Print)\n\n def PrintXG(self, *args):\n r\"\"\"\n PrintXG(ParMesh self, std::ostream & out=out)\n PrintXG(ParMesh self, char const * file, int precision=8)\n \"\"\"\n return _pmesh.ParMesh_PrintXG(self, *args)\n PrintXG = _swig_new_instance_method(_pmesh.ParMesh_PrintXG)\n\n def PrintAsOne(self, *args):\n r\"\"\"\n PrintAsOne(ParMesh self, std::ostream & out=out)\n PrintAsOne(ParMesh self, char const * file, int precision=8)\n \"\"\"\n return _pmesh.ParMesh_PrintAsOne(self, *args)\n PrintAsOne = _swig_new_instance_method(_pmesh.ParMesh_PrintAsOne)\n\n def PrintAsOneXG(self, *args):\n r\"\"\"\n PrintAsOneXG(ParMesh self, std::ostream & out=out)\n PrintAsOneXG(ParMesh self, char const * file, int precision=8)\n \"\"\"\n return _pmesh.ParMesh_PrintAsOneXG(self, *args)\n PrintAsOneXG = _swig_new_instance_method(_pmesh.ParMesh_PrintAsOneXG)\n\n def PrintInfo(self, *args):\n r\"\"\"\n PrintInfo(ParMesh self, std::ostream & out=out)\n PrintInfo(ParMesh self, char const * file, int precision=8)\n \"\"\"\n return _pmesh.ParMesh_PrintInfo(self, *args)\n PrintInfo = _swig_new_instance_method(_pmesh.ParMesh_PrintInfo)\n\n def ParPrint(self, *args):\n r\"\"\"\n ParPrint(ParMesh self, std::ostream & out)\n ParPrint(ParMesh self, char const * file, int precision=8)\n ParPrint(ParMesh self)\n \"\"\"\n return _pmesh.ParMesh_ParPrint(self, *args)\n ParPrint = _swig_new_instance_method(_pmesh.ParMesh_ParPrint)\n\n# Register ParMesh in _pmesh:\n_pmesh.ParMesh_swigregister(ParMesh)\n\n\n\n" ]
[ [ "numpy.array" ] ]
Crossfoot/autodE
[ "2c452c954a5268e6c9a80cd4efbbfe855e61f6bb" ]
[ "tests/test_path.py" ]
[ "import os\nimport numpy as np\nfrom autode.atoms import Atom\nfrom autode.methods import XTB, ORCA\nfrom autode.path import Path, AdaptivePath, PathPoint\nfrom autode.path.adaptive import pruned_active_bonds\nfrom autode.bonds import FormingBond, BreakingBond\nfrom autode.species import Species, Molecule\nfrom autode.units import Unit, KcalMol\n\ntest_species = Species(name='tmp', charge=0, mult=1, atoms=[Atom('He')])\ntest_mol = Molecule(smiles='O')\n\n\ndef test_path_properties_empty():\n\n path = Path()\n\n assert len(path) == 0\n assert isinstance(path.units, Unit)\n\n assert path == Path() # should be able to compare paths\n assert path != 0\n\n # With no species there should be no peak/saddle/energies\n assert len(path.rel_energies) == 0\n assert len(path.energies) == 0\n\n assert not path.contains_peak\n assert path.peak_idx is None\n assert not path.is_saddle(idx=0)\n\n # Should not plot plot a path without any structures\n path.plot_energies(save=True, name='tmp', color='black', xlabel='none')\n assert not os.path.exists('tmp.png')\n\n\ndef test_path_properties():\n p1 = PathPoint(test_species.copy(), constraints={})\n p1.energy = -3\n p2 = PathPoint(test_species.copy(), constraints={})\n p2.energy = -2\n\n path = Path(p1, p2, units=KcalMol)\n assert all(np.isclose(path.energies, np.array([-3, -2])))\n assert all(np.isclose(path.rel_energies, 627.509*np.array([0, 1])))\n\n p3 = PathPoint(test_species.copy(), constraints={})\n path = Path(p1, p2, p3)\n\n # There is an energy not set, should not be able to find a peak\n assert path.peak_idx is None\n assert not path.contains_peak\n assert not path.is_saddle(idx=1)\n\n # setting the energy of the final point should allow a peak\n path[2].energy = -3\n assert path.contains_peak\n assert path.peak_idx == 1\n assert path.is_saddle(idx=1)\n\n path.plot_energies(save=True, name='tmp', color='black', xlabel='none')\n assert os.path.exists('tmp.png')\n os.remove('tmp.png')\n\n # Should ba able to print an xyz file containing the structures along the\n # path\n path.print_geometries(name='tmp')\n assert os.path.exists('tmp.xyz')\n os.remove('tmp.xyz')\n\n\ndef test_point_properties():\n\n point = PathPoint(species=test_species.copy(), constraints={})\n\n assert point.energy is None\n assert point.grad is None\n assert point.constraints == {}\n assert point.species.name == 'tmp'\n\n point.energy = 1\n point.grad = np.zeros(shape=(1, 3))\n\n # Copies should not copy energy or gradients\n new_point = point.copy()\n assert new_point.energy is None\n assert new_point.grad is None\n\n\ndef test_pruning_bonds():\n\n h3 = Species(name='h3', charge=0, mult=2,\n atoms=[Atom('H'), Atom('H', x=1), Atom('H', x=0.5, y=0.5)])\n\n fbond = FormingBond(atom_indexes=(0, 1), species=h3)\n bbond1 = BreakingBond(atom_indexes=(0, 2), species=h3)\n bbond2 = BreakingBond(atom_indexes=(1, 2), species=h3)\n\n new_bonds = pruned_active_bonds(initial_species=h3,\n fbonds=[fbond], bbonds=[bbond1, bbond2])\n assert len(new_bonds) == 2\n # Should prune to one breaking and one forming bond\n assert ((isinstance(new_bonds[0], FormingBond) and\n isinstance(new_bonds[1], BreakingBond))\n or\n (isinstance(new_bonds[1], FormingBond) and\n isinstance(new_bonds[0], BreakingBond)))\n\n\ndef test_products_made():\n\n path = Path(PathPoint(species=test_mol, constraints={}))\n\n assert not path.products_made(product=None)\n # Species have no graphs\n assert not path.products_made(product=test_species)\n\n # with a single point and a molecule with the same graph then the products\n 
# are made, at the first point\n assert path.products_made(product=test_mol)\n\n diff_mol = test_mol.copy()\n diff_mol.graph.remove_edge(0, 1)\n assert not path.products_made(product=diff_mol)\n\n\ndef test_adaptive_path():\n\n species_no_atoms = Species(name='tmp', charge=0, mult=1, atoms=[])\n path1 = AdaptivePath(init_species=species_no_atoms,\n bonds=[],\n method=XTB())\n assert len(path1) == 0\n assert path1.method.name == 'xtb'\n assert len(path1.bonds) == 0\n\n assert path1 != 0\n assert path1 == path1\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
berfubuyukoz/pytorch_pretrained_BERT
[ "5e2dcceeeefd87e87dbe3d8dbc39cae563f9972d" ]
[ "examples/distillation/scripts/binarized_data.py" ]
[ "# coding=utf-8\n# Copyright 2019-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPreprocessing script before training DistilBERT.\n\"\"\"\nimport argparse\nimport pickle\nimport random\nimport time\nimport numpy as np\nfrom pytorch_transformers import BertTokenizer\n\nfrom examples.distillation.utils import logger\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).\")\n parser.add_argument('--file_path', type=str, default='data/dump.txt',\n help='The path to the data.')\n parser.add_argument('--bert_tokenizer', type=str, default='bert-base-uncased',\n help=\"The tokenizer to use.\")\n parser.add_argument('--dump_file', type=str, default='data/dump',\n help='The dump file prefix.')\n args = parser.parse_args()\n\n\n logger.info(f'Loading Tokenizer ({args.bert_tokenizer})')\n bert_tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer)\n\n\n logger.info(f'Loading text from {args.file_path}')\n with open(args.file_path, 'r', encoding='utf8') as fp:\n data = fp.readlines()\n\n\n logger.info(f'Start encoding')\n logger.info(f'{len(data)} examples to process.')\n\n rslt = []\n iter = 0\n interval = 10000\n start = time.time()\n for text in data:\n text = f'[CLS] {text.strip()} [SEP]'\n token_ids = bert_tokenizer.encode(text)\n rslt.append(token_ids)\n\n iter += 1\n if iter % interval == 0:\n end = time.time()\n logger.info(f'{iter} examples processed. - {(end-start)/interval:.2f}s/expl')\n start = time.time()\n logger.info('Finished binarization')\n logger.info(f'{len(data)} examples processed.')\n\n\n dp_file = f'{args.dump_file}.{args.bert_tokenizer}.pickle'\n rslt_ = [np.uint16(d) for d in rslt]\n random.shuffle(rslt_)\n logger.info(f'Dump to {dp_file}')\n with open(dp_file, 'wb') as handle:\n pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "numpy.uint16" ] ]
czyczyyzc/MyFCIS
[ "5cbabab01ef967fd173caafd2b440a5fd7307932" ]
[ "Mybase/solver.py" ]
[ "import os\nimport cv2\nimport pickle\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom .load_weights import *\nfrom .optim_utils import *\nfrom .fcis_utils.make_image import *\nfrom .fcis_utils.bboxes_target_layer import *\n\nMODEL_SAVE_PATH = \"Mybase/Model\"\nMODEL_NAME = \"model.ckpt\"\n\nLOG_PATH1 = \"Mybase/logdata/train\"\nLOG_PATH2 = \"Mybase/logdata/val\"\nLOG_PATH3 = \"Mybase/logdata/test\"\n\"\"\"\nDATA_PATH1 = \"Mybase/tfrecords/train\"\nDATA_NAME1 = \"data.tfrecords-*\"\n\nDATA_PATH2 = \"Mybase/tfrecords/val\"\nDATA_NAME2 = \"data.tfrecords-*\"\n\nDATA_PATH3 = \"Mybase/tfrecords/test\"\nDATA_NAME3 = \"data.tfrecords-*\"\n\"\"\"\nN_GPU = 1\nGPU_S = 0\nGPU_E = 1\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nTRA_NUM = 8000 * N_GPU #80000\nVAL_NUM = 80 * N_GPU #8000\nTST_NUM = None\n\ndef get_data(fid):\n try: \n a = pickle.load(fid)\n return 1, a\n except EOFError: \n return 0, 0\n \ndef get_all_data(fid):\n data = []\n while(True):\n sig, dat = get_data(fid)\n if(sig == 0): break\n else:\n data.append(dat)\n return data\n\nclass Solver(object):\n \n def __init__(self, mdl, **kwargs):\n \n self.mdl = mdl\n self.opm_cfg = kwargs.pop('opm_cfg', {})\n self.bat_siz = kwargs.pop('bat_siz', 2)\n self.epc_num = kwargs.pop('epc_num', 10)\n self.itr_per_prt = kwargs.pop('itr_per_prt', 10)\n self.prt_ena = kwargs.pop('prt_ena', True) #print enable #verbose\n self.mov_ave_dca = kwargs.pop('mov_ave_dca', 0.99)\n self.epc_per_dca = kwargs.pop('epc_per_dca', 1)\n self.bat_siz_all = self.bat_siz * N_GPU\n\n if len(kwargs) > 0:\n extra = ', '.join('\"%s\"' % k for k in kwargs.keys())\n raise ValueError('Unrecognized arguments %s' % extra)\n\n self._reset()\n \n def _reset(self):\n # Set up some variables for book-keeping\n self.epc_cnt = 0\n self.best_val_acc = 0\n #self.loss_history = []\n #self.train_acc_history = []\n #self.val_acc_history = []\n \n #################################For FCIS##################################\n def _train_step(self, mtra=None, mtst=None, itr_per_epc=None, glb_stp=None):\n #将简单的运算放在CPU上,只有神经网络的训练过程放在GPU上\n with tf.device(\"/cpu:0\"):\n \n GV_tra = GeneratorForVOC(mod_tra=True, fil_nam='train', bat_siz=self.bat_siz_all, min_after_dequeue=200)\n GV_val = GeneratorForVOC(mod_tra=False, fil_nam='val', bat_siz=self.bat_siz_all, min_after_dequeue= 50)\n \n imgs_tra, gbxs_tra, gmks_tra, gbx_nums_tra, img_wdws_tra, img_hgts_tra_, img_wdhs_tra_ = GV_tra.get_input()\n imgs_val, gbxs_val, gmks_val, gbx_nums_val, img_wdws_val, img_hgts_val_, img_wdhs_val_ = GV_val.get_input()\n \n imgs = tf.cond(mtst, lambda: imgs_val, lambda: imgs_tra, strict=True)\n gbxs = tf.cond(mtst, lambda: gbxs_val, lambda: gbxs_tra, strict=True)\n gmks = tf.cond(mtst, lambda: gmks_val, lambda: gmks_tra, strict=True)\n gbx_nums = tf.cond(mtst, lambda: gbx_nums_val, lambda: gbx_nums_tra, strict=True)\n img_wdws = tf.cond(mtst, lambda: img_wdws_val, lambda: img_wdws_tra, strict=True)\n img_hgts_ = tf.cond(mtst, lambda: img_hgts_val_, lambda: img_hgts_tra_, strict=True)\n img_wdhs_ = tf.cond(mtst, lambda: img_wdhs_val_, lambda: img_wdhs_tra_, strict=True)\n \n #with tf.name_scope(\"input_image\"):\n # tf.summary.image(\"input\", X, 10)\n \n self.opm_cfg[\"decay_step\"] = itr_per_epc * self.epc_per_dca #decay\n tra_stp = update_rule(self.opm_cfg, glb_stp)\n \n self.mdl.mod_tra = True\n self.mdl.glb_pol = False\n self.mdl.inc_btm = True\n\n grds_lst = []\n loss_lst = []\n 
boxs_lst = []\n box_clss_lst = []\n box_prbs_lst = []\n box_msks_lst = []\n box_nums_lst = []\n for i in range(N_GPU):\n with tf.device(\"/gpu:%d\" % i):\n with tf.name_scope(\"GPU_%d\" % i) as scp:\n sta = i * self.bat_siz\n end = (i+1) * self.bat_siz\n loss, boxs, box_clss, box_prbs, box_msks, box_nums = \\\n self.mdl.forward(imgs[sta:end], None, gbxs[sta:end], gmks[sta:end], gbx_nums[sta:end], mtra, scp)\n #在第一次声明变量之后,将控制变量重用的参数设置为True。这样可以让不同的GPU更新同一组参数\n #注意tf.name_scope函数并不会影响tf.get_variable的命名空间\n tf.get_variable_scope().reuse_variables()\n #使用当前GPU计算所有变量的梯度\n grds = tra_stp.compute_gradients(loss[0])\n #print(grds)\n grds_lst .append(grds)\n loss_lst .append(loss)\n boxs_lst .append(boxs)\n box_clss_lst.append(box_clss)\n box_prbs_lst.append(box_prbs)\n box_msks_lst.append(box_msks)\n box_nums_lst.append(box_nums)\n #print(grds_lst)\n '''\n with tf.variable_scope(\"average\", reuse = tf.AUTO_REUSE):\n mov_ave = tf.train.ExponentialMovingAverage(self.mov_ave_dca, glb_stp)\n mov_ave_op = mov_ave.apply(tf.trainable_variables())\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mov_ave_op)\n '''\n with tf.variable_scope(\"optimize\", reuse = tf.AUTO_REUSE):\n grds = average_gradients(grds_lst)\n upd_opas = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(upd_opas):\n tra_opa = tra_stp.apply_gradients(grds, global_step=glb_stp)\n \n loss = tf.stack(loss_lst, axis=0) #一个向量而已\n loss = tf.reduce_mean(loss, axis=0)\n boxs = tf.concat(boxs_lst, axis=0) #以下皆有原来的维度\n box_clss = tf.concat(box_clss_lst, axis=0)\n box_prbs = tf.concat(box_prbs_lst, axis=0)\n box_msks = tf.concat(box_msks_lst, axis=0)\n box_nums = tf.concat(box_nums_lst, axis=0)\n #tf.summary.scalar(\"loss\", loss)\n #tf.summary.scalar(\"acc\", acc)\n #for grad, var in grads:\n # if grad is not None:\n # tf.summary.histogram(\"gradients_on_average/%s\" % var.op.name, grad)\n #for var in tf.trainable_variables():\n # tf.summary.histogram(var.op.name, var)\n return tra_opa, loss, boxs, box_clss, box_prbs, box_msks, box_nums, gbxs, gmks, gbx_nums\n \n \"\"\"\n ###############################For CLASSIFY################################\n def _train_step(self, mtra=None, mtst=None, itr_per_epc=None, glb_stp=None):\n #将简单的运算放在CPU上,只有神经网络的训练过程放在GPU上\n with tf.device(\"/cpu:0\"):\n \n GI_tra = GeneratorForImageNet(mod_tra=True, fil_nam='train', bat_siz=self.bat_siz_all, min_after_dequeue=250)\n GI_val = GeneratorForImageNet(mod_tra=False, fil_nam='val', bat_siz=self.bat_siz_all, min_after_dequeue=250)\n imgs_tra, lbls_tra = GI_tra.get_input()\n imgs_val, lbls_val = GI_val.get_input()\n imgs = tf.cond(mtst, lambda: imgs_val, lambda: imgs_tra, strict=True)\n lbls = tf.cond(mtst, lambda: lbls_val, lambda: lbls_tra, strict=True)\n #with tf.name_scope(\"input_image\"):\n # tf.summary.image(\"input\", X, 10)\n\n self.opm_cfg[\"decay_step\"] = itr_per_epc * self.epc_per_dca #decay\n tra_stp = update_rule(self.opm_cfg, glb_stp)\n\n self.mdl.mod_tra = True\n self.mdl.glb_pol = True\n\n grds_lst = []\n loss_lst = []\n scrs_lst = []\n for i in range(N_GPU):\n with tf.device(\"/gpu:%d\" % i):\n with tf.name_scope(\"GPU_%d\" % i) as scp:\n sta = i * self.bat_siz\n end = (i+1) * self.bat_siz\n loss, scrs = \\\n self.mdl.forward(imgs=imgs[sta:end], lbls=lbls[sta:end], gbxs=None, gbx_nums=None, mtra=mtra, scp=scp)\n\n #在第一次声明变量之后,将控制变量重用的参数设置为True。这样可以让不同的GPU更新同一组参数\n #注意tf.name_scope函数并不会影响tf.get_variable的命名空间\n tf.get_variable_scope().reuse_variables()\n #使用当前GPU计算所有变量的梯度\n grds = tra_stp.compute_gradients(loss[0])\n 
                        #print(grds)\n                        grds_lst.append(grds)\n                        loss_lst.append(loss)\n                        scrs_lst.append(scrs)\n            '''\n            with tf.variable_scope(\"average\", reuse = tf.AUTO_REUSE):\n                mov_ave = tf.train.ExponentialMovingAverage(self.mov_ave_dca, glb_stp)\n                mov_ave_op = mov_ave.apply(tf.trainable_variables())\n                tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mov_ave_op)\n            '''\n            with tf.variable_scope(\"optimize\", reuse = tf.AUTO_REUSE):\n                grds = average_gradients(grds_lst)\n                upd_opas = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n                with tf.control_dependencies(upd_opas):\n                    tra_opa = tra_stp.apply_gradients(grds, global_step=glb_stp)\n\n            loss = tf.stack(loss_lst, axis=0)\n            scrs = tf.concat(scrs_lst, axis=0)\n            loss = tf.reduce_mean(loss, axis=0)\n            #tf.summary.scalar(\"loss\", loss)\n            #tf.summary.scalar(\"acc\", acc)\n            #for grad, var in grads:\n            #    if grad is not None:\n            #        tf.summary.histogram(\"gradients_on_average/%s\" % var.op.name, grad)\n            #for var in tf.trainable_variables():\n            #    tf.summary.histogram(var.op.name, var)\n            return tra_opa, loss, scrs, lbls\n    \"\"\"\n    \n    #################################For FCIS##################################\n    def _test_step(self, imgs=None):\n        \n        with tf.device(\"/cpu:0\"):\n            \n            mtra = tf.constant(False, dtype=tf.bool)\n            self.mdl.mod_tra = False\n            self.mdl.glb_pol = False\n            self.mdl.inc_btm = True\n            \n            boxs_lst = []\n            box_clss_lst = []\n            box_prbs_lst = []\n            box_msks_lst = []\n            box_nums_lst = []\n            for i in range(N_GPU):\n                with tf.device(\"/gpu:%d\" % i):\n                #with tf.device(\"/cpu:0\"):\n                    with tf.name_scope(\"GPU_%d\" % i) as scp:\n                        sta = i * self.bat_siz\n                        end = (i+1) * self.bat_siz\n                        boxs, box_clss, box_prbs, box_msks, box_nums = \\\n                            self.mdl.forward(imgs[sta:end], None, None, None, None, mtra, scp)\n                        #After the variables have been declared once, set the reuse flag to True so that the different GPUs update the same set of parameters\n                        #Note that tf.name_scope does not affect the namespace used by tf.get_variable\n                        tf.get_variable_scope().reuse_variables()\n                        boxs_lst .append(boxs )\n                        box_clss_lst.append(box_clss)\n                        box_prbs_lst.append(box_prbs)\n                        box_msks_lst.append(box_msks)\n                        box_nums_lst.append(box_nums)\n            boxs = tf.concat(boxs_lst, axis=0) #all of the following keep their original dimensions\n            box_clss = tf.concat(box_clss_lst, axis=0)\n            box_prbs = tf.concat(box_prbs_lst, axis=0)\n            box_msks = tf.concat(box_msks_lst, axis=0)\n            box_nums = tf.concat(box_nums_lst, axis=0)\n            return boxs, box_clss, box_prbs, box_msks, box_nums\n    \n    \n    def concat(self, sess=None, fetches=None, feed_dict=None, itr_num=None, gen=None, tsrs=None, keps=None):\n        \n        rsts_lst = [[] for _ in range(len(fetches))]\n        if keps != None:\n            rsts_kep = [[] for _ in range(len(keps))]\n        for _ in range(itr_num):\n            if gen != None:\n                feds = next(gen)\n                for i, tsr in enumerate(tsrs):\n                    feed_dict[tsr] = feds[i]\n                for i, kep in enumerate(keps):\n                    rsts_kep[i].append(feds[kep])\n            rsts = sess.run(fetches, feed_dict=feed_dict)\n            for i, rst in enumerate(rsts):\n                rsts_lst[i].append(rst)\n        for i, rst in enumerate(rsts_lst):\n            rsts_lst[i] = np.concatenate(rst, axis=0)\n        if keps != None:\n            for i, rst in enumerate(rsts_kep):\n                rsts_kep[i] = np.concatenate(rst, axis=0)\n            return rsts_lst, rsts_kep\n        else:\n            return rsts_lst\n    \n    \n    def merge(self, rsts=None, rst_nums=None):\n        \n        rst_imxs = []\n        rsts_lst = [[] for _ in range(len(rsts))]\n        for i, rst_num in enumerate(rst_nums): #batch\n            rst_imxs.extend([i]*rst_num)\n            for j, rst in enumerate(rsts): #tensors\n                rsts_lst[j].append(rst[i][:rst_num])\n        rst_imxs = np.asarray(rst_imxs, dtype=np.int32)\n        for i, rst in enumerate(rsts_lst):\n            rsts_lst[i] = np.concatenate(rst, axis=0)\n        return rsts_lst, rst_imxs\n    \n    \n    #################################For FCIS##################################\n    def train(self):\n        \n        itr_per_epc = max(TRA_NUM // self.bat_siz_all, 1)\n        if TRA_NUM % self.bat_siz_all != 0:\n            itr_per_epc += 1\n        tra_itr_num = self.epc_num * itr_per_epc\n        \n        val_itr_num = max(VAL_NUM // self.bat_siz_all, 1)\n        if VAL_NUM % self.bat_siz_all != 0:\n            val_itr_num += 1\n\n        tf.reset_default_graph()\n        mtra = tf.placeholder(dtype=tf.bool, name=\"train\")\n        mtst = tf.placeholder(dtype=tf.bool, name=\"test\")\n        glb_stp = tf.Variable(0, trainable=False, name=\"global_step\", dtype=tf.int64)\n        \n        tra_opa, loss, boxs, box_clss, box_prbs, box_msks, box_nums, gbxs, gmks, gbx_nums = \\\n            self._train_step(mtra, mtst, itr_per_epc, glb_stp)\n        #tf.summary.scalar('loss', loss)\n        #summary_op = tf.summary.merge_all()\n        #summary_loss = tf.summary.merge(loss)\n        #writer = tf.summary.FileWriter(LOG_PATH, sess.graph, flush_secs=5) #tf.get_default_graph() \n        #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = 0.8)\n        #config = tf.ConfigProto(log_device_placement = False, allow_soft_placement = True, gpu_options = gpu_options)\n        config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n        with tf.Session(config=config) as sess:\n            \n            init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())\n            sess.run(init_op)\n            coord = tf.train.Coordinator()\n            threads = tf.train.start_queue_runners(sess = sess, coord = coord)\n            saver = tf.train.Saver()\n            \"\"\"\n            ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH+\"/222\")\n            if ckpt and ckpt.model_checkpoint_path:\n                var = tf.global_variables()\n                var = [v for v in var if \"resnet_block2_0_0/\" in v.name or \"resnet_block2_0_1/\" in v.name \\\n                       or \"resnet_block2_0_2/\" in v.name or \"layers_module1_0/\" in v.name or \"layers_module1_2/\" in v.name \\\n                       or \"layers_module1_3/\" in v.name or \"layers_module1_4/\" in v.name]\n                saver = tf.train.Saver(var)\n                saver.restore(sess, ckpt.model_checkpoint_path)\n            ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH+\"/333\")\n            if ckpt and ckpt.model_checkpoint_path:\n                var = tf.global_variables()\n                var = [v for v in var if \"resnet_block2_0_3/\" in v.name]\n                var = [v for v in var if \"average/\" not in v.name and \"optimize/\" not in v.name]\n                saver = tf.train.Saver(var)\n                saver.restore(sess, ckpt.model_checkpoint_path)\n            saver = tf.train.Saver()\n            \"\"\"\n            saver = tf.train.Saver()\n            ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)\n            if ckpt and ckpt.model_checkpoint_path:\n                '''\n                var = tf.global_variables()\n                mydict = load_weights()\n                mykeys = mydict.keys()\n                for i, v in enumerate(var):\n                    if v.name in mykeys:\n                        sess.run(tf.assign(v, mydict[v.name], validate_shape=True, use_locking=True))\n                    else:\n                        print(v.name)\n                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=glb_stp)\n                return\n                '''\n                #var = tf.global_variables()\n                #var = [v for v in var if \"layers_module1_0/\" in v.name or \"layers_module1_1/\" in v.name]\n                #var = [v for v in var if \"average/\" not in v.name and \"optimize/\" not in v.name]\n                #var_ave = tf.train.ExponentialMovingAverage(self.mv_ave_decay, glb_stp)\n                #var = var_ave.variables_to_restore()\n                #saver = tf.train.Saver(var)\n                saver.restore(sess, ckpt.model_checkpoint_path)\n                saver = tf.train.Saver()\n            \n            #Initialize the box evaluation class\n            BT = BboxesTargetLayer()\n            \n            with open(os.path.join(LOG_PATH1, \"loss\"), 'ab') as fid_tra_loss, \\\n                 open(os.path.join(LOG_PATH1, \"accs\"), 'ab') as fid_tra_accs, \\\n                 open(os.path.join(LOG_PATH2, \"accs\"), 'ab') as fid_val_accs:\n                \n                for t in range(tra_itr_num): \n                    epc_end = (t + 1) % itr_per_epc == 0\n                    if epc_end:\n                        self.epc_cnt += 1\n                    itr_sta = (t == 0)\n                    itr_end = (t == tra_itr_num - 1)\n\n                    #_, summary, loss1, = sess.run([train_op, summary_op, loss], feed_dict = {mtrain: True})\n                    #writer.add_summary(summary, global_step=glb_stp.eval())\n                    _, loss_kep = sess.run([tra_opa, loss], feed_dict={mtra: True, mtst: False})\n                    \n                    if self.prt_ena and t % self.itr_per_prt == 0:\n                        pickle.dump(loss_kep, fid_tra_loss, pickle.HIGHEST_PROTOCOL)\n                        print('(Iteration %d / %d) losses: %s' % (t + 1, tra_itr_num, str(loss_kep)))\n\n                    #if itr_sta or itr_end or epc_end:\n                    if itr_end or epc_end: \n                        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=glb_stp)\n                        ###Get the training accuracy###\n                        print('Get the training accuracy!')\n                        fetches = [boxs, box_clss, box_prbs, box_msks, box_nums, gbxs, gmks, gbx_nums]\n                        feed_dict = {mtra: False, mtst: False}\n                        boxs_kep, box_clss_kep, box_prbs_kep, box_msks_kep, box_nums_kep, gbxs_kep, gmks_kep, gbx_nums_kep = \\\n                            self.concat(sess, fetches, feed_dict, val_itr_num)\n                        [boxs_kep, box_clss_kep, box_prbs_kep, box_msks_kep], box_imxs = \\\n                            self.merge([boxs_kep, box_clss_kep, box_prbs_kep, box_msks_kep], box_nums_kep)\n                        [gbxs_kep, gmks_kep], gbx_imxs = \\\n                            self.merge([gbxs_kep, gmks_kep], gbx_nums_kep)\n                        boxs_kep = (boxs_kep, box_clss_kep, box_prbs_kep, box_imxs)\n                        gbxs_kep = (gbxs_kep[:, :-1], gbxs_kep[:, -1], gbx_imxs)\n                        '''\n                        gbxs_kep = (gbxs_kep[:, :-1], gbxs_kep[:, -1]>0, gbx_imxs)\n                        '''\n                        #print(boxs_kep)\n                        #print(gbxs_kep)\n                        rsts = BT.generate_boxs_pre(boxs_kep, gbxs_kep)\n                        pickle.dump(rsts, fid_tra_accs, pickle.HIGHEST_PROTOCOL)\n                        print('')\n                        ###Get the validation accuracy###\n                        print('Get the validation accuracy!')\n                        fetches = [boxs, box_clss, box_prbs, box_msks, box_nums, gbxs, gmks, gbx_nums]\n                        feed_dict = {mtra: False, mtst: True}\n                        boxs_kep, box_clss_kep, box_prbs_kep, box_msks_kep, box_nums_kep, gbxs_kep, gmks_kep, gbx_nums_kep = \\\n                            self.concat(sess, fetches, feed_dict, val_itr_num)\n                        [boxs_kep, box_clss_kep, box_prbs_kep, box_msks_kep], box_imxs = \\\n                            self.merge([boxs_kep, box_clss_kep, box_prbs_kep, box_msks_kep], box_nums_kep)\n                        [gbxs_kep, gmks_kep], gbx_imxs = \\\n                            self.merge([gbxs_kep, gmks_kep], gbx_nums_kep)\n                        boxs_kep = (boxs_kep, box_clss_kep, box_prbs_kep, box_imxs)\n                        gbxs_kep = (gbxs_kep[:, :-1], gbxs_kep[:, -1], gbx_imxs)\n                        '''\n                        gbxs_kep = (gbxs_kep[:, :-1], gbxs_kep[:, -1]>0, gbx_imxs)\n                        '''\n                        #print(boxs_kep)\n                        #print(gbxs_kep)\n                        rsts = BT.generate_boxs_pre(boxs_kep, gbxs_kep)\n                        pickle.dump(rsts, fid_val_accs, pickle.HIGHEST_PROTOCOL)\n                        print('')\n            coord.request_stop()\n            coord.join(threads)\n    \n    \"\"\"\n    #####################################For CLASSIFY#####################################\n    def train(self):\n        \n        itr_per_epc = max(TRA_NUM // self.bat_siz_all, 1)\n        if TRA_NUM % self.bat_siz_all != 0:\n            itr_per_epc += 1\n        tra_itr_num = self.epc_num * itr_per_epc\n        \n        val_itr_num = max(VAL_NUM // self.bat_siz_all, 1)\n        if VAL_NUM % self.bat_siz_all != 0:\n            val_itr_num += 1\n\n        tf.reset_default_graph()\n        mtra = tf.placeholder(dtype=tf.bool, name=\"train\")\n        mtst = tf.placeholder(dtype=tf.bool, name=\"test\")\n        glb_stp = tf.Variable(0, trainable=False, name=\"global_step\", dtype=tf.int64)\n        \n        tra_opa, loss, scrs, lbls = \\\n            self._train_step(mtra, mtst, itr_per_epc, glb_stp)\n        \n        scrs_tmp = tf.placeholder(dtype=tf.float32, name=\"scrs_tmp\")\n        lbls_tmp = tf.placeholder(dtype=tf.int32, name=\"lbls_tmp\")\n        acc_top1 = tf.reduce_mean(tf.cast(tf.nn.in_top_k(scrs_tmp, lbls_tmp, k=1), tf.float32))\n        acc_top5 
= tf.reduce_mean(tf.cast(tf.nn.in_top_k(scrs_tmp, lbls_tmp, k=5), tf.float32))\n #tf.summary.scalar('loss', loss)\n #summary_op = tf.summary.merge_all()\n #summary_loss = tf.summary.merge(loss)\n #writer = tf.summary.FileWriter(LOG_PATH, sess.graph, flush_secs=5) #tf.get_default_graph() \n #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = 0.8)\n #config = tf.ConfigProto(log_device_placement = False, allow_soft_placement = True, gpu_options = gpu_options)\n #config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True, device_count={\"CPU\": 2}, \\\n # inter_op_parallelism_threads=16, intra_op_parallelism_threads=16)\n config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n with tf.Session(config=config) as sess:\n \n init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())\n sess.run(init_op)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n \n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)\n if ckpt and ckpt.model_checkpoint_path:\n \n var = tf.global_variables()\n mydict = load_weights()\n mykeys = mydict.keys()\n for i, v in enumerate(var):\n if v.name in mykeys:\n sess.run(tf.assign(v, mydict[v.name], validate_shape=True, use_locking=True))\n else:\n print(v.name)\n saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=glb_stp)\n return\n \n #var = tf.global_variables()\n #var = [v for v in var if \"layers_module1_0/\" in v.name or \"layers_module1_1/\" in v.name]\n #var = [v for v in var if \"average/\" not in v.name and \"optimize/\" not in v.name]\n #var = [v for v in var if \"_myprd2\" not in v.name and \"optimize/\" not in v.name]\n #var_ave = tf.train.ExponentialMovingAverage(self.mv_ave_decay, glb_stp)\n #var = var_ave.variables_to_restore()\n #saver = tf.train.Saver(var)\n saver.restore(sess, ckpt.model_checkpoint_path)\n saver = tf.train.Saver()\n\n with open(os.path.join(LOG_PATH1, \"loss\"), 'ab') as fid_tra_loss, \\\n open(os.path.join(LOG_PATH1, \"accs\"), 'ab') as fid_tra_accs, \\\n open(os.path.join(LOG_PATH2, \"accs\"), 'ab') as fid_val_accs:\n \n for t in range(tra_itr_num):\n epc_end = (t + 1) % itr_per_epc == 0\n if epc_end:\n self.epc_cnt += 1\n itr_sta = (t == 0)\n itr_end = (t == tra_itr_num - 1)\n\n #_, summary, loss1, = sess.run([train_op, summary_op, loss], feed_dict = {mtrain: True})\n #writer.add_summary(summary, global_step=glb_stp.eval())\n _, loss_kep = sess.run([tra_opa, loss], feed_dict = {mtra: True, mtst: False})\n \n if self.prt_ena and t % self.itr_per_prt == 0:\n pickle.dump(loss_kep, fid_tra_loss, pickle.HIGHEST_PROTOCOL)\n print('(Iteration %d / %d) losses: %s' % (t + 1, tra_itr_num, str(loss_kep)))\n\n #if itr_sta or itr_end or epc_end:\n if itr_end or epc_end: \n saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=glb_stp)\n \n fetches = [scrs, lbls]\n feed_dict = {mtra: False, mtst: False}\n tra_scrs, tra_lbls = self.concat(sess, fetches, feed_dict, val_itr_num)\n fetches = [scrs, lbls]\n feed_dict = {mtra: False, mtst: True}\n val_scrs, val_lbls = self.concat(sess, fetches, feed_dict, val_itr_num)\n \n tra_acc_top1, tra_acc_top5 = \\\n sess.run([acc_top1, acc_top5], feed_dict={scrs_tmp: tra_scrs, lbls_tmp:tra_lbls})\n val_acc_top1, val_acc_top5 = \\\n sess.run([acc_top1, acc_top5], feed_dict={scrs_tmp: val_scrs, lbls_tmp:val_lbls}) \n pickle.dump([tra_acc_top1, tra_acc_top5], fid_tra_accs, pickle.HIGHEST_PROTOCOL)\n pickle.dump([val_acc_top1, 
val_acc_top5], fid_val_accs, pickle.HIGHEST_PROTOCOL)\n if self.prt_ena:\n print('(Epoch %d / %d) tra_acc1: %f, tra_acc5: %f, val_acc1: %f, val_acc5: %f'\\\n % (self.epc_cnt, self.epc_num, tra_acc_top1, tra_acc_top5, val_acc_top1, val_acc_top5))\n coord.request_stop()\n \"\"\"\n \n #################################For DSSD##################################\n def test(self):\n global TST_NUM\n #GC = GeneratorForCOCO(mod_tra=False, fil_nam='train', bat_siz=self.bat_siz_all, min_after_dequeue=200)\n GV = GeneratorForVOC (mod_tra=False, fil_nam='train', bat_siz=self.bat_siz_all, min_after_dequeue=200)\n cat_idx_to_cls_nam = GV.cat_idx_to_cls_nam\n img_siz_max = GV.img_siz_max\n max_num = GV.max_num\n if TST_NUM == None: TST_NUM = GV.img_num_tst\n print(\"There are {:d} pictures to test!\".format(TST_NUM))\n tst_itr_num = max(TST_NUM // self.bat_siz_all, 1)\n if TST_NUM % self.bat_siz_all != 0:\n tst_itr_num += 1\n \n tf.reset_default_graph()\n imgs = tf.placeholder(dtype=tf.float32, shape=[self.bat_siz_all, img_siz_max, img_siz_max, 3], name=\"images\")\n img_hgts_ = tf.placeholder(dtype=tf.int32, shape=[self.bat_siz_all], name=\"img_hgts_\")\n img_wdhs_ = tf.placeholder(dtype=tf.int32, shape=[self.bat_siz_all], name=\"img_wdhs_\")\n \n glb_stp = tf.Variable(0, trainable=False, name=\"global_step\", dtype=tf.int64)\n \n boxs, box_clss, box_prbs, box_msks, box_nums = self._test_step(imgs)\n\n #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = 0.8)\n #config = tf.ConfigProto(log_device_placement = False, allow_soft_placement = True, gpu_options = gpu_options)\n config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n with tf.Session(config=config) as sess:\n \n init_op = (tf.global_variables_initializer(), tf.local_variables_initializer())\n sess.run(init_op)\n \n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)\n if ckpt and ckpt.model_checkpoint_path:\n #var = tf.global_variables()\n #var_ave = tf.train.ExponentialMovingAverage(self.mv_ave_decay, glb_stp)\n #var_to_restore = var_ave.variables_to_restore()\n #saver = tf.train.Saver(var_to_restore)\n saver.restore(sess, ckpt.model_checkpoint_path)\n saver = tf.train.Saver()\n else:\n print(\"No checkpoint file found!\")\n return\n \n with open(os.path.join(LOG_PATH3, \"imgs\"), 'wb') as fid_tst_imgs, \\\n open(os.path.join(LOG_PATH3, \"boxs\"), 'wb') as fid_tst_boxs, \\\n open(os.path.join(LOG_PATH3, \"msks\"), 'wb') as fid_tst_msks:\n '''\n sri = ['ID', 'PATH', 'TYPE', 'SCORE', 'XMIN', 'YMIN', 'XMAX', 'YMAX']\n sri = \",\".join(sri)\n sri = sri + '\\n'\n fid_tst_rsts.writelines([sri])\n '''\n fetches = [boxs, box_clss, box_prbs, box_msks, box_nums]\n feed_dict = {}\n [boxs_kep, box_clss_kep, box_prbs_kep, box_msks_kep, box_nums_kep],[img_nams, img_wdws, img_hgts_, img_wdhs_] = \\\n self.concat(sess, fetches, feed_dict, tst_itr_num, GV.get_input2(sess), [imgs], [1, 2, 3, 4])\n [boxs_kep, box_clss_kep, box_prbs_kep, box_msks_kep], box_imxs = \\\n self.merge([boxs_kep, box_clss_kep, box_prbs_kep, box_msks_kep], box_nums_kep)\n \n for i, img_nam in enumerate(img_nams):\n img_wdw = img_wdws [i]\n img_hgt_ = img_hgts_[i]\n img_wdh_ = img_wdhs_[i]\n idxs = np.where(box_imxs==i)[0]\n if len(idxs) == 0:\n print('There is no boxs for image %s' %(img_nam))\n continue\n boxs = boxs_kep [idxs]\n box_clss = box_clss_kep[idxs]\n box_prbs = box_prbs_kep[idxs]\n box_msks = box_msks_kep[idxs]\n _, boxs, msks = GV.recover_instances(None, boxs, box_msks, img_wdw, img_hgt_, img_wdh_)\n box_clss = 
box_clss[:, np.newaxis].astype(dtype=np.float32, copy=False)\n box_prbs = box_prbs[:, np.newaxis]\n boxs = np.concatenate([boxs, box_clss, box_prbs], axis=-1)\n pickle.dump(img_nam, fid_tst_imgs, pickle.HIGHEST_PROTOCOL)\n pickle.dump(boxs, fid_tst_boxs, pickle.HIGHEST_PROTOCOL)\n pickle.dump(msks, fid_tst_msks, pickle.HIGHEST_PROTOCOL)\n '''\n box_idxs = np.arange(len(boxs)) + 1\n for i, box in enumerate(boxs):\n box_ymn, box_xmn, box_ymx, box_xmx = box\n box_idx = box_idxs[i]\n box_cls = box_clss[i]\n box_cls = cat_idx_to_cls_nam[box_cls]\n box_prb = box_prbs[i]\n sri = [str(box_idx), img_nam, box_cls, str(box_prb), \\\n str(box_xmn), str(box_ymn), str(box_xmx), str(box_ymx)]\n sri = \",\".join(sri)\n sri = sri + '\\n'\n fid_tst_boxs.writelines([sri])\n '''\n \n def display_detections(self, show=True, save=True):\n \n #GC = GeneratorForCOCO(mod_tra=False, fil_nam='train', bat_siz=self.bat_siz_all, min_after_dequeue=500)\n GV = GeneratorForVOC (mod_tra=False, fil_nam='train', bat_siz=self.bat_siz_all, min_after_dequeue=500)\n cat_nam_to_cls_idx = GV.cat_nam_to_cls_idx\n \n with open(os.path.join(LOG_PATH3, \"imgs\"), 'rb') as fid_tst_imgs, \\\n open(os.path.join(LOG_PATH3, \"boxs\"), 'rb') as fid_tst_boxs, \\\n open(os.path.join(LOG_PATH3, \"msks\"), 'rb') as fid_tst_msks:\n \n while True:\n try: \n img_nam = pickle.load(fid_tst_imgs)\n boxs = pickle.load(fid_tst_boxs)\n msks = pickle.load(fid_tst_msks)\n img_fil = os.path.join(\"Mybase/datasets/test\", img_nam)\n img = cv2.imread(img_fil)\n if type(img) != np.ndarray:\n print(\"Failed to find image %s\" %(img_fil))\n continue\n img_hgt, img_wdh = img.shape[0], img.shape[1]\n if img.size == img_hgt * img_wdh:\n print ('Gray Image %s' %(img_fil))\n img_zro = np.empty((img_hgt, img_wdh, 3), dtype=np.uint8)\n img_zro[:, :, :] = img[:, :, np.newaxis]\n img = img_zro\n assert img.size == img_wdh * img_hgt * 3, '%s' % str(self.get_idx)\n img = img[:, :, ::-1]\n box_prbs = boxs[:, -1]\n box_clss = boxs[:, -2].astype(dtype=np.int32, copy=False)\n boxs = boxs[:, :-2]\n GV.display_instances(img, boxs, box_clss, box_prbs, msks, show, save, img_nam, \"Mybase/datasets/result\")\n except EOFError: \n return\n \n \n def show_loss_acc(self):\n\n with open(os.path.join(LOG_PATH1, \"loss\"), 'rb') as fid_train_loss, \\\n open(os.path.join(LOG_PATH1, \"mAP\"), 'rb') as fid_train_mAP, \\\n open(os.path.join(LOG_PATH2, \"mAP\"), 'rb') as fid_val_mAP:\n \n loss_history = get_all_data(fid_train_loss)\n train_acc_history = get_all_data(fid_train_mAP)\n val_acc_history = get_all_data(fid_val_mAP)\n\n plt.figure(1)\n\n plt.subplot(2, 1, 1)\n plt.title('Training loss')\n plt.xlabel('Iteration')\n\n plt.subplot(2, 1, 2)\n plt.title('accuracy')\n plt.xlabel('Epoch')\n \n #plt.subplot(3, 1, 3)\n #plt.title('Validation accuracy')\n #plt.xlabel('Epoch')\n \n plt.subplot(2, 1, 1)\n plt.plot(loss_history, 'o')\n\n plt.subplot(2, 1, 2)\n plt.plot(train_acc_history, '-o', label=\"train_acc\")\n plt.plot(val_acc_history, '-o', label=\"val_acc\")\n\n for i in [1, 2]:\n plt.subplot(2, 1, i)\n plt.legend(loc='upper center', ncol=4)\n\n plt.gcf().set_size_inches(15, 15)\n \n plt.show()\n \n def show_image_with_bbox(self, thresh = 0.8, num_classes = 21, im_h = 512, im_w = 512):\n \n classes = ('__background__', 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')\n ind_to_class = dict(zip(range(num_classes), classes))\n \n with 
open(os.path.join(LOG_PATH3, \"bbox\"), 'rb') as fid_test_bbox, \\\n open(os.path.join(LOG_PATH3, \"image\"), 'rb') as fid_test_image:\n \n bboxes_all = get_all_data(fid_test_bbox)\n images_all = get_all_data(fid_test_image)\n\n num_iter = int(len(images_all))\n print(\"The total num of iterations is %d\" %(num_iter))\n\n count = 1\n \n for it in range(num_iter):\n \n bboxes_gpu = bboxes_all[it]\n images_gpu = images_all[it]\n num_gpu = len(images_gpu)\n print(\"The total num of gpus is %d\" %(num_gpu))\n\n for gpu in range(num_gpu):\n \n bboxes_batch = bboxes_gpu[gpu]\n images_batch = images_gpu[gpu]\n num_batch = len(images_batch)\n print(\"The test batch size is %d\" %(num_batch))\n\n for bt in range(num_batch):\n \n bboxes_class = bboxes_batch[bt]\n image = images_batch[bt]\n\n plt.figure(count)\n plt.cla()\n plt.imshow(image)\n count += 1\n \n for cls in range(1, num_classes):\n \n bboxes = bboxes_class[cls]\n num_box = len(bboxes)\n if(num_box == 0):\n continue\n class_name = ind_to_class[cls]\n print(\"The class {:s} has {:d} bboxes\".format(class_name, num_box))\n \n for i in range(num_box):\n \n score = bboxes[i, 4]\n if score > thresh:\n plt.gca().add_patch(\n plt.Rectangle((bboxes[i, 1]*im_w, bboxes[i, 0]*im_h),\n (bboxes[i, 3] - bboxes[i, 1])*im_w,\n (bboxes[i, 2] - bboxes[i, 0])*im_h, fill=False,\n edgecolor='g', linewidth=3)\n )\n\n plt.gca().text(bboxes[i, 1]*im_w, bboxes[i, 0]*im_h - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n print('')\n #plt.title('{} {:.3f}'.format(class_name, score))\n plt.show()\n" ]
[ [ "tensorflow.train.start_queue_runners", "tensorflow.train.get_checkpoint_state", "numpy.where", "matplotlib.pyplot.gcf", "tensorflow.stack", "tensorflow.control_dependencies", "tensorflow.local_variables_initializer", "tensorflow.global_variables_initializer", "numpy.concatenate", "numpy.empty", "tensorflow.concat", "tensorflow.Variable", "tensorflow.train.Saver", "tensorflow.ConfigProto", "tensorflow.constant", "tensorflow.variable_scope", "tensorflow.get_variable_scope", "matplotlib.pyplot.gca", "tensorflow.get_collection", "matplotlib.pyplot.subplot", "tensorflow.train.Coordinator", "tensorflow.Session", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "tensorflow.placeholder", "tensorflow.name_scope", "matplotlib.pyplot.show", "numpy.asarray", "tensorflow.cond", "matplotlib.pyplot.xlabel", "tensorflow.reset_default_graph", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.cla", "tensorflow.device", "matplotlib.pyplot.Rectangle", "tensorflow.reduce_mean", "matplotlib.pyplot.imshow" ] ]
JIAQING-XIE/Fea2Fea
[ "10ca1dabecff121fdcfccd5c043c8b2984970d35" ]
[ "src/plot_dist.py" ]
[ "import seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n\ntxt_dir = os.path.join('/home/jiaqing/桌面/Fea2Fea/', 'Result')\n\ndef read_file(dataset = 'planetoid', dataset_name = 'Cora', Property ='Degree', method = 'dist', count_zero = False):\n dir_name = []\n planetoid = ['PubMed','Cora', 'Citeseer'] # Must in this order\n tudataset = ['PROTEINS', 'ENZYMES', 'NCI1']\n if dataset == 'planetoid':\n for name in planetoid:\n dir_name.append(txt_dir + '/Planetoid/' + name + '_property.txt')\n elif dataset == 'tudataset':\n for name in tudataset:\n dir_name.append(txt_dir + '/TUdataset/' + name + '_property.txt')\n\n df = pd.DataFrame()\n\n if dataset == 'planetoid':\n for (element,name) in zip(dir_name,planetoid):\n temp = pd.read_csv(element, sep='\\t')\n #print(temp.describe())\n if Property == 'Degree': \n prop = temp[temp[Property]<16]\n if Property == 'Clustering_coefficient':\n prop = temp[temp[Property]< 0.001]\n if Property == 'Pagerank':\n prop = temp[temp[Property]< 0.0008]\n if Property == 'Aver_path_len':\n prop = temp[temp[Property]< 16]\n prop = prop[Property]\n df[name] = prop\n df.dropna()\n if count_zero and Property == 'Clustering_coefficient':\n total_zero = df[dataset_name].value_counts(0)\n print(\"\\n\")\n\n if dataset == 'tudataset':\n for (element,name) in zip(dir_name,tudataset):\n temp = pd.read_csv(element, sep='\\t')\n #print(temp.describe())\n if Property == 'Degree': \n prop = temp[temp[Property]<16]\n if Property == 'Clustering_coefficient':\n prop = temp[temp[Property]< 2]\n if Property == 'Pagerank':\n prop = temp[temp[Property]< 0.2]\n if Property == 'Aver_path_len':\n prop = temp[temp[Property]< 16]\n prop = prop[Property]\n df[name] = prop\n df.dropna()\n if count_zero and Property == 'Clustering_coefficient':\n total_zero = df[dataset_name].value_counts(0)\n print(\"total nodes with zero clustering coefficient in Cora dataset:{}\".format(total_zero))\n print(\"\\n\")\n\n if method == 'dist': \n plt.figure(figsize=(9,7))\n sns.set(font_scale = 1.5) \n sns.distplot(df[dataset_name], axlabel = Property)\n plt.savefig(txt_dir + '/data distribution/tudataset/' + dataset_name +'_' + Property +'_dis.eps', dpi = 800, format = 'eps', bbox_inches='tight')\n \n return\n df = df.melt(var_name=Property, value_name='value')\n plt.figure(figsize=(10,10))\n sns.set(font_scale = 2) \n sns.violinplot(x=Property, y='value', data=df)\n plt.savefig(txt_dir + '/violin_plot/' + Property +'_' + dataset +'_vp.eps', dpi = 800, format ='eps') # \n\n plt.show()\n\n \nif __name__ == \"__main__\":\n prop = ['Degree', 'Clustering_coefficient', 'Pagerank', 'Aver_path_len']\n planetoid = ['PubMed','Cora', 'Citeseer']\n tudataset = ['PROTEINS', 'ENZYMES', 'NCI1']\n for dataset_name in planetoid: \n for i in prop:\n read_file(dataset = 'planetoid', dataset_name = dataset_name, Property=i, count_zero=True)\n\n \n" ]
[ [ "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
roger-yu-ds/iLab1
[ "17561b2f27fd972644387a7308875245454160c6" ]
[ "UTILS/utils.py" ]
[ "# This script is used to load the data\r\n\r\nfrom pathlib import WindowsPath\r\nfrom pathlib import Path\r\nimport pandas as pd\r\nimport xlwings as xw\r\nimport datetime\r\nimport pickle\r\nfrom typing import Tuple\r\nfrom typing import Dict\r\nfrom typing import List\r\nfrom collections import defaultdict\r\nimport spacy\r\nfrom UTILS import feature_extraction\r\nimport numpy as np\r\n\r\ndef cl_or_wf(filename: str) -> str:\r\n \"\"\"Determines if the dataframe is WF or CL\"\"\"\r\n \r\n if 'CL' in filename.upper():\r\n result = 'CL'\r\n elif 'WF' in filename.upper():\r\n result = 'WF'\r\n else:\r\n result = input(f'There is no \"CL\" or \"WF\" in the name {filename}, please enter one: ').upper()\r\n \r\n return result\r\n\r\n\r\ndef convert_excel_float_to_date(excel_float: float):\r\n \"\"\"Fixes the dtype from float to date\"\"\"\r\n \r\n if excel_float is not None:\r\n excel_start_date = datetime.datetime(year = 1900, month = 1, day = 1)\r\n days = datetime.timedelta(days=excel_float)\r\n adjustment = datetime.timedelta(days = -2)\r\n\r\n result = excel_start_date + days + adjustment\r\n \r\n else:\r\n result = None\r\n \r\n return result\r\n\r\n\r\ndef define_dtypes(column: pd.Series, \r\n service: str, \r\n col_dtypes: pd.DataFrame) -> pd.Series:\r\n \r\n \"\"\"Defines the column dtypes of the dataframe\"\"\"\r\n \r\n col_dtypes = col_dtypes.query('service == @service')\r\n column_name = column.name\r\n col_dtypes.set_index(keys = ['service', 'field'], inplace = True)\r\n col_type = col_dtypes.loc[(service, column_name), 'type']\r\n \r\n if col_type is None:\r\n result = column\r\n elif col_type == 'datetime64':\r\n if column.dtype == 'float64':\r\n result = column.apply(convert_excel_float_to_date)\r\n else:\r\n result = column\r\n elif col_type == 'ordinal':\r\n result = pd.Categorical(column, ordered = True)\r\n elif col_type == 'list':\r\n result = column.str.split(',')\r\n elif col_type == 'int64':\r\n result = column.fillna(0).astype(col_type)\r\n elif col_type == 'str':\r\n result = column.fillna('').astype(col_type)\r\n else:\r\n result = column.astype(col_type)\r\n \r\n return result\r\n \r\n\r\ndef main(data_path: str, column_dtypes_path: str = None):\r\n \r\n df_dict = {}\r\n for filename in Path(data_path).glob('*.xlsx'):\r\n print(f'Opening {filename.name}')\r\n wb = xw.Book(str(filename))\r\n service = cl_or_wf(filename.name)\r\n df_dict[service] = wb.sheets[0].used_range.options(pd.DataFrame, index = False, header = True).value\r\n \r\n wb.app.quit()\r\n \r\n # Update the column dtypes\r\n if column_dtypes_path is None:\r\n column_dtypes_path = Path.cwd().joinpath('CONFIG').joinpath('column_types.csv')\r\n print(f'Using the default column types {column_dtypes_path}.')\r\n \r\n column_dtypes_df = pd.read_csv(column_dtypes_path)\r\n \r\n for key, value in df_dict.items():\r\n df_dict[key] = value.apply(lambda x: define_dtypes(x, key, column_dtypes_df))\r\n \r\n \r\n return df_dict\r\n\r\n\r\ndef save_object(obj, filename: str, output_dir: WindowsPath):\r\n \"\"\"Saves the obj as a pickle file in the output_dir as filename\r\n \r\n Keyword arguments\r\n obj -- the object to be saved\r\n filename -- the name of the file\r\n output_dir -- a pathlib.WindowsPath object into which the pickle file will \r\n be saved\r\n \"\"\"\r\n \r\n path = output_dir.joinpath(filename)\r\n \r\n print(f'Pickling to {path}.')\r\n \r\n with open(path, 'wb') as outfile:\r\n pickle.dump(obj, outfile)\r\n \r\n#TODO\r\n# Handle existing columns\r\ndef add_column(\r\n df: pd.DataFrame, \r\n 
data_dir: WindowsPath,\r\n filename: str):\r\n \"\"\"Joins a pickled objects (in /OUTPUT/columns) to the right of \r\n the DataFrame; join on the index\r\n \r\n Keyword arguments\r\n df -- the dataframe of the student data\r\n path -- a WindowsPath object \r\n filename -- a string containing the name of the file\r\n \"\"\"\r\n df_copy = df.copy(deep=True)\r\n\r\n s = load_column(data_dir, filename)\r\n # Drop duplicates and keep the first one\r\n s = s[~s.index.duplicated(keep='last')]\r\n \r\n # If the input is a series\r\n if isinstance(s, pd.core.frame.Series):\r\n new_cols = [s.name]\r\n elif isinstance(s, pd.core.frame.DataFrame):\r\n new_cols = s.columns.tolist()\r\n \r\n if all(col in df_copy.columns for col in new_cols):\r\n print(f'{new_cols} already exists.')\r\n else: \r\n s.name = filename\r\n \r\n df_copy = df_copy.join(s)\r\n \r\n return df_copy\r\n\r\n\r\ndef load_column(\r\n path: WindowsPath,\r\n filename: str):\r\n \"\"\"Returns a pickled Series filename path\r\n \r\n Keyword arguments\r\n path -- a WindowsPath object \r\n filename -- a string containing the name of the file\r\n \"\"\"\r\n \r\n with open(path.joinpath(filename), 'rb') as infile:\r\n s = pickle.load(infile)\r\n \r\n s.name = filename\r\n \r\n return s\r\n\r\n\r\ndef fix_column_ratings(df: pd.DataFrame):\r\n \"\"\"Updates the low student ratings for those with the intent of \r\n PRESSED_WRONG_BUTTON, because they intended to rate highly instead, which\r\n was confirmed by the comments. The mapping 1 → 5, 2 → 4.\r\n Saves output to disk.\r\n \r\n Keyword arguments\r\n df -- the dataframe of the student data\r\n \"\"\"\r\n \r\n query = 'luis_intent_pickle == \"PRESSED_WRONG_BUTTON\" and student_rating < 3'\r\n df.loc[df.query(query).index, 'student_rating_fixed'] = df.query(query)['student_rating'].map({1:5, 2:4, 3:3})\r\n \r\n # Save to disk\r\n path = Path.cwd().joinpath('OUTPUT').joinpath('student_rating_fixed')\r\n with open(path, 'wb') as out:\r\n pickle.dump(df.student_rating_fixed, out)\r\n \r\n \r\ndef get_seconds_from_timedelta(td):\r\n \"\"\"Convert pandas._libs.tslibs.timedeltas.Timedelta to seconds\r\n \r\n Keyword arguments\r\n td -- a pandas._libs.tslibs.timedeltas.Timedelta object\r\n \"\"\"\r\n \r\n unit = td.days * 24 * 3600 + td.seconds\r\n return unit\r\n\r\n\r\ndef rename_columns(df: pd.DataFrame, service: str, config_df: pd.DataFrame) -> pd.DataFrame:\r\n \"\"\"Rename the columns of the dataframe according to the config file\r\n \r\n Keyword arguments\r\n df -- pd.DataFrame of the data set\r\n service -- \"wf\" or \"cl\"\r\n config_df -- mapping of original and new column names\r\n \"\"\"\r\n \r\n service = service.lower()\r\n \r\n # Drop the columns that don't have the new name\r\n config_df = config_df.query('service == @service').dropna(subset = ['new_column_name'], how='any', axis='rows')\r\n \r\n # Make a dictionary of {original_column_name: new_column_name}\r\n rename_dict = {pair[0]:pair[1] for pair in config_df[['original_column_name', 'new_column_name']].to_dict(orient='split')['data']}\r\n \r\n return df.rename(columns=rename_dict)\r\n\r\n\r\ndef load_data(\r\n data_dir: WindowsPath,\r\n filename: str = None\r\n ) -> Tuple[pd.DataFrame, pd.DataFrame]:\r\n \"\"\"Loading the two data sets from xlsx files.\r\n \r\n Keyword arguments\r\n data_dir -- a pathlib.WindowsPath object to the DATA folder\r\n filename -- a string that contains the location of the data, if None then\r\n load the original two Excel files\r\n \"\"\"\r\n \r\n if filename:\r\n path = 
data_dir.joinpath(filename)\r\n \r\n if '.csv' in filename:\r\n result = pd.read_csv(path)\r\n # Else assume it's a pickle file\r\n else:\r\n with open(path, 'rb') as infile:\r\n result = pickle.load(infile)\r\n else:\r\n wb = xw.Book(str(data_dir.joinpath('CL_20190823.xlsx')))\r\n cl_df = wb.sheets[0].used_range.options(pd.DataFrame, index = False, header = True).value\r\n \r\n wb = xw.Book(str(data_dir.joinpath('WF_20190826.xlsx')))\r\n wf_df = wb.sheets[0].used_range.options(pd.DataFrame, index = False, header = True).value\r\n \r\n wb.app.quit()\r\n \r\n result = (cl_df, wf_df)\r\n \r\n return result\r\n\r\ndef load_config(filename: str, config_dir: WindowsPath) -> pd.DataFrame:\r\n \"\"\"Load csv config files from the /CONFIG/ folder.\r\n \r\n Keyword arguments\r\n filename -- name of the config file to be loaded, it is assumed to be of \r\n csv format\r\n data_dir -- a pathlib.WindowsPath object to the DATA folder.\r\n \"\"\"\r\n \r\n filename = filename + '.csv' if '.csv' not in filename else filename\r\n \r\n config_path = config_dir.joinpath(filename)\r\n \r\n return pd.read_csv(config_path)\r\n\r\n\r\ndef set_column_type(column: pd.Series, \r\n service: str, \r\n config: pd.DataFrame) -> pd.Series:\r\n \"\"\"Set the dtype of each column according to the config file.\r\n \r\n Keyword arguments\r\n column -- a column of the data set\r\n service -- 'CL' or 'WF'\r\n config -- a dataframe with the mapping of column to data type\r\n \"\"\"\r\n \r\n service = service.upper()\r\n \r\n config = config.query('service == @service')\r\n column_name = column.name\r\n col_type = config.loc[(service, column_name), 'type']\r\n \r\n if col_type is None:\r\n result = column\r\n \r\n # Somehow loading from Excel leads to a float64 type instead of a date type\r\n elif col_type == 'datetime64':\r\n if column.dtype == 'float64':\r\n result = column.apply(convert_excel_float_to_date)\r\n else:\r\n result = column\r\n elif col_type == 'ordinal':\r\n result = pd.Categorical(column, ordered = True)\r\n \r\n # Some columns, such as the 'qualifications' column contains comma \r\n # separated values, these are converted into a list\r\n elif col_type == 'list':\r\n result = column.str.split(',')\r\n elif col_type == 'int64':\r\n result = column.fillna(0).astype(col_type)\r\n elif col_type == 'str':\r\n result = column.fillna('').astype(col_type)\r\n else:\r\n result = column.astype(col_type)\r\n \r\n return result\r\n\r\n\r\ndef set_column_type2(column: pd.Series, \r\n config: pd.DataFrame) -> pd.Series:\r\n \"\"\"Set the dtype of each column according to the config file. 
Doesn't \r\n require the data set to be divided in to \"CL\" and \"WF\".\r\n \r\n Keyword arguments\r\n column -- a column of the data set\r\n service -- 'CL' or 'WF'\r\n config -- a dataframe with the mapping of column to data type\r\n \"\"\"\r\n \r\n column_name = column.name\r\n col_type = config.loc[column_name, 'type']\r\n \r\n if (col_type is None) or column_name == 'student_comment_processed':\r\n result = column\r\n \r\n # Somehow loading from Excel leads to a float64 type instead of a date type\r\n elif col_type == 'datetime64':\r\n if column.dtype == 'float64':\r\n result = column.apply(convert_excel_float_to_date)\r\n else:\r\n result = column\r\n elif col_type == 'ordinal':\r\n result = pd.Categorical(column, ordered = True)\r\n \r\n # Some columns, such as the 'qualifications' column contains comma \r\n # separated values, these are converted into a list\r\n elif col_type == 'list':\r\n result = column.str.split(',')\r\n elif col_type == 'int64':\r\n result = column.fillna(0).astype(col_type)\r\n elif col_type == 'str':\r\n result = column.fillna('').astype(col_type)\r\n else:\r\n result = column.astype(col_type)\r\n \r\n return result\r\n\r\n\r\ndef merge_datasets(df_dict: Dict[str, pd.DataFrame]) -> pd.DataFrame:\r\n \"\"\"Merge the CL and WF data sets into one dataframe. Merging is done on \r\n the common columns.\r\n \r\n Keyword arguments\r\n df1 -- Either the CL dataframe or the WF dataframe\r\n df2 -- Either the WF dataframe or the CL dataframe\r\n \"\"\"\r\n \r\n temp_cl = df_dict['cl'].copy(deep=True)\r\n temp_wf = df_dict['wf'].copy(deep=True)\r\n \r\n # Add the service so they can be grouped later\r\n temp_cl['service'] = 'cl'\r\n temp_wf['service'] = 'wf'\r\n \r\n cl_set = set(temp_cl.columns)\r\n wf_set = set(temp_wf.columns)\r\n \r\n common_variables = cl_set.intersection(wf_set)\r\n \r\n result_df = pd.concat([temp_cl[common_variables], temp_wf[common_variables]], ignore_index=True)\r\n \r\n return result_df\r\n\r\n\r\ndef calc_time_difference(s1: pd.Series, s2: pd.Series) -> pd.Series:\r\n \"\"\"Calculate the difference in seconds between two columns of type \r\n datetime.\r\n \r\n Keyword arguments\r\n s1 -- the series of the earlier time (dtype datetime64), typically the time \r\n of submission\r\n s2 -- the series of the later time (dtype datetime64), typically the time \r\n of completion\r\n \"\"\"\r\n \r\n diff = (s2 - s1).apply(get_seconds_from_timedelta)\r\n \r\n return diff\r\n\r\n\r\ndef get_special_characters(comments: pd.Series) -> Dict[str, int]:\r\n \"\"\"Finds and counts of non-alphanumeric characters in the series. 
\r\n \r\n Keyword arguments\r\n comments -- a spacy.tokens.doc.Doc object\r\n \"\"\"\r\n count_dict = defaultdict(int)\r\n \r\n for comment in comments:\r\n for token in comment:\r\n if not token.is_alpha and not token.is_punct:\r\n count_dict[token.orth_] += 1\r\n \r\n return count_dict\r\n\r\n\r\ndef replace_with_apostrophe(comments: pd.Series) -> pd.Series:\r\n \"\"\"Replaces ’ with an apostrophe\r\n \r\n Keyword arguments\r\n comments -- a series of dtype 'object' \r\n \"\"\"\r\n \r\n comments_replaced = comments.str.replace('’', \"'\")\r\n return comments_replaced\r\n\r\n\r\ndef remove_stopwords(student_comment: str) -> pd.Series:\r\n \"\"\"Remove the stopwords from the student comments\r\n \r\n Keyword arguments\r\n student_comment -- a spacy.tokens.doc.Doc object\r\n \"\"\"\r\n \r\n return student_comment.apply(lambda x: ' '.join([token.text for token in x if not token.is_stop]))\r\n\r\n\r\ndef get_luis_url(config_path: str = None) -> str:\r\n \"\"\"Get the API endpoint for the LUIS.ai API\r\n \r\n Keyword arguments\r\n config_path -- a string to the csv file that contains the authoring key and\r\n the app ID\r\n \"\"\"\r\n \r\n if not config_path:\r\n config_path = (Path\r\n .cwd()\r\n .parents[0]\r\n .joinpath('CONFIG')\r\n .joinpath('luis_keys.csv'))\r\n \r\n config_df = pd.read_csv(config_path).set_index('key')\r\n\r\n authoring_key = config_df.loc['authoring_key', 'value']\r\n app_id = config_df.loc['app_id', 'value']\r\n \r\n luis_url = f'https://australiaeast.api.cognitive.microsoft.com/luis/v2.0/apps/{app_id}?verbose=true&timezoneOffset=0&subscription-key={authoring_key}'\r\n\r\n return luis_url\r\n\r\n\r\ndef map_columns(\r\n s: pd.Series,\r\n mapping_df: pd.DataFrame\r\n ) -> pd.Series:\r\n \"\"\"Map the values of a pd.Series to another one based on the mapping_df. 
\r\n This is used for cleaning up categorical values.\r\n \r\n Keyword arguments\r\n s -- the pd.Series containing the original data\r\n mapping_df -- the pd.DataFrame containing the mappings\r\n \"\"\"\r\n \r\n original_df = pd.DataFrame(s)\r\n \r\n original_df = original_df.merge(\r\n mapping_df,\r\n how='left',\r\n left_on=original_df.columns[0],\r\n right_on=mapping_df.columns[0],\r\n )\r\n \r\n return original_df.iloc[:,-1]\r\n\r\n\r\ndef calc_percentage_counts(\r\n s: pd.Series,\r\n names: List[str] = None\r\n ) -> pd.DataFrame:\r\n \"\"\"Create a pd.DataFrame of value counts and percentage counts of a \r\n categorical pd.Series\r\n \r\n Keyword arguments\r\n s -- the pd.Series containing the original data\r\n names -- the column names\r\n \"\"\"\r\n \r\n value_counts = s.value_counts()\r\n percentage_counts = value_counts / len(s)\r\n \r\n if names:\r\n col1 = names[0]\r\n col2 = names[1]\r\n else:\r\n col1 = 'count'\r\n col2 = 'proportion'\r\n \r\n df = pd.DataFrame({col1: value_counts,\r\n col2: percentage_counts,\r\n }\r\n ).sort_index()\r\n \r\n return df\r\n\r\n\r\ndef map_column_dtype(\r\n column: pd.Series,\r\n mapping_df: pd.DataFrame,\r\n ) -> pd.DataFrame:\r\n \"\"\"Change the dtype of a pd.Series to the value specified in mapping_df\r\n \r\n Keyword arguments\r\n s -- the pd.Series containing the original data\r\n names -- the column names\r\n \"\"\"\r\n \r\n column_copy = column.copy(deep=True)\r\n \r\n column_type = mapping_df.loc[column_copy.name, 'type']\r\n if column_type == 'int64':\r\n column_copy = column_copy.fillna(0)\r\n \r\n column_copy = column_copy.astype(column_type)\r\n \r\n return column_copy\r\n\r\n\r\ndef simple_impute(\r\n df: pd.DataFrame,\r\n imputation_dict = None\r\n ) -> pd.DataFrame:\r\n \"\"\"Fill the missing data with zeroes and \"missing\" for numeric and \r\n categorical variables, respectively.\r\n \r\n Keyword arguments\r\n s -- the pd.Series containing the original data\r\n imputation_dict -- the value to impute for a particular data type; the \r\n default is {'category': 'missing', 'numeric': 0}\r\n \"\"\"\r\n \r\n df_copy = df.copy(deep=True)\r\n if imputation_dict is None:\r\n imputation_dict = {'category': 'missing',\r\n 'numeric': 0}\r\n \r\n # Categorical variables\r\n # First add the \"missing\" category to the categorical variables, then fill\r\n # the null values with \"missing'\r\n cat_columns = (df_copy\r\n .select_dtypes(include='category')\r\n .columns\r\n )\r\n \r\n df_copy.loc[:, cat_columns] = (\r\n df_copy\r\n .select_dtypes(include='category')\r\n .apply(lambda x: x\r\n .cat\r\n .add_categories(imputation_dict['category'])\r\n .fillna('missing')\r\n if any(x.isnull()) \r\n else x\r\n ) \r\n )\r\n \r\n # Non-categorical variables\r\n non_cat_columns = (\r\n df_copy\r\n .select_dtypes(exclude='category')\r\n .columns\r\n )\r\n df_copy.loc[:, non_cat_columns] = (\r\n df_copy\r\n .select_dtypes(exclude='category')\r\n .fillna(imputation_dict['numeric'])\r\n )\r\n \r\n return df_copy\r\n \r\n \r\ndef preprocess(\r\n df: pd.DataFrame,\r\n service: str\r\n ) -> pd.DataFrame:\r\n \"\"\"Takes a dataframe of the format CL or WF and outputs a format that is \r\n accepted by the machine learning models.\r\n \r\n Keyword arguments\r\n df -- the dataframe containing the rows to be fed into the machine learning\r\n algorithm for prediction\r\n service -- either \"cl\" or \"wf\"\r\n \"\"\"\r\n \r\n# =============================================================================\r\n# Set up directories\r\n# 
=============================================================================\r\n config_dir = Path.cwd().joinpath('CONFIG')\r\n \r\n# =============================================================================\r\n# Define column types\r\n# =============================================================================\r\n column_types = load_config('mapping_column_types.csv', config_dir)\r\n column_types.set_index(keys = ['service', 'field'], inplace = True)\r\n \r\n df_formatted = df.apply(lambda x: set_column_type(x,\r\n service.upper(), \r\n column_types))\r\n \r\n# =============================================================================\r\n# Rename columns\r\n# =============================================================================\r\n config_column_names = load_config('columns_original.csv', config_dir)\r\n df_renamed = rename_columns(df_formatted, \r\n service.upper(), \r\n config_column_names)\r\n \r\n# =============================================================================\r\n# Add \"wait_seconds\" to WF\r\n# =============================================================================\r\n if service.lower() == 'wf':\r\n s1 = df_renamed.started_at\r\n s2 = df_renamed.completed_at\r\n diff = calc_time_difference(s1, s2)\r\n df_renamed['wait_seconds'] = diff\r\n\r\n# =============================================================================\r\n# Add a service column\r\n# =============================================================================\r\n df_renamed['service'] = service.lower()\r\n \r\n# =============================================================================\r\n# Subset to the common set of columns in both CL and WF\r\n# =============================================================================\r\n common_columns = pd.read_csv(config_dir.joinpath('common_columns.csv'))\r\n df_renamed = df_renamed[common_columns.column.values]\r\n \r\n# =============================================================================\r\n# Extract features\r\n# =============================================================================\r\n result_df = df_renamed.copy(deep=True)\r\n result_df['student_comment_apostrophe'] = (\r\n replace_with_apostrophe(result_df.student_comment)\r\n )\r\n \r\n # spaCy\r\n nlp = spacy.load('en_core_web_lg')\r\n # Create spaCy doc objects\r\n result_df['student_comment_processed'] = (\r\n result_df.student_comment_apostrophe.apply(nlp)\r\n )\r\n # Remove Stopwords\r\n result_df['student_comment_no_stopwords'] = (\r\n remove_stopwords(result_df.student_comment_processed)\r\n )\r\n # Cleaning up year_level\r\n mapping_year_level = load_data(config_dir,'mapping_year_level.csv')\r\n result_df['year_level_cleaned'] = map_columns(\r\n result_df.year_level,\r\n mapping_year_level\r\n )\r\n result_df.drop(labels='year_level', axis='columns', inplace=True)\r\n # Numeric rating, instead of categorical\r\n result_df['student_rating_numeric'] = (\r\n result_df.student_rating.astype('float')\r\n )\r\n # Length of characters and words\r\n result_df['student_comment_char_length'] = (\r\n result_df\r\n .student_comment\r\n .apply(lambda x: feature_extraction.get_comment_length(x, 'character'))\r\n )\r\n result_df['student_comment_word_length'] = (\r\n result_df\r\n .student_comment\r\n .apply(lambda x: feature_extraction.get_comment_length(x, 'word'))\r\n )\r\n # Adding POS tags\r\n student_comment_pos_tags = (\r\n feature_extraction\r\n .get_pos_tags(result_df.student_comment_processed)\r\n )\r\n result_df = pd.concat([result_df, 
student_comment_pos_tags], axis='columns')\r\n # Adding number of PERSON entities\r\n result_df['student_comment_num_person_entities'] = (\r\n result_df\r\n .student_comment_processed\r\n .apply(lambda x: feature_extraction.count_entities(x, 'PERSON'))\r\n )\r\n \r\n config_dir = Path.cwd().joinpath('CONFIG')\r\n \r\n # Adding tutor and student average ratings\r\n# =============================================================================\r\n# Load the tutor ratings and student ratings for lookup\r\n# =============================================================================\r\n tutor_ratings = pd.read_csv(config_dir.joinpath('tutor_ratings.csv'))\r\n student_ratings = pd.read_csv(config_dir.joinpath('student_ratings.csv'))\r\n \r\n # Get the latest score for each tutor_id and student_id, note that \r\n # result_df could have multiple rows \r\n for elem in [\r\n 'average_tutor_rating_5d_total',\r\n 'average_tutor_rating_over_5d_cl',\r\n 'average_tutor_rating_over_5d_wf',\r\n ]:\r\n result_df[elem] = (\r\n result_df\r\n .tutor_id\r\n .apply(lambda x: get_latest_score(df=tutor_ratings,\r\n id_number=x,\r\n student_tutor='tutor',\r\n score_type=elem))\r\n )\r\n \r\n for elem in [\r\n 'average_student_rating_5d_total',\r\n 'average_student_rating_over_5d_cl',\r\n 'average_student_rating_over_5d_wf',\r\n ]:\r\n result_df[elem] = (\r\n result_df\r\n .student_id\r\n .apply(lambda x: get_latest_score(df=student_ratings,\r\n id_number=x,\r\n student_tutor='student',\r\n score_type=elem))\r\n )\r\n \r\n # Student start date\r\n result_df['student_start_date'] = (\r\n feature_extraction\r\n .get_student_start_date(result_df)\r\n )\r\n \r\n # Add tutor age\r\n tutor_dates_df = (result_df[['started_at', 'tutor_birth_year']]\r\n .replace(0, np.nan)\r\n .dropna(how='any', axis='rows')\r\n )\r\n result_df['tutor_age'] = tutor_dates_df.started_at.dt.year - tutor_dates_df.tutor_birth_year\r\n \r\n # Add tutor experience in days\r\n tutor_days_df = (result_df[['started_at', 'tutor_start_date']]\r\n .replace(0, np.nan)\r\n .dropna(how='any', axis='rows')\r\n )\r\n result_df['tutor_experience_days'] = (tutor_days_df.started_at - tutor_days_df.tutor_start_date).dt.days\r\n \r\n # Add tutor number of sessions\r\n tutor_sessions = feature_extraction.expanding_count(\r\n df = result_df,\r\n grouping_var = 'tutor_id',\r\n split_by_service = True,\r\n )\r\n result_df['tutor_num_sessions_cl'] = tutor_sessions['tutor_num_sessions_cl']\r\n result_df['tutor_num_sessions_wf'] = tutor_sessions['tutor_num_sessions_wf']\r\n \r\n tutor_sessions_total = feature_extraction.expanding_count(\r\n df = result_df,\r\n grouping_var = 'tutor_id',\r\n split_by_service = False,\r\n )\r\n result_df['tutor_sessions_total'] = tutor_sessions_total\r\n \r\n # Add student number of sessions\r\n student_sessions = feature_extraction.expanding_count(\r\n df = result_df,\r\n grouping_var = 'student_id',\r\n split_by_service = True,\r\n )\r\n result_df['student_num_sessions_cl'] = student_sessions['student_num_sessions_cl']\r\n result_df['student_num_sessions_wf'] = student_sessions['student_num_sessions_wf']\r\n \r\n student_sessions_total = feature_extraction.expanding_count(\r\n df = result_df,\r\n grouping_var = 'student_id',\r\n split_by_service = False,\r\n )\r\n result_df['student_sessions_total'] = student_sessions_total \r\n \r\n # Add sex guess\r\n result_df['sex_guess'] = feature_extraction.guess_sex(result_df.first_name)\r\n \r\n# =============================================================================\r\n# Set data 
types\r\n# =============================================================================\r\n mapping_column_types_extended = load_data(\r\n config_dir,\r\n 'mapping_column_types_extended.csv'\r\n ).set_index('columns') \r\n \r\n result_df['student_rating'] = 0\r\n \r\n result_df = (result_df\r\n .apply(lambda x: map_column_dtype(x, mapping_column_types_extended))\r\n ) \r\n \r\n# =============================================================================\r\n# Fill missing data\r\n# =============================================================================\r\n result_df = simple_impute(result_df)\r\n \r\n# =============================================================================\r\n# Using ml_columns.csv, add empty columns\r\n# =============================================================================\r\n ml_columns_df = load_data(\r\n config_dir,\r\n 'ml_columns.csv',\r\n )\r\n ml_columns = ml_columns_df.query('use == 1')['columns'].values\r\n \r\n missing_cols = set(ml_columns) - set(result_df.columns)\r\n for col in missing_cols:\r\n result_df[col] = 0\r\n \r\n result_df = result_df[ml_columns]\r\n \r\n# =============================================================================\r\n# Define column types\r\n# =============================================================================\r\n mapping_column_types_extended = load_data(\r\n config_dir,\r\n 'mapping_column_types_extended.csv'\r\n ).set_index('columns')\r\n \r\n result_df = (result_df\r\n .apply(lambda x: map_column_dtype(x, mapping_column_types_extended))\r\n ) \r\n \r\n# =============================================================================\r\n# Get dummies\r\n# =============================================================================\r\n result_df = pd.get_dummies(result_df,\r\n drop_first=True)\r\n\r\n \r\n \r\n# =============================================================================\r\n# Convert to dummy variable for the machine learning model\r\n# =============================================================================\r\n \r\n return result_df\r\n\r\n\r\ndef get_latest_score(df: pd.DataFrame,\r\n id_number: float,\r\n student_tutor: str,\r\n score_type: str\r\n ) -> float:\r\n \"\"\"Gets the latest student or tutor score\r\n \"\"\"\r\n score = 3\r\n if student_tutor == 'tutor':\r\n score_df = df.query('tutor_id == @id_number')\r\n score = score_df[score_df.started_at == score_df.started_at.max()][score_type].values[0]\r\n if student_tutor == 'student':\r\n score_df = df.query('student_id == @id_number')\r\n score = score_df[score_df.started_at == score_df.started_at.max()][score_type].values[0]\r\n \r\n score = 3 if np.isnan(score) else score\r\n \r\n return score\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "numpy.isnan", "pandas.DataFrame", "pandas.Categorical", "pandas.concat", "pandas.read_csv", "pandas.get_dummies" ] ]
chenyao96/TVDNet
[ "26fb2bfe65612b89b1a98edfe3d3e7308c6272e3", "26fb2bfe65612b89b1a98edfe3d3e7308c6272e3" ]
[ "TVDNet/test.py", "TVDNet/models/spinal_net.py" ]
[ "import torch\nimport numpy as np\nfrom models import spinal_net\nimport cv2\nimport decoder\nimport os\nfrom dataset import BaseDataset\nimport draw_points\n# from boostnet_labeldata.tools import draw_gt_pts3\nimport argparse\n\ndef apply_mask(image, mask, alpha=0.5):\n \"\"\"Apply the given mask to the image.\n \"\"\"\n color = np.random.rand(3)\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n return image\n\nclass Network(object):\n def __init__(self, args):\n torch.manual_seed(317)\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n heads = {'hm': args.num_classes,\n 'reg': 2*args.num_classes,\n 'wh': 2*4,}\n # 'mid_point':2*args.num_classes,}\n\n self.model = spinal_net.SpineNet(heads=heads,\n pretrained=True,\n down_ratio=args.down_ratio,\n final_kernel=1,\n head_conv=256)\n self.num_classes = args.num_classes\n self.decoder = decoder.DecDecoder(K=args.K, conf_thresh=args.conf_thresh)\n self.dataset = {'spinal': BaseDataset}\n\n def load_model(self, model, resume):\n checkpoint = torch.load(resume, map_location=lambda storage, loc: storage)\n print('loaded weights from {}, epoch {}'.format(resume, checkpoint['epoch']))\n state_dict_ = checkpoint['state_dict']\n model.load_state_dict(state_dict_, strict=False)\n return model\n\n def map_mask_to_image(self, mask, img, color=None):\n if color is None:\n color = np.random.rand(3)\n mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2)\n mskd = img * mask\n clmsk = np.ones(mask.shape) * mask\n clmsk[:, :, 0] = clmsk[:, :, 0] * color[0] * 256\n clmsk[:, :, 1] = clmsk[:, :, 1] * color[1] * 256\n clmsk[:, :, 2] = clmsk[:, :, 2] * color[2] * 256\n img = img + 1. * clmsk - 1. * mskd\n return np.uint8(img)\n\n\n def test(self, args, save):\n save_path = args.weights_dir # +args.dataset #_spinal_12.7(best)\n self.model = self.load_model(self.model, os.path.join(save_path, args.resume))\n self.model = self.model.to(self.device)\n self.model.eval()\n\n dataset_module = self.dataset[args.dataset]\n dsets = dataset_module(data_dir=args.data_dir,\n phase='test',\n input_h=args.input_h,\n input_w=args.input_w,\n down_ratio=args.down_ratio)\n\n data_loader = torch.utils.data.DataLoader(dsets,\n batch_size=1,\n shuffle=False,\n num_workers=1,\n pin_memory=True)\n\n\n for cnt, data_dict in enumerate(data_loader):\n images = data_dict['images'][0]\n img_id = data_dict['img_id'][0]\n images = images.to('cuda')\n print('processing {}/{} image ... 
{}'.format(cnt, len(data_loader), img_id))\n            with torch.no_grad():\n                output = self.model(images)\n                # print(output)\n                hm = output['hm']  #\n                wh = output['wh']\n                reg = output['reg']\n\n            torch.cuda.synchronize(self.device)\n            pts2 = self.decoder.ctdet_decode(hm, wh, reg)   # 17, 11\n            # print(pts2.shape)\n            pts0 = pts2.copy()\n            pts0[:, :10] *= args.down_ratio\n\n            print('total pts num is {}'.format(len(pts2)))\n\n            ori_image = dsets.load_image(dsets.img_ids.index(img_id))\n            # ori_image2 = draw_gt_pts3(img_id, ori_image)  # draw the ground-truth points on the original image\n            # ori_image2 = cv2.resize(ori_image2, (args.input_w, args.input_h))\n\n            ori_image_regress = cv2.resize(ori_image, (args.input_w, args.input_h))\n            ori_image_points = ori_image_regress.copy()\n            ori_image_heatmap = ori_image_regress.copy()\n\n            h, w, c = ori_image.shape\n            pts0 = np.asarray(pts0, np.float32)\n            # pts0[:, 0::2] = pts0[:, 0::2] / args.input_w * w\n            # pts0[:, 1::2] = pts0[:, 1::2] / args.input_h * h\n            sort_ind = np.argsort(pts0[:, 1])\n            # print(sort_ind)\n            pts0 = pts0[sort_ind]\n            # print(pts0)\n\n            ori_image_heatmap, ori_image_regress, ori_image_points = \\\n                draw_points.draw_landmarks_regress_test(pts0, ori_image_heatmap, ori_image_regress, ori_image_points)\n\n            ori_image_heatmap = cv2.resize(ori_image_heatmap, (512, 800))\n            ori_image_regress = cv2.resize(ori_image_regress, (512, 800))\n            ori_image_points = cv2.resize(ori_image_points, (512, 800))\n\n            cv2.imshow('ori_image_heatmap', ori_image_heatmap)\n            cv2.imshow('ori_image_regress', ori_image_regress)\n            cv2.imshow('ori_image_points', ori_image_points)\n            # cv2.imshow('gt_image_points', ori_image2)\n            k = cv2.waitKey(0) & 0xFF\n            if k == ord('q'):\n                cv2.destroyAllWindows()\n                exit()\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='CenterNet Modification Implementation')\n    parser.add_argument('--transfer', type=bool, default=False, help='transfer train flag')\n    parser.add_argument('--num_epoch', type=int, default=50, help='Number of epochs')\n    parser.add_argument('--batch_size', type=int, default=4, help='Batch size')\n    parser.add_argument('--num_workers', type=int, default=4, help='Number of workers')\n    parser.add_argument('--init_lr', type=float, default=1.25e-4, help='Init learning rate')  # 1.25e-4\n    parser.add_argument('--down_ratio', type=int, default=4, help='down ratio')\n    parser.add_argument('--input_h', type=int, default=1024, help='input height')\n    parser.add_argument('--input_w', type=int, default=512, help='input width')\n    parser.add_argument('--K', type=int, default=17, help='maximum of objects')\n    parser.add_argument('--conf_thresh', type=float, default=0.2, help='confidence threshold')\n    parser.add_argument('--seg_thresh', type=float, default=0.5, help='segmentation threshold')\n    parser.add_argument('--num_classes', type=int, default=1, help='number of classes')\n    parser.add_argument('--ngpus', type=int, default=1, help='number of gpus')\n    parser.add_argument('--resume', type=str, default='model_last_2020-11-16 15:55:16.pth', help='weights to be resumed')\n    parser.add_argument('--weights_dir', type=str, default='weights_spinal', help='weights dir')\n    parser.add_argument('--data_dir', type=str, default='Datasets/', help='data directory')\n    parser.add_argument('--phase', type=str, default='test', help='phase to run')\n    parser.add_argument('--dataset', type=str, default='spinal', help='dataset name')\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    is_object = Network(args)\n    is_object.test(args, save=False)", "from .dec_net import DecNet\nfrom . import resnet\nimport torch.nn as nn\nimport numpy as np\n\n\nclass SpineNet(nn.Module):\n    def __init__(self, heads, pretrained, down_ratio, final_kernel, head_conv):\n        super(SpineNet, self).__init__()\n        assert down_ratio in [2, 4, 8, 16]\n        channels = [3, 64, 64, 128, 256, 512]\n        self.l1 = int(np.log2(down_ratio))\n        self.base_network = resnet.resnet34(pretrained=pretrained)  # resnet34, pretrained\n\n        # Freeze the ResNet backbone so it is not trained:\n        # for p in self.parameters():\n        #     p.requires_grad = False\n\n        self.dec_net = DecNet(heads, final_kernel, head_conv, channels[self.l1])\n\n    def forward(self, x):\n        x = self.base_network(x)\n        dec_dict = self.dec_net(x)\n        return dec_dict\n\n\n" ]
[ [ "numpy.uint8", "numpy.random.rand", "torch.cuda.synchronize", "numpy.asarray", "torch.no_grad", "numpy.ones", "torch.manual_seed", "numpy.where", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load", "numpy.argsort", "numpy.repeat" ], [ "numpy.log2" ] ]
Horizon2333/videoqa_dataset_visualization
[ "38145885362fc0780bb041dd9dcdbc5f140162e7" ]
[ "msvd_visualize.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@Author : Horizon\r\n@Date   : 2021-03-28 15:54:32\r\n\"\"\"\r\n\r\nimport os\r\nimport cv2\r\nimport json\r\nimport random\r\nimport argparse\r\nimport matplotlib.pyplot as plt\r\n\r\ndef get_msvd_item(msvd_path, anno):\r\n\r\n    question = anno['question']\r\n    answer = anno['answer']\r\n\r\n    video_id = anno['video_id']\r\n\r\n    cap = cv2.VideoCapture(os.path.join(msvd_path, \"video/vid{}.avi\".format(video_id)))\r\n\r\n    assert cap.isOpened()\r\n\r\n    _, first_frame = cap.read()\r\n\r\n    first_frame = cv2.cvtColor(first_frame, cv2.COLOR_BGR2RGB)\r\n\r\n    cap.release()\r\n\r\n    return first_frame, question, answer\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    parser = argparse.ArgumentParser(description='check data')\r\n    parser.add_argument('--path', dest='path', default=\"F:/Dataset/MSVD-QA\", type=str)\r\n    args = parser.parse_args()\r\n\r\n    msvd_path = args.path\r\n\r\n    msvd_val_annotation_path = os.path.join(msvd_path, \"val_qa.json\")\r\n\r\n    with open(msvd_val_annotation_path) as f:\r\n        annotation = json.load(f)\r\n\r\n    annotation_length = len(annotation)\r\n\r\n    font = {'size': 15,\r\n            'family': 'Times New Roman',\r\n            'style': 'italic'}\r\n\r\n    plt.figure(1, figsize=(16, 9))\r\n    plt.title(\"MSVD-QA dataset visualization\")\r\n\r\n    for i in range(4):\r\n\r\n        # random.randint is inclusive on both ends, so subtract 1 to avoid an IndexError\r\n        random_index = random.randint(0, annotation_length - 1)\r\n\r\n        random_anno = annotation[random_index]\r\n\r\n        frame, question, answer = get_msvd_item(msvd_path, random_anno)\r\n\r\n        plt.subplot(2, 2, i+1, xticks=[], yticks=[])\r\n\r\n        frame_height = frame.shape[0]\r\n\r\n        plt.imshow(frame)\r\n        plt.text(0, frame_height * 1.06, \"Q: \"+question.capitalize(), fontdict=font)\r\n        plt.text(0, frame_height * 1.12, \"A: \"+answer.capitalize(), fontdict=font)\r\n\r\n    plt.show()\r\n\r\n" ]
[ [ "matplotlib.pyplot.subplot", "matplotlib.pyplot.title", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow" ] ]
yuqZY/ECG_Encoder_Physionet2015
[ "5642de51838b028f2315e25ea2bd8e620ef0e65c" ]
[ "models/util.py" ]
[ "import numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn import init\r\n\r\n\r\n# def initialize_weights(net_l):\r\n#     if not isinstance(net_l, list):\r\n#         net_l = [net_l]\r\n#     for net in net_l:\r\n#         for m in net.modules():\r\n#             if isinstance(m, nn.Conv1d):\r\n#                 init.kaiming_normal_(m.weight, a=0, mode='fan_in')\r\n#                 if m.bias is not None:\r\n#                     m.bias.data.zero_()\r\n#             elif isinstance(m, nn.Linear):\r\n#                 init.kaiming_normal_(m.weight, a=0, mode='fan_in')\r\n#                 if m.bias is not None:\r\n#                     m.bias.data.zero_()\r\n#             elif isinstance(m, nn.BatchNorm1d):\r\n#                 init.constant_(m.weight, 1)\r\n#                 init.constant_(m.bias.data, 0.0)\r\n\r\n\r\ndef initialize_weights(net_l):\r\n    if not isinstance(net_l, list):\r\n        net_l = [net_l]\r\n    for net in net_l:\r\n        for m in net.modules():\r\n            if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d):\r\n                init.xavier_uniform_(m.weight, gain=1)\r\n                if m.bias is not None:\r\n                    m.bias.data.zero_()\r\n            elif isinstance(m, nn.Linear):\r\n                init.xavier_uniform_(m.weight, gain=1)\r\n                if m.bias is not None:\r\n                    m.bias.data.zero_()\r\n            elif isinstance(m, nn.BatchNorm1d) or isinstance(m, nn.BatchNorm2d):\r\n                init.constant_(m.weight, 1)\r\n                init.constant_(m.bias.data, 0.0)\r\n\r\n\r\ndef initialize_params(model, batch_norm=True):\r\n    param_dict = dict(list(model.named_parameters()))\r\n    conv_weight = param_dict[\"conv.weight\"]\r\n    init.xavier_uniform_(conv_weight, gain=1)\r\n    if not batch_norm:\r\n        conv_bias = param_dict[\"conv.bias\"]\r\n        init.constant_(conv_bias, 0)\r\n    else:\r\n        bnorm_weight = param_dict[\"bnorm.weight\"]\r\n        bnorm_bias = param_dict[\"bnorm.bias\"]\r\n        init.constant_(bnorm_weight, 1)\r\n        init.constant_(bnorm_bias, 0)\r\n\r\n\r\ndef to_dense_prediction_model(model, axis=(2, 3)):\r\n    \"\"\"\r\n    Transform a sequential model with strides to a model that outputs\r\n    dense predictions by removing the strides and instead inserting dilations.\r\n    Modifies model in-place.\r\n\r\n    Parameters\r\n    ----------\r\n    model\r\n    axis: int or (int,int)\r\n        Axis to transform (in terms of intermediate output axes)\r\n        can either be 2, 3, or (2,3).\r\n\r\n    Notes\r\n    -----\r\n    Does not yet work correctly for average pooling.\r\n    Prior to version 0.1.7, there had been a bug that could move strides\r\n    backwards one layer.\r\n\r\n    \"\"\"\r\n    if not hasattr(axis, \"__len__\"):\r\n        axis = [axis]\r\n    assert all([ax in [2, 3] for ax in axis]), \"Only 2 and 3 allowed for axis\"\r\n    axis = np.array(axis) - 2\r\n    stride_so_far = np.array([1, 1])\r\n    for module in model.modules():\r\n        if hasattr(module, \"dilation\"):\r\n            assert module.dilation == 1 or (module.dilation == (1, 1)), (\r\n                \"Dilation should equal 1 before conversion, maybe the model is \"\r\n                \"already converted?\"\r\n            )\r\n            new_dilation = [1, 1]\r\n            for ax in axis:\r\n                new_dilation[ax] = int(stride_so_far[ax])\r\n            module.dilation = tuple(new_dilation)\r\n        if hasattr(module, \"stride\"):\r\n            if not hasattr(module.stride, \"__len__\"):\r\n                module.stride = (module.stride, module.stride)\r\n            stride_so_far *= np.array(module.stride)\r\n            new_stride = list(module.stride)\r\n            for ax in axis:\r\n                new_stride[ax] = 1\r\n            module.stride = tuple(new_stride)\r\n\r\n\r\nimport copy  # needed by wrap_X below\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nimport numpy as np\r\n\r\n\r\ndef wrap(y, dtype='float'):\r\n    y_wrap = Variable(torch.from_numpy(y))\r\n    if dtype == 'float':\r\n        y_wrap = y_wrap.float()\r\n    elif dtype == 'long':\r\n        y_wrap = y_wrap.long()\r\n\r\n    if torch.cuda.is_available():\r\n        y_wrap = y_wrap.cuda()\r\n    return y_wrap\r\n\r\n\r\ndef unwrap(y_wrap):\r\n    if y_wrap.is_cuda:\r\n        y = y_wrap.cpu().data.numpy()\r\n    else:\r\n        y = y_wrap.data.numpy()\r\n    return y\r\n\r\n\r\ndef wrap_X(X):\r\n    X_wrap = copy.deepcopy(X)\r\n    for jet in X_wrap:\r\n        jet[\"content\"] = wrap(jet[\"content\"])\r\n    return X_wrap\r\n\r\n\r\ndef unwrap_X(X_wrap):\r\n    X_new = []\r\n    for jet in X_wrap:\r\n        jet[\"content\"] = unwrap(jet[\"content\"])\r\n        X_new.append(jet)\r\n    return X_new\r\n\r\n\r\n## Batchization of the jets using LOUPPE'S code\r\ndef batch(jets):\r\n    jet_children = []  # [n_nodes, 2] => jet_children[nodeid, 0], jet_children[nodeid, 1]\r\n    offset = 0\r\n    for j, jet in enumerate(jets):\r\n        tree = np.copy(jet[\"tree\"])\r\n        tree[tree != -1] += offset\r\n        jet_children.append(tree)\r\n        offset += len(tree)\r\n\r\n    jet_children = np.vstack(jet_children)\r\n    jet_contents = torch.cat([jet[\"content\"] for jet in jets], 0)  # [n_nodes, n_features]\r\n    n_nodes = offset\r\n\r\n    # Level-wise traversal\r\n    level_children = np.zeros((n_nodes, 4), dtype=np.int32)\r\n    level_children[:, [0, 2]] -= 1\r\n\r\n    inners = []  # Inner nodes at level i\r\n    outers = []  # Outer nodes at level i\r\n    offset = 0\r\n\r\n    for jet in jets:\r\n        queue = [(jet[\"root_id\"] + offset, -1, True, 0)]\r\n\r\n        while len(queue) > 0:\r\n            node, parent, is_left, depth = queue.pop(0)\r\n\r\n            if len(inners) < depth + 1:\r\n                inners.append([])\r\n            if len(outers) < depth + 1:\r\n                outers.append([])\r\n\r\n            if jet_children[node, 0] != -1:  # Inner node\r\n                inners[depth].append(node)\r\n                position = len(inners[depth]) - 1\r\n                is_leaf = False\r\n\r\n                queue.append((jet_children[node, 0], node, True, depth + 1))\r\n                queue.append((jet_children[node, 1], node, False, depth + 1))\r\n\r\n            else:  # Outer node\r\n                outers[depth].append(node)\r\n                position = len(outers[depth]) - 1\r\n                is_leaf = True\r\n\r\n            if parent >= 0:  # Register node at its parent\r\n                if is_left:\r\n                    level_children[parent, 0] = position\r\n                    level_children[parent, 1] = is_leaf\r\n                else:\r\n                    level_children[parent, 2] = position\r\n                    level_children[parent, 3] = is_leaf\r\n\r\n        offset += len(jet[\"tree\"])\r\n\r\n    # Reorganize levels[i] so that inner nodes appear first, then outer nodes\r\n    levels = []\r\n    n_inners = []\r\n    contents = []\r\n\r\n    prev_inner = np.array([], dtype=int)\r\n\r\n    for inner, outer in zip(inners, outers):\r\n        n_inners.append(len(inner))\r\n        inner = np.array(inner, dtype=int)\r\n        outer = np.array(outer, dtype=int)\r\n        level = np.concatenate((inner, outer))\r\n        level = torch.from_numpy(level)\r\n        if torch.cuda.is_available(): level = level.cuda()\r\n        levels.append(level)\r\n\r\n        left = prev_inner[level_children[prev_inner, 1] == 1]\r\n        level_children[left, 0] += len(inner)\r\n        right = prev_inner[level_children[prev_inner, 3] == 1]\r\n        level_children[right, 2] += len(inner)\r\n\r\n        contents.append(jet_contents[levels[-1]])\r\n\r\n        prev_inner = inner\r\n\r\n    # levels: list of arrays\r\n    #     levels[i][j] is a node id at a level i in one of the trees\r\n    #     inner nodes are positioned within levels[i][:n_inners[i]], while\r\n    #     leaves are positioned within levels[i][n_inners[i]:]\r\n    #\r\n    # level_children: array of shape [n_nodes, 2]\r\n    #     level_children[node_id, 0] is the position j in the next level of\r\n    #     the left child of node_id\r\n    #     level_children[node_id, 1] is the position j in the next level of\r\n    #     the right child of node_id\r\n    #\r\n    # n_inners: list of shape len(levels)\r\n    #     n_inners[i] is the number of inner nodes at level i, across all\r\n    #     trees\r\n    #\r\n    # contents: array of shape [n_nodes, n_features]\r\n    #     contents[sum(len(l) for l in layers[:i]) + j] is the feature vector\r\n    #     of node layers[i][j]\r\n\r\n    level_children = torch.from_numpy(level_children).long()\r\n    n_inners = torch.from_numpy(np.array(n_inners)).long()\r\n    if torch.cuda.is_available():\r\n        level_children = level_children.cuda()\r\n        n_inners = n_inners.cuda()\r\n\r\n    return (levels, level_children[:, [0, 2]], n_inners, contents)\r\n" ]
[ [ "numpy.concatenate", "torch.cat", "numpy.array", "numpy.zeros", "torch.nn.init.constant_", "numpy.copy", "torch.nn.init.xavier_uniform_", "torch.from_numpy", "torch.cuda.is_available", "numpy.vstack" ] ]
DaPulse/redash
[ "ef3d2c869d100631d5163e74426bfa98bf596265" ]
[ "redash/query_runner/excel.py" ]
[ "import logging\nimport yaml\n\nfrom redash.utils.requests_session import requests_or_advocate, UnacceptableAddressException\n\nfrom redash.query_runner import *\nfrom redash.utils import json_dumps\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import pandas as pd\n import xlrd\n import openpyxl\n import numpy as np\n enabled = True\nexcept ImportError:\n enabled = False\n\nclass Excel(BaseQueryRunner):\n should_annotate_query = False\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n 'type': 'object',\n 'properties': {},\n }\n\n def __init__(self, configuration):\n super(Excel, self).__init__(configuration)\n self.syntax = \"yaml\"\n\n def test_connection(self):\n pass\n\n def run_query(self, query, user):\n path = \"\"\n ua = \"\"\n args = {}\n try:\n args = yaml.safe_load(query)\n path = args['url']\n args.pop('url', None)\n ua = args['user-agent']\n args.pop('user-agent', None)\n\n except:\n pass\n\n try:\n response = requests_or_advocate.get(url=path, headers={\"User-agent\": ua})\n workbook = pd.read_excel(response.content, **args)\n\n df = workbook.copy()\n data = {'columns': [], 'rows': []}\n conversions = [\n {'pandas_type': np.integer, 'redash_type': 'integer',},\n {'pandas_type': np.inexact, 'redash_type': 'float',},\n {'pandas_type': np.datetime64, 'redash_type': 'datetime', 'to_redash': lambda x: x.strftime('%Y-%m-%d %H:%M:%S')},\n {'pandas_type': np.bool_, 'redash_type': 'boolean'},\n {'pandas_type': np.object, 'redash_type': 'string'}\n ]\n labels = []\n for dtype, label in zip(df.dtypes, df.columns):\n for conversion in conversions:\n if issubclass(dtype.type, conversion['pandas_type']):\n data['columns'].append({'name': label, 'friendly_name': label, 'type': conversion['redash_type']})\n labels.append(label)\n func = conversion.get('to_redash')\n if func:\n df[label] = df[label].apply(func)\n break\n data['rows'] = df[labels].replace({np.nan: None}).to_dict(orient='records')\n\n json_data = json_dumps(data)\n error = None\n except KeyboardInterrupt:\n error = \"Query cancelled by user.\"\n json_data = None\n except UnacceptableAddressException:\n error = \"Can't query private addresses.\"\n json_data = None\n except Exception as e:\n error = \"Error reading {0}. {1}\".format(path, str(e))\n json_data = None\n\n return json_data, error\n\n def get_schema(self):\n raise NotSupported()\n\nregister(Excel)\n" ]
[ [ "pandas.read_excel" ] ]
jpraveenkanna/Fashion_Classification-Myntra_Dataset
[ "378b1a5f7189e4958843344880ba3fbb51042cc9" ]
[ "app.py" ]
[ "\n#importing dependency\nfrom fastai.vision import load_learner, ImageList, DatasetType, open_image, Path\nimport pandas as pd\nimport warnings\nimport numpy as np\nfrom flask import Flask, request, render_template\nfrom werkzeug.utils import secure_filename\nwarnings.filterwarnings('ignore')\n\n#config\nupload_path = \"static/upload\"\nlabel_path = 'Training_labels/labels.csv'\nmodel_path = 'model'\nthreshold = 85\n\n\n\ndef get_label(predicted_class):\n    return df[0].iloc[predicted_class]\n\n\ndef print_top_3_pred(preds):\n    idx = np.unravel_index(preds.numpy().argsort(axis=None), shape=preds.numpy().shape)\n    top_3_pred = idx[0][-3:]\n    top_pred = top_3_pred[-1]\n    top3_return_msg = \"\"\n    for i, val in enumerate(top_3_pred[::-1]):\n        top3_return_msg += str(str(i+1)\n                               + \".\"\n                               + str(get_label(val))\n                               + \" - {:.2f}%\\n\".format(np.round(preds[val].numpy()*100, 2))\n                               + \"\\n\")\n\n    return top3_return_msg, top_pred\n\ndef delete_uploadfiles(upload_path):\n    for file in Path(upload_path).glob('*'):\n        try:\n            file.unlink()\n\n        except OSError as e:\n            print(\"Error: %s : %s\" % (file, e.strerror))\n    return True\n\n\n\n\n\n#Reading labels\ndf = pd.read_csv(label_path, header=None)\n\n#assigning model\nmodel = model_path\n#learn = load_learner(model)\n\napp = Flask(__name__)\napp.config[\"IMAGE_UPLOADS\"] = upload_path\n\n#Removing contents in upload folder\ndelete_uploadfiles(upload_path)\n\n@app.route(\"/\")\ndef index():\n    delete_uploadfiles(upload_path)\n    return render_template('index.html')\n\n@app.route('/uploader', methods=['GET', 'POST'])\ndef upload_file():\n\n    if request.method == 'POST':\n        image = request.files['file']\n        filename = secure_filename(image.filename)\n\n        #saving file in upload path\n        image.save(Path(app.config[\"IMAGE_UPLOADS\"] + \"/\" + filename))\n\n        my_dict = {}\n        #loading images from upload path\n        img_list_loader = ImageList.from_folder(upload_path)\n\n        #Checking if valid images are uploaded\n        if len(img_list_loader.items) > 0:\n            #loading model\n            load_model = load_learner(model,\n                                      test=img_list_loader)\n            #running inference\n            preds, y = load_model.get_preds(ds_type=DatasetType.Test)\n            index = 0\n\n            #Processing results for UI\n            for preds, img_src in zip(preds, img_list_loader.items):\n\n                top3_return_msg, top_pred = print_top_3_pred(preds)\n\n                if np.round(preds[top_pred].numpy()*100, 2) < threshold:\n                    custom_msg = \"NA\"\n                    Prediction_percent = \"NA\"\n                else:\n                    custom_msg = str(get_label(int(top_pred)))\n                    Prediction_percent = str(\"{:.2f}%\".format(np.round(preds[top_pred].numpy()*100, 2)))\n\n                temp_val = []\n                temp_val.append(img_src)\n                temp_val.append(custom_msg)\n                temp_val.append(Prediction_percent)\n                temp_val.append(top3_return_msg)\n\n                my_dict[index] = temp_val\n                index += 1\n\n            return render_template('result.html', mydict=my_dict)\n\n\n        elif len(img_list_loader.items) == 0:\n            return \"ERROR: Invalid image. Go back to upload new image\"\n\n\n\nif __name__ == \"__main__\":\n    app.run(debug=False, host='0.0.0.0')\n\n" ]
[ [ "pandas.read_csv" ] ]
riven314/LiuAlgoTrader
[ "be60e16b787591067bf7f18425ab2812bf0ffa32" ]
[ "liualgotrader/enhanced_backtest.py" ]
[ "import asyncio\nimport inspect\nimport sys\nimport traceback\nimport uuid\nfrom calendar import Calendar\nfrom datetime import date, datetime, timedelta\nfrom typing import Dict, List, Optional\n\nimport alpaca_trade_api as tradeapi\nimport pandas as pd\nfrom pytz import timezone\n\nfrom liualgotrader.common import config, trading_data\nfrom liualgotrader.common.data_loader import DataLoader  # type: ignore\nfrom liualgotrader.common.database import create_db_connection\nfrom liualgotrader.common.tlog import tlog, tlog_exception\nfrom liualgotrader.common.types import AssetType, TimeScale\nfrom liualgotrader.models.new_trades import NewTrade\nfrom liualgotrader.scanners.base import Scanner  # type: ignore\nfrom liualgotrader.scanners_runner import create_momentum_scanner\nfrom liualgotrader.strategies.base import Strategy\n\nrun_scanners: Dict[Scanner, datetime] = {}\nsymbol_data: Dict[str, pd.DataFrame] = {}\nportfolio_value: float\n\n\nasync def create_scanners(\n    data_loader: DataLoader,\n    scanners_conf: Dict,\n    scanner_names: Optional[List],\n) -> List[Scanner]:\n    scanners: List = []\n    for scanner_name in scanners_conf:\n        if scanner_names and scanner_name not in scanner_names:\n            continue\n        tlog(f\"scanner {scanner_name} selected\")\n        if scanner_name == \"momentum\":\n            scanners.append(\n                await create_momentum_scanner(\n                    None, data_loader, scanners_conf[scanner_name]  # type: ignore\n                )\n            )\n        else:\n            scanners.append(\n                await Scanner.get_scanner(\n                    data_loader=data_loader,\n                    scanner_name=scanner_name,\n                    scanner_details=scanners_conf[scanner_name],\n                )\n            )\n\n    return scanners\n\n\nasync def create_strategies(\n    uid: str,\n    conf_dict: Dict,\n    dl: DataLoader,\n    strategy_names: Optional[List],\n) -> List[Strategy]:\n    strategies = []\n    config.env = \"BACKTEST\"\n\n    for strategy_name in conf_dict:\n        if strategy_names and strategy_name not in strategy_names:\n            continue\n        strategy_details = conf_dict[strategy_name]\n        strategies.append(\n            await Strategy.get_strategy(\n                batch_id=uid,\n                strategy_name=strategy_name,\n                strategy_details=strategy_details,\n                data_loader=dl,\n            )\n        )\n\n    return strategies\n\n\nasync def do_scanners(\n    now: datetime, scanners: List[Scanner], symbols: Dict\n) -> Dict:\n    for scanner in scanners:\n        if scanner in run_scanners:\n            if not scanner.recurrence:\n                if now.date() == run_scanners[scanner].date():\n                    continue\n            elif (now - run_scanners[scanner]) < scanner.recurrence:\n                continue\n\n        run_scanners[scanner] = now\n        new_symbols = await scanner.run(back_time=now)\n        target_strategy_name = scanner.target_strategy_name\n\n        target_strategy_name = (\n            \"_all\" if not target_strategy_name else target_strategy_name\n        )\n\n        symbols[target_strategy_name] = new_symbols\n\n        # list(\n        #     set(symbols.get(target_strategy_name, [])).union(set(new_symbols))\n        # )\n\n    return symbols\n\n\nasync def calculate_execution_price(\n    symbol: str, data_loader: DataLoader, what: Dict, now: datetime\n) -> float:\n\n    if what[\"type\"] == \"market\":\n        return data_loader[symbol].close[now]\n\n    price_limit = float(what[\"limit_price\"])\n    if what[\"side\"] == \"buy\":\n        if data_loader[symbol].close[now] <= price_limit:\n            return data_loader[symbol].close[now]\n        else:\n            raise Exception(\n                f\"can not buy: limit price {price_limit} below market price {data_loader[symbol].close[now]}\"\n            )\n\n    if data_loader[symbol].close[now] >= price_limit:\n        return price_limit\n    else:\n        raise Exception(\n            f\"can not sell: limit price {price_limit} above market price {data_loader[symbol].close[now]}\"\n        )\n\n\nasync def do_strategy_result(\n    strategy: Strategy,\n    symbol: str,\n    now: datetime,\n    what: Dict,\n    buy_fee_percentage: float = 0.0,\n    sell_fee_percentage: float = 0.0,\n) -> bool:\n    global portfolio_value\n\n    qty = float(what[\"qty\"])\n    sign = (\n        1\n        if (\n            what[\"side\"] == \"buy\"\n            and qty > 0\n            or what[\"side\"] == \"sell\"\n            and qty < 0\n        )\n        else -1\n    )\n\n    fee = 0.0  # default, so db_trade.save() below never sees an unbound name\n    try:\n        price = await calculate_execution_price(\n            symbol=symbol, data_loader=strategy.data_loader, what=what, now=now\n        )\n\n        if what[\"side\"] == \"buy\":\n            fee = price * qty * buy_fee_percentage / 100.0\n\n            sig = inspect.signature(strategy.buy_callback)\n            if \"trade_fee\" in sig.parameters:\n                await strategy.buy_callback(symbol, price, qty, now, fee)\n            else:\n                await strategy.buy_callback(symbol, price, qty, now)\n\n        elif what[\"side\"] == \"sell\":\n            fee = price * qty * sell_fee_percentage / 100.0\n            sig = inspect.signature(strategy.sell_callback)\n            if \"trade_fee\" in sig.parameters:\n                await strategy.sell_callback(symbol, price, qty, now, fee)\n            else:\n                await strategy.sell_callback(symbol, price, qty, now)\n    except Exception as e:\n        tlog(\n            f\"do_strategy_result({symbol}, {what}, {now}) failed w/ {e}. operation not executed\"\n        )\n        return False\n\n    try:\n        trading_data.positions[symbol] = (\n            trading_data.positions[symbol] + sign * qty\n        )\n    except KeyError:\n        trading_data.positions[symbol] = sign * qty\n\n    trading_data.buy_time[symbol] = now.replace(second=0, microsecond=0)\n    trading_data.last_used_strategy[symbol] = strategy\n\n    db_trade = NewTrade(\n        algo_run_id=strategy.algo_run.run_id,\n        symbol=symbol,\n        qty=qty,\n        operation=what[\"side\"],\n        price=price,\n        indicators={},\n    )\n\n    await db_trade.save(\n        config.db_conn_pool,\n        str(now),\n        trading_data.stop_prices[symbol]\n        if symbol in trading_data.stop_prices\n        else 0.0,\n        trading_data.target_prices[symbol]\n        if symbol in trading_data.target_prices\n        else 0.0,\n        fee,\n    )\n\n    return True\n\n\nasync def do_strategy_all(\n    data_loader: DataLoader,\n    now: pd.Timestamp,\n    strategy: Strategy,\n    symbols: List[str],\n    buy_fee_percentage: float,\n    sell_fee_percentage: float,\n):\n    try:\n        sig = inspect.signature(strategy.run_all)\n        param = {\n            \"symbols_position\": dict(\n                {symbol: 0 for symbol in symbols}, **trading_data.positions\n            ),\n            \"now\": now.to_pydatetime(),\n            \"portfolio_value\": portfolio_value,\n            \"backtesting\": True,\n            \"data_loader\": data_loader,\n        }\n        if \"fee_buy_percentage\" in sig.parameters:\n            param[\"fee_buy_percentage\"] = buy_fee_percentage\n        if \"fee_sell_percentage\" in sig.parameters:\n            param[\"fee_sell_percentage\"] = sell_fee_percentage\n        do = await strategy.run_all(**param)\n        items = list(do.items())\n        items.sort(key=lambda x: int(x[1][\"side\"] == \"buy\"))\n        for symbol, what in items:\n            await do_strategy_result(\n                strategy,\n                symbol,\n                now,\n                what,\n                buy_fee_percentage,\n                sell_fee_percentage,\n            )\n\n    except Exception as e:\n        tlog(f\"[Exception] {now} {strategy}->{e}\")\n        traceback.print_exc()\n        raise\n\n\nasync def do_strategy_by_symbol(\n    data_loader: DataLoader,\n    now: pd.Timestamp,\n    strategy: Strategy,\n    symbols: List[str],\n):\n    for symbol in symbols:\n        try:\n            _ = data_loader[symbol][now - timedelta(days=30) : now]  # type: ignore\n            do, what = await strategy.run(\n                symbol=symbol,\n                shortable=True,\n                position=trading_data.positions.get(symbol, 0.0),\n                now=now,\n                portfolio_value=portfolio_value,\n                backtesting=True,\n                minute_history=data_loader[symbol].symbol_data[:now],  # type: ignore\n            )\n\n            if do:\n                await do_strategy_result(strategy, symbol, now, what)\n\n        
except Exception as e:\n tlog(f\"[Exception] {now} {strategy}({symbol})->{e}\")\n traceback.print_exc()\n raise\n\n\nasync def do_strategy(\n data_loader: DataLoader,\n now: pd.Timestamp,\n strategy: Strategy,\n symbols: List[str],\n buy_fee_percentage: float,\n sell_fee_percentage: float,\n):\n global portfolio_value\n\n if await strategy.should_run_all():\n await do_strategy_all(\n data_loader,\n now,\n strategy,\n symbols,\n buy_fee_percentage,\n sell_fee_percentage,\n )\n else:\n await do_strategy_by_symbol(data_loader, now, strategy, symbols)\n\n\ndef get_day_start_end(asset_type, day):\n if asset_type == AssetType.US_EQUITIES:\n day_start = day.date.replace(\n hour=day.open.hour,\n minute=day.open.minute,\n tzinfo=timezone(\"America/New_York\"),\n )\n day_end = day.date.replace(\n hour=day.close.hour,\n minute=day.close.minute,\n tzinfo=timezone(\"America/New_York\"),\n )\n elif asset_type == AssetType.CRYPTO:\n day_start = pd.Timestamp(\n datetime.combine(day, datetime.min.time()),\n tzinfo=timezone(\"America/New_York\"),\n )\n day_end = pd.Timestamp(\n datetime.combine(day, datetime.max.time()),\n tzinfo=timezone(\"America/New_York\"),\n )\n else:\n raise AssertionError(\n f\"get_day_start_end(): asset type {asset_type} not yet supported\"\n )\n\n return day_start, day_end\n\n\nasync def backtest_day(\n day,\n scanners,\n symbols,\n strategies,\n scale,\n data_loader,\n asset_type: AssetType,\n buy_fee_percentage: float,\n sell_fee_percentage: float,\n):\n day_start, day_end = get_day_start_end(asset_type, day)\n print(day_start, day_end)\n config.market_open = day_start\n config.market_close = day_end\n current_time = day_start\n prefetched: List[str] = []\n while current_time < day_end:\n symbols = await do_scanners(current_time, scanners, symbols)\n trading_data.positions = {\n symbol: trading_data.positions[symbol]\n for symbol in trading_data.positions\n if trading_data.positions[symbol] != 0\n }\n\n prefetch = [\n item\n for sublist in symbols.values()\n for item in sublist\n if item not in prefetched\n ]\n for symbol in prefetch:\n tlog(f\"Prefetch data for {symbol} {day_start}-{day_end}\")\n data_loader[symbol][day_start:day_end]\n prefetched.append(symbol)\n\n for strategy in strategies:\n try:\n trading_data.positions.update(\n {\n symbol: 0\n for symbol in symbols.get(strategy.name, [])\n + symbols.get(\"_all\", [])\n if symbol not in trading_data.positions\n }\n )\n except TypeError as e:\n if config.debug_enabled:\n tlog_exception(\"backtest_day\")\n\n tlog(\n f\"[EXCEPTION] {e} (hint: check scanner(s) return list of symbols?)\"\n )\n raise\n\n strategy_symbols = list(\n set(symbols.get(\"_all\", [])).union(\n set(symbols.get(strategy.name, []))\n )\n )\n\n await do_strategy(\n data_loader,\n current_time,\n strategy,\n strategy_symbols,\n buy_fee_percentage,\n sell_fee_percentage,\n )\n\n current_time += timedelta(seconds=scale.value)\n\n\nasync def backtest_main(\n uid: str,\n from_date: date,\n to_date: date,\n scale: TimeScale,\n tradeplan: Dict,\n buy_fee_percentage: float,\n sell_fee_percentage: float,\n asset_type: AssetType,\n scanners: Optional[List] = None,\n strategies: Optional[List] = None,\n) -> None:\n tlog(\n f\"Starting back-test from {from_date} to {to_date} with time scale {scale}\"\n )\n if scanners:\n tlog(f\"with scanners:{scanners}\")\n if strategies:\n tlog(f\"with strategies:{strategies}\")\n\n global portfolio_value\n if \"portfolio_value\" in tradeplan:\n portfolio_value = tradeplan[\"portfolio_value\"]\n else:\n portfolio_value = 100000\n\n 
await create_db_connection()\n\n data_loader = DataLoader() # DataLoader(scale)\n trade_api = tradeapi.REST(\n key_id=config.alpaca_api_key, secret_key=config.alpaca_api_secret\n )\n scanners = await create_scanners(\n data_loader, tradeplan[\"scanners\"], scanners\n )\n tlog(f\"instantiated {len(scanners)} scanners\")\n strategies = await create_strategies(\n uid, tradeplan[\"strategies\"], data_loader, strategies\n )\n tlog(f\"instantiated {len(strategies)} strategies\")\n if asset_type == AssetType.US_EQUITIES:\n calendars = trade_api.get_calendar(str(from_date), str(to_date))\n elif asset_type == AssetType.CRYPTO:\n calendars = [\n t.date() for t in pd.date_range(from_date, to_date).to_list()\n ]\n else:\n raise AssertionError(f\"Asset type {asset_type} not supported yet\")\n\n symbols: Dict = {}\n\n for day in calendars:\n await backtest_day(\n day,\n scanners,\n symbols,\n strategies,\n scale,\n data_loader,\n asset_type,\n buy_fee_percentage,\n sell_fee_percentage,\n )\n\n\ndef backtest(\n from_date: date,\n to_date: date,\n scale: TimeScale,\n config: Dict,\n scanners: Optional[List],\n strategies: Optional[List],\n asset_type: AssetType,\n buy_fee_percentage: float,\n sell_fee_percentage: float,\n) -> str:\n uid = str(uuid.uuid4())\n try:\n if not asyncio.get_event_loop().is_closed():\n asyncio.get_event_loop().close()\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(asyncio.new_event_loop())\n loop.run_until_complete(\n backtest_main(\n uid,\n from_date,\n to_date,\n scale,\n config,\n buy_fee_percentage,\n sell_fee_percentage,\n asset_type,\n scanners,\n strategies,\n )\n )\n except KeyboardInterrupt:\n tlog(\"backtest() - Caught KeyboardInterrupt\")\n except Exception as e:\n tlog(\n f\"backtest() - exception of type {type(e).__name__} with args {e.args}\"\n )\n traceback.print_exc()\n finally:\n print(\"=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\")\n print(f\"new batch-id: {uid}\")\n\n return uid\n" ]
[ [ "pandas.date_range" ] ]
vsitzmann/cifar10_denoising
[ "b003caaf404cad6ffac87ffce24b11351536cdd7" ]
[ "denoising_unet.py" ]
[ "import torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\n\nimport imageio\nimport skimage.measure\n\nimport torchvision\n\nfrom pytorch_prototyping.pytorch_prototyping import *\n\n\ndef num_divisible_by_2(number):\n return np.floor(np.log2(number)).astype(int)\n\n\ndef get_num_net_params(net):\n '''Counts number of trainable parameters in pytorch module'''\n model_parameters = filter(lambda p: p.requires_grad, net.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n return params\n\n\n# noinspection PyCallingNonCallable\nclass DenoisingUnet(nn.Module):\n '''A simple unet-based denoiser. This class is overly complicated for what it's accomplishing, because it\n serves as an example model class for more complicated models.\n\n Assumes images are scaled from -1 to 1.\n '''\n\n def __init__(self,\n img_sidelength):\n super().__init__()\n\n self.norm = nn.InstanceNorm2d\n self.img_sidelength = img_sidelength\n\n num_downs_unet = num_divisible_by_2(img_sidelength)\n\n self.nf0 = 64 # Number of features to use in the outermost layer of U-Net\n\n self.denoising_net = nn.Sequential(\n Unet(in_channels=3,\n out_channels=3,\n use_dropout=False,\n nf0=self.nf0,\n max_channels=8 * self.nf0,\n norm=self.norm,\n num_down=num_downs_unet,\n outermost_linear=True),\n nn.Tanh()\n )\n\n # Losses\n self.loss = nn.MSELoss()\n\n # List of logs\n self.counter = 0 # A counter to enable logging every nth iteration\n self.logs = list()\n\n self.cuda()\n\n print(\"*\" * 100)\n print(self) # Prints the model\n print(\"*\" * 100)\n print(\"Number of parameters: %d\" % get_num_net_params(self))\n print(\"*\" * 100)\n\n def get_distortion_loss(self, prediction, ground_truth):\n trgt_imgs = ground_truth.cuda()\n\n return self.loss(prediction, trgt_imgs)\n\n def get_regularization_loss(self, prediction, ground_truth):\n return torch.Tensor([0]).cuda()\n\n def write_eval(self, prediction, ground_truth, path):\n '''At test time, this saves examples to disk in a format that allows easy inspection'''\n pred = prediction.detach().cpu().numpy()\n gt = ground_truth.detach().cpu().numpy()\n\n output = np.concatenate((pred, gt), axis=1)\n output /= 2.\n output += 0.5\n\n imageio.imwrite(path, output)\n\n def write_updates(self, writer, predictions, ground_truth, input, iter):\n '''Writes out tensorboard summaries as logged in self.logs.'''\n batch_size, _, _, _ = predictions.shape\n ground_truth = ground_truth.cuda()\n\n if not self.logs:\n return\n\n for type, name, content in self.logs:\n if type == 'image':\n writer.add_image(name, content.detach().cpu().numpy(), iter)\n writer.add_scalar(name + '_min', content.min(), iter)\n writer.add_scalar(name + '_max', content.max(), iter)\n elif type == 'figure':\n writer.add_figure(name, content, iter, close=True)\n\n # Cifar10 images are tiny - to see them better in tensorboard, upsample to 256x256\n output_input_gt = torch.cat((input, predictions, ground_truth), dim=0)\n output_input_gt = F.interpolate(output_input_gt, scale_factor=256 / self.img_sidelength)\n grid = torchvision.utils.make_grid(output_input_gt,\n scale_each=True,\n nrow=batch_size,\n normalize=True).cpu().detach().numpy()\n writer.add_image(\"Output_vs_gt\", grid, iter)\n\n writer.add_scalar(\"psnr\", self.get_psnr(predictions, ground_truth), iter)\n\n writer.add_scalar(\"out_min\", predictions.min(), iter)\n writer.add_scalar(\"out_max\", predictions.max(), iter)\n\n writer.add_scalar(\"trgt_min\", ground_truth.min(), iter)\n writer.add_scalar(\"trgt_max\", 
ground_truth.max(), iter)\n\n def get_psnr(self, predictions, ground_truth):\n '''Calculates the PSNR of the model's prediction.'''\n batch_size, _, _, _ = predictions.shape\n pred = predictions.detach().cpu().numpy()\n gt = ground_truth.detach().cpu().numpy()\n\n return skimage.measure.compare_psnr(gt, pred, data_range=2)\n\n def forward(self, input):\n self.logs = list() # Resets the logs\n\n batch_size, _, _, _ = input.shape\n\n noisy_img = input\n\n # We implement a resnet (good reasoning see https://arxiv.org/abs/1608.03981)\n pred_noise = self.denoising_net(noisy_img)\n output = noisy_img - pred_noise\n\n if not self.counter % 50:\n # Cifar10 images are tiny - to see them better in tensorboard, upsample to 256x256\n pred_noise = F.interpolate(pred_noise,\n scale_factor=256 / self.img_sidelength)\n grid = torchvision.utils.make_grid(pred_noise,\n scale_each=True,\n normalize=True,\n nrow=batch_size)\n self.logs.append(('image', 'pred_noise', grid))\n\n self.counter += 1\n\n return output\n" ]
[ [ "numpy.concatenate", "torch.cat", "torch.nn.MSELoss", "torch.nn.Tanh", "torch.nn.functional.interpolate", "torch.Tensor", "numpy.log2" ] ]
mvsantosdev/frbpoppy
[ "48386690faa5efd29924a216c6a31137a941acd0" ]
[ "frbpoppy/precalc.py" ]
[ "\"\"\"Create lookup tables for redshift and the NE2001 dispersion measure.\"\"\"\n\nimport os\nimport numpy as np\nimport sqlite3\nimport sys\nfrom scipy.integrate import quad\nfrom tqdm import tqdm\nfrom joblib import Parallel, delayed\n\nimport frbpoppy.galacticops as go\nfrom frbpoppy.log import pprint\nfrom frbpoppy.paths import paths\n\n\nclass NE2001Table:\n    \"\"\"Create/use a NE2001 lookup table for dispersion measure.\"\"\"\n\n    def __init__(self, test=False):\n        \"\"\"Initializing.\"\"\"\n        self.test = test\n        self.set_file_name()\n\n        # Setup database\n        self.db = False\n        self.step = 0.1\n        self.rounding = 2\n\n        # For parallel processes\n        self.temp_path = None\n\n        if self.test:\n            self.step = 0.1\n            if os.path.exists(self.file_name):\n                os.remove(self.file_name)\n\n        if os.path.exists(self.file_name) and self.test is False:\n            self.db = True\n        else:\n            # Calculations take quite some time\n            # Provide a way for people to quit\n            try:\n                self.create_table()\n            except KeyboardInterrupt:\n                pprint('Losing all progress in calculations')\n                os.remove(self.file_name)\n                if self.temp_path:\n                    os.remove(self.temp_path)\n                sys.exit()\n\n    def set_file_name(self):\n        \"\"\"Determine filename.\"\"\"\n        uni_mods = os.path.join(paths.models(), 'universe/')\n        self.file_name = uni_mods + 'dm_mw.db'\n\n        if self.test:\n            uni_mods = os.path.join(paths.models(), 'universe/')\n            self.file_name = uni_mods + 'test_dm_mw.db'\n\n    def create_table(self, parallel=True):\n        \"\"\"Create a lookup table for dispersion measure.\"\"\"\n        # Connect to database\n        conn = sqlite3.connect(self.file_name)\n        c = conn.cursor()\n\n        # Set array of coordinates\n        gls = np.arange(-180., 180. + self.step, self.step).round(1)\n        gbs = np.arange(-90., 90. + self.step, self.step).round(1)\n        dist = 0.1  # [Gpc]\n\n        gls = gls.astype(np.float32)\n        gbs = gbs.astype(np.float32)\n\n        # Create database\n        c.execute('create table dm ' +\n                  '(gl real, gb real, dm_mw real)')\n\n        # Give an update on the progress\n        m = ['Creating a DM lookup table',\n             ' - Only needs to happen once',\n             ' - Unfortunately pretty slow',\n             ' - Prepare to wait for ~1.5h (4 cores)',\n             ' - Time given as [time_spent<time_left] in (hh:)mm:ss',\n             'Starting to calculate DM values']\n        for n in m:\n            pprint(n)\n\n        n_opt = len(gls)*len(gbs)\n        options = np.array(np.meshgrid(gls, gbs)).T.reshape(-1, 2)\n        dm_mw = np.zeros(len(options)).astype(np.float32)\n\n        def dm_tot(i, dm_mw):\n            gl, gb = options[i]\n            dm_mw[i] = go.ne2001_dist_to_dm(dist, gl, gb)\n\n        if parallel:\n\n            temp_path = os.path.join(paths.models(), 'universe/') + 'temp.mmap'\n            self.temp_path = temp_path\n\n            # Make a temp memmap to have a sharedable memory object\n            temp = np.memmap(temp_path, dtype=dm_mw.dtype,\n                             shape=len(dm_mw),\n                             mode='w+')\n\n            # Parallel process in order to populate array\n            r = range(n_opt)\n            j = min([4, os.cpu_count() - 1])\n            print(os.cpu_count())\n            Parallel(n_jobs=j)(delayed(dm_tot)(i, temp) for i in tqdm(r))\n\n            # Map results\n            r = np.concatenate((options, temp[:, np.newaxis]), axis=1)\n            results = map(tuple, r.tolist())\n\n            # Delete the temporary directory and contents\n            try:\n                os.remove(temp_path)\n            except FileNotFoundError:\n                print(f'Unable to remove {temp_path}')\n\n        else:\n            for i in tqdm(range(n_opt)):\n                dm_tot(i, dm_mw)\n\n            # Save results to database\n            dm_mw = dm_mw.astype(np.float32)\n            r = np.concatenate((options, dm_mw[:, np.newaxis]), axis=1)\n            results = map(tuple, r.tolist())\n\n        pprint(' - Saving results')\n        c.executemany('insert into dm values (?,?,?)', results)\n\n        # Make for easier searching\n        c.execute('create 
index ix on dm (gl, gb)')\n\n # Save\n conn.commit()\n\n pprint('Finished DM table')\n\n def lookup(self, gal, gab):\n \"\"\"Look up associated milky way dispersion measure with gal coords.\n\n Args:\n gl (array): Galactic longitude [fractional degrees]\n gb (array): Galactic latitude [fractional degrees]\n\n Returns:\n dm_mw (float): Galactic dispersion measure [pc*cm^-3]\n\n \"\"\"\n # Connect to database\n conn = sqlite3.connect(self.file_name)\n c = conn.cursor()\n\n dm_mw = np.ones_like(gal)\n\n # Round values\n def frac_round(x, prec=self.rounding, base=1):\n return np.round(base * np.round(x/base), prec)\n\n # Round values\n gal = frac_round(gal, self.rounding)\n gab = frac_round(gab, self.rounding)\n\n # Search database\n query = 'select dm_mw from dm where gl=? and gb=? limit 1'\n\n for i, gl in enumerate(gal):\n dm_mw[i] = c.execute(query, [str(gl), str(gab[i])]).fetchone()[0]\n\n # Close database\n conn.close()\n\n return dm_mw\n\n\nclass DistanceTable:\n \"\"\"\n Create/use a lookup table for comoving distance, volume, redshift etc.\n\n Create a list of tuples to lookup the corresponding redshift for a comoving\n distance [Gpc] (or the other way around). Uses formulas from\n Hoggs et al. (1999) for the cosmological calculations. To avoid long\n calculation times, it will check if a previous run with the same parameters\n has been done, which it will then load it. If not, it will calculate a new\n table, and save the table for later runs. Covers z, dist, vol, dvol,\n cdf_sfr and cdf_smd.\n\n Args:\n H_0 (float, optional): Hubble parameter. Defaults to 67.74 km/s/Mpc\n W_m (float, optional): Omega matter. Defaults to 0.3089\n W_k (float, optional): Omega vacuum. Defaults to 0.6911\n\n \"\"\"\n\n def __init__(self, H_0=67.74, W_m=0.3089, W_v=0.6911, test=False):\n \"\"\"Initializing.\"\"\"\n self.H_0 = H_0\n self.W_m = W_m\n self.W_v = W_v\n self.test = test\n\n self.set_file_name()\n\n # Setup database\n self.db = False\n self.step = 0.00001\n self.z_max = 6.5\n\n if self.test:\n self.step = 0.001\n self.z_max = 6.5\n if os.path.exists(self.file_name):\n os.remove(self.file_name)\n\n if os.path.exists(self.file_name) and self.test is False:\n self.db = True\n else:\n # Calculations take quite some time\n # Provide a way for people to quit\n try:\n self.create_table()\n except KeyboardInterrupt:\n pprint('Losing all progress in calculations')\n os.remove(self.file_name)\n sys.exit()\n\n def set_file_name(self):\n \"\"\"Determine filename.\"\"\"\n uni_mods = os.path.join(paths.models(), 'universe/')\n\n def cvt(value):\n \"\"\"Convert a float to a string without a period.\"\"\"\n return str(value).replace('.', 'd')\n\n # Convert\n paras = ['h0', cvt(self.H_0),\n 'wm', cvt(self.W_m),\n 'wv', cvt(self.W_v)]\n f = '-'.join(paras)\n\n self.file_name = uni_mods + f'{f}.db'\n\n if self.test:\n self.file_name = uni_mods + 'cosmo_test.db'\n\n def create_table(self):\n \"\"\"Create a lookup table for distances.\"\"\"\n m = ['Creating a distance table',\n ' - Only needs to happen once',\n ' - May take up to 2m on a single core']\n for n in m:\n pprint(n)\n\n # Connect to database\n conn = sqlite3.connect(self.file_name)\n c = conn.cursor()\n\n H_0 = self.H_0\n W_m = self.W_m\n W_v = self.W_v\n\n W_k = 1.0 - W_m - W_v # Omega curvature\n\n if W_k != 0.0:\n pprint('Careful - Your cosmological parameters do not sum to 1.0')\n\n zs = np.arange(0, self.z_max+self.step, self.step)\n\n # Create database\n t = 'real'\n par = f'(z {t}, dist {t}, vol {t}, dvol {t}, cdf_sfr {t}, cdf_smd {t})'\n s = 
f'create table distances {par}'\n c.execute(s)\n\n results = []\n\n pprint(' - Calculating parameters at various redshifts')\n conv = go.Redshift(zs, H_0=H_0, W_m=W_m, W_v=W_v)\n dists = conv.dist_co()\n vols = conv.vol_co()\n\n # Get dV\n dvols = np.zeros_like(vols)\n dvols[1:] = np.diff(vols)\n\n pprint(' - Calculating Star Formation Rate')\n # Get pdf sfr\n pdf_sfr = sfr(zs)*dvols\n cdf_sfr = np.cumsum(pdf_sfr) # Unnormalized\n cdf_sfr /= cdf_sfr[-1]\n\n pprint(' - Calculating Stellar Mass Density')\n # Get pdf csmd\n pdf_smd = smd(zs, H_0=H_0, W_m=W_m, W_v=W_v)*dvols\n cdf_smd = np.cumsum(pdf_smd) # Unnormalized\n cdf_smd /= cdf_smd[-1]\n\n results = np.stack((zs, dists, vols, dvols, cdf_sfr, cdf_smd)).T\n\n pprint(' - Saving values to database')\n # Save results to database\n data = map(tuple, results.tolist())\n c.executemany('insert into distances values (?,?,?,?,?,?)', data)\n\n # Make for easier searching\n # I don't really understand SQL index names...\n c.execute('create index ix on distances (z)')\n c.execute('create index ixx on distances (dist)')\n c.execute('create index ixxx on distances (vol)')\n c.execute('create index ixxxx on distances (dvol)')\n c.execute('create index ixxxxx on distances (cdf_sfr)')\n c.execute('create index ixxxxxx on distances (cdf_smd)')\n\n # Save\n conn.commit()\n\n pprint('Finished distance table')\n\n def lookup(self, z=None, dist_co=None, vol_co=None, dvol_co=None,\n cdf_sfr=None, cdf_smd=None):\n \"\"\"Look up associated values with input values.\"\"\"\n # Connect to database\n conn = sqlite3.connect(self.file_name)\n c = conn.cursor()\n\n # Check what's being looked up, set all other keywords to same length\n kw = {'z': z,\n 'dist': dist_co,\n 'vol': vol_co,\n 'dvol': dvol_co,\n 'cdf_sfr': cdf_sfr,\n 'cdf_smd': cdf_smd}\n\n for key, value in kw.items():\n if value is not None:\n in_par = key\n break\n\n for key, value in kw.items():\n if key != in_par:\n kw[key] = np.ones_like(kw[in_par])\n\n keys = list(kw.keys())\n\n # Search database\n query = f'select * from distances where {in_par} > ? limit 1'\n\n for i, r in enumerate(kw[in_par]):\n d = c.execute(query, [str(r)]).fetchone()\n for ii, key in enumerate(keys):\n if key == in_par:\n continue\n\n kw[key][i] = d[ii]\n\n # Close database\n conn.close()\n\n return list(kw.values())\n\n\ndef sfr(z):\n \"\"\"Return the number density of star forming rate at redshift z.\n\n Follows Madau & Dickinson (2014), eq. 15. For more info see\n https://arxiv.org/pdf/1403.0007.pdf\n \"\"\"\n return (1+z)**2.7/(1+((1+z)/2.9)**5.6)\n\n\ndef smd(z, H_0=67.74, W_m=0.3089, W_v=0.6911):\n \"\"\"Return the number density of Stellar Mass Density at redshift z.\n\n Follows Madau & Dickinson (2014), eq. 2 & 15. For more info see\n https://arxiv.org/pdf/1403.0007.pdf\n \"\"\"\n def integral(z):\n z1 = z + 1\n return z1**1.7/(1+(z1/2.9)**5.6)*(1/(H_0*(W_m*z1**3+W_v)**0.5))\n\n def csmd(z):\n return 0.01095*quad(integral, z, np.inf)[0]\n\n vec_csmd = np.vectorize(csmd)\n\n return vec_csmd(z)\n" ]
[ [ "numpy.concatenate", "numpy.zeros_like", "numpy.ones_like", "numpy.vectorize", "numpy.round", "numpy.diff", "numpy.stack", "numpy.arange", "numpy.cumsum", "numpy.meshgrid", "scipy.integrate.quad" ] ]
naskoap/covid-analysis
[ "26a418ed3d46a6014a3f59e2415cc62ea85d7f3c" ]
[ "bin/python/data_prep.py" ]
[ "import pandas as pd\nimport plotly.express as px\nfrom ipywidgets import interact\n\n\ncountry_df = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/web-data/data/cases_country.csv')\ncountry_df.columns = map(str.lower, country_df.columns)\ncountry_df = country_df.rename(columns={'country_region': 'country'})\n\nsorted_country_df = country_df.sort_values('confirmed', ascending=False)\n\ndef bubble_chart(n):\n    # px.scatter lives in plotly.express, not the top-level plotly module\n    fig = px.scatter(sorted_country_df.head(n), x=\"country\", y=\"confirmed\", size=\"confirmed\", color=\"country\",\n                     hover_name=\"country\", size_max=60)\n    fig.update_layout(\n        title=str(n) + \" Worst hit countries\",\n        xaxis_title=\"Countries\",\n        yaxis_title=\"Confirmed Cases\",\n        width=700\n    )\n    fig.show()\n\ninteract(bubble_chart, n=10)" ]
[ [ "pandas.read_csv" ] ]
cpeng-pz/Informer2020
[ "7bcc5e3220ee841b18788b6ff5d0d0907259aaf7" ]
[ "utils/timefeatures.py" ]
[ "from typing import List\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries import offsets\nfrom pandas.tseries.frequencies import to_offset\n\nclass TimeFeature:\n def __init__(self):\n pass\n\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n pass\n\n def __repr__(self):\n return self.__class__.__name__ + \"()\"\n\nclass SecondOfMinute(TimeFeature):\n \"\"\"Minute of hour encoded as value between [-0.5, 0.5]\"\"\"\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n return index.second / 59.0 - 0.5\n\nclass MinuteOfHour(TimeFeature):\n \"\"\"Minute of hour encoded as value between [-0.5, 0.5]\"\"\"\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n return index.minute / 59.0 - 0.5\n\nclass HourOfDay(TimeFeature):\n \"\"\"Hour of day encoded as value between [-0.5, 0.5]\"\"\"\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n return index.hour / 23.0 - 0.5\n\nclass DayOfWeek(TimeFeature):\n \"\"\"Hour of day encoded as value between [-0.5, 0.5]\"\"\"\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n return index.dayofweek / 6.0 - 0.5\n\nclass DayOfMonth(TimeFeature):\n \"\"\"Day of month encoded as value between [-0.5, 0.5]\"\"\"\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n return (index.day - 1) / 30.0 - 0.5\n\nclass DayOfYear(TimeFeature):\n \"\"\"Day of year encoded as value between [-0.5, 0.5]\"\"\"\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n return (index.dayofyear - 1) / 365.0 - 0.5\n\nclass MonthOfYear(TimeFeature):\n \"\"\"Month of year encoded as value between [-0.5, 0.5]\"\"\"\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n return (index.month - 1) / 11.0 - 0.5\n\nclass WeekOfYear(TimeFeature):\n \"\"\"Week of year encoded as value between [-0.5, 0.5]\"\"\"\n def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:\n return (index.isocalendar().week - 1) / 52.0 - 0.5\n\ndef time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:\n \"\"\"\n Returns a list of time features that will be appropriate for the given frequency string.\n Parameters\n ----------\n freq_str\n Frequency string of the form [multiple][granularity] such as \"12H\", \"5min\", \"1D\" etc.\n \"\"\"\n\n features_by_offsets = {\n offsets.YearEnd: [],\n offsets.QuarterEnd: [MonthOfYear],\n offsets.MonthEnd: [MonthOfYear],\n offsets.Week: [DayOfMonth, WeekOfYear],\n offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],\n offsets.Minute: [\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n offsets.Second: [\n SecondOfMinute,\n MinuteOfHour,\n HourOfDay,\n DayOfWeek,\n DayOfMonth,\n DayOfYear,\n ],\n }\n\n offset = to_offset(freq_str)\n try:\n feature_classes = features_by_offsets[type(offset)]\n return [cls() for cls in feature_classes]\n except KeyError:\n supported_freq_msg = f\"\"\"\n Unsupported frequency {freq_str}\n The following frequencies are supported:\n Y - yearly\n alias: A\n M - monthly\n W - weekly\n D - daily\n B - business days\n H - hourly\n T - minutely\n alias: min\n S - secondly\n \"\"\"\n raise RuntimeError(supported_freq_msg)\n\ndef time_features(dates, timeenc=1, freq='h'):\n if timeenc==0:\n dates['month'] = dates.date.apply(lambda row:row.month,1)\n dates['day'] = dates.date.apply(lambda row:row.day,1)\n dates['weekday'] = dates.date.apply(lambda row:row.weekday(),1)\n dates['hour'] = dates.date.apply(lambda 
row:row.hour,1)\n dates['minute'] = dates.date.apply(lambda row:row.minute,1)\n dates['minute'] = dates.minute.map(lambda x:x//15)\n freq_map = {\n 'y':[],'m':['month'],'w':['month'],'d':['month','day','weekday'],\n 'b':['month','day','weekday'],'h':['month','day','weekday','hour'],\n 't':['month','day','weekday','hour','minute'],\n }\n return dates[freq_map[freq.lower()]].values\n if timeenc==1:\n dates = pd.to_datetime(dates.date.values)\n return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)]).transpose(1,0)" ]
[ [ "pandas.to_datetime", "pandas.tseries.frequencies.to_offset" ] ]
oksanagit/profile_collection-1
[ "7849c588a7cdae13a16712753017361252252591" ]
[ "startup/10-machine.py" ]
[ "print(f\"Loading {__file__}...\")\n\nimport numpy as np\nfrom ophyd import (\n EpicsSignal,\n EpicsSignalRO,\n EpicsMotor,\n Device,\n Signal,\n PseudoPositioner,\n PseudoSingle,\n)\nfrom ophyd.utils.epics_pvs import set_and_wait\nfrom ophyd.pseudopos import pseudo_position_argument, real_position_argument\nfrom ophyd.positioner import PositionerBase\nfrom ophyd import Component as Cpt\n\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nimport functools\nimport math\nfrom pathlib import Path\n\n\n\"\"\"\nFor organization, this file will define objects for the machine. This will\ninclude the undulator (and energy axis) and front end slits.\n\"\"\"\n\n\n# Constants\nANG_OVER_EV = 12.3984\n\n\n# Signals\nring_current = EpicsSignalRO(\"SR:C03-BI{DCCT:1}I:Real-I\", name=\"ring_current\")\n\n\n# Setup undulator\nclass InsertionDevice(Device, PositionerBase):\n gap = Cpt(EpicsMotor, \"-Ax:Gap}-Mtr\", kind=\"hinted\", name=\"\")\n brake = Cpt(\n EpicsSignal,\n \"}BrakesDisengaged-Sts\",\n write_pv=\"}BrakesDisengaged-SP\",\n kind=\"omitted\",\n add_prefix=(\"read_pv\", \"write_pv\", \"suffix\"),\n )\n\n # These are debugging values, not even connected to by default\n elev = Cpt(EpicsSignalRO, \"-Ax:Elev}-Mtr.RBV\", kind=\"omitted\")\n taper = Cpt(EpicsSignalRO, \"-Ax:Taper}-Mtr.RBV\", kind=\"omitted\")\n tilt = Cpt(EpicsSignalRO, \"-Ax:Tilt}-Mtr.RBV\", kind=\"omitted\")\n elev_u = Cpt(EpicsSignalRO, \"-Ax:E}-Mtr.RBV\", kind=\"omitted\")\n\n def set(self, *args, **kwargs):\n set_and_wait(self.brake, 1)\n return self.gap.set(*args, **kwargs)\n\n def stop(self, *, success=False):\n return self.gap.stop(success=success)\n\n @property\n def settle_time(self):\n return self.gap.settle_time\n\n @settle_time.setter\n def settle_time(self, val):\n self.gap.settle_time = val\n\n @property\n def timeout(self):\n return self.gap.timeout\n\n @timeout.setter\n def timeout(self, val):\n self.gap.timeout = val\n\n @property\n def egu(self):\n return self.gap.egu\n\n @property\n def limits(self):\n return self.gap.limits\n\n @property\n def low_limit(self):\n return self.gap.low_limit\n\n @property\n def high_limit(self):\n return self.gap.high_limit\n\n def move(self, *args, moved_cb=None, **kwargs):\n if moved_cb is not None:\n\n @functools.wraps(moved_cb)\n def inner_move(status, obj=None):\n if obj is not None:\n obj = self\n return moved_cb(status, obj=obj)\n\n else:\n inner_move = None\n return self.set(*args, moved_cb=inner_move, **kwargs)\n\n @property\n def position(self):\n return self.gap.position\n\n @property\n def moving(self):\n return self.gap.moving\n\n def subscribe(self, callback, *args, **kwargs):\n @functools.wraps(callback)\n def inner(obj, **kwargs):\n return callback(obj=self, **kwargs)\n\n return self.gap.subscribe(inner, *args, **kwargs)\n\n\n# Setup energy axis\nclass Energy(PseudoPositioner):\n # Synthetic axis\n energy = Cpt(PseudoSingle)\n\n # Real motors\n u_gap = Cpt(InsertionDevice, \"SR:C5-ID:G1{IVU21:1\")\n bragg = Cpt(\n EpicsMotor,\n \"XF:05IDA-OP:1{Mono:HDCM-Ax:P}Mtr\",\n add_prefix=(),\n read_attrs=[\"user_readback\"],\n )\n c2_x = Cpt(\n EpicsMotor,\n \"XF:05IDA-OP:1{Mono:HDCM-Ax:X2}Mtr\",\n add_prefix=(),\n read_attrs=[\"user_readback\"],\n )\n epics_d_spacing = EpicsSignal(\"XF:05IDA-CT{IOC:Status01}DCMDspacing.VAL\")\n epics_bragg_offset = EpicsSignal(\"XF:05IDA-CT{IOC:Status01}BraggOffset.VAL\")\n\n # Motor enable flags\n move_u_gap = Cpt(Signal, None, add_prefix=(), value=True)\n move_c2_x = Cpt(Signal, None, add_prefix=(), value=True)\n harmonic = 
Cpt(Signal, None, add_prefix=(), value=0, kind=\"config\")\n    selected_harmonic = Cpt(Signal, None, add_prefix=(), value=0)\n\n    # Experimental\n    detune = Cpt(Signal, None, add_prefix=(), value=0)\n\n    def energy_to_positions(self, target_energy, undulator_harmonic, u_detune):\n        \"\"\"Compute undulator and mono positions given a target energy\n\n        Parameters\n        ----------\n        target_energy : float\n            Target energy in keV\n\n        undulator_harmonic : int, optional\n            The harmonic in the undulator to use\n\n        u_detune : float, optional\n            Amount to 'mistune' the undulator in keV. Will choose settings\n            such that the peak of the undulator spectrum will be at\n            `target_energy + u_detune`.\n\n        Returns\n        -------\n        bragg : float\n            The angle to set the monochromator\n\n        \"\"\"\n        # Set up constants\n        Xoffset = self._xoffset\n        d_111 = self._d_111\n        delta_bragg = self._delta_bragg\n        C2Xcal = self._c2xcal\n        T2cal = self._t2cal\n        etoulookup = self.etoulookup\n\n        # Calculate Bragg RBV\n        BraggRBV = (\n            np.arcsin((ANG_OVER_EV / target_energy) / (2 * d_111)) / np.pi * 180\n            - delta_bragg\n        )\n\n        # Calculate C2X\n        Bragg = BraggRBV + delta_bragg\n        T2 = Xoffset * np.sin(Bragg * np.pi / 180) / np.sin(2 * Bragg * np.pi / 180)\n        dT2 = T2 - T2cal\n        C2X = C2Xcal - dT2\n\n        # Calculate undulator gap\n\n        # TODO make this more sophisticated to stay a fixed distance\n        # off the peak of the undulator energy\n        ugap = float(\n            etoulookup((target_energy + u_detune) / undulator_harmonic)\n        )  # in mm\n        ugap *= 1000  # convert to um\n\n        return BraggRBV, C2X, ugap\n\n    def undulator_energy(self, harmonic=3):\n        \"\"\"Return the current energy peak of the undulator at the given harmonic\n\n        Parameters\n        ----------\n        harmonic : int, optional\n            The harmonic to use, defaults to 3\n        \"\"\"\n        ugap = self.u_gap.get().readback\n        utoelookup = self.utoelookup\n\n        fundamental = float(utoelookup(ugap))\n\n        energy = fundamental * harmonic\n\n        return energy\n\n    def __init__(\n        self,\n        *args,\n        xoffset=None,\n        d_111=None,\n        delta_bragg=None,\n        C2Xcal=None,\n        T2cal=None,\n        **kwargs,\n    ):\n        self._xoffset = xoffset\n        self._d_111 = d_111\n        self._delta_bragg = delta_bragg\n        self._c2xcal = C2Xcal\n        self._t2cal = T2cal\n        super().__init__(*args, **kwargs)\n\n        # calib_path = '/nfs/xf05id1/UndulatorCalibration/'\n        calib_path = Path(__file__).parent\n        # calib_file = \"../data/SRXUgapCalibration20170612.txt\"\n        calib_file = \"../data/20210912_SRXUgapCalibration.txt\"\n\n        # with open(os.path.join(calib_path, calib_file), 'r') as f:\n        with open(calib_path / calib_file, \"r\") as f:\n            next(f)\n            uposlistIn = []\n            elistIn = []\n            for line in f:\n                num = [float(x) for x in line.split()]\n                uposlistIn.append(num[0])\n                elistIn.append(num[1])\n\n        self.etoulookup = InterpolatedUnivariateSpline(elistIn, uposlistIn)\n        self.utoelookup = InterpolatedUnivariateSpline(uposlistIn, elistIn)\n\n        self.u_gap.gap.user_readback.name = self.u_gap.name\n\n    def crystal_gap(self):\n        \"\"\"\n        Return the current physical gap between first and second crystals\n        \"\"\"\n        C2X = self.c2_x.get().user_readback\n        bragg = self.bragg.get().user_readback\n\n        T2cal = self._t2cal\n        delta_bragg = self._delta_bragg\n        d_111 = self._d_111\n        c2x_cal = self._c2xcal\n\n        Bragg = np.pi / 180 * (bragg + delta_bragg)\n\n        dT2 = c2x_cal - C2X\n        T2 = dT2 + T2cal\n\n        XoffsetVal = T2 / (np.sin(Bragg) / np.sin(2 * Bragg))\n\n        return XoffsetVal\n\n    @pseudo_position_argument\n    def forward(self, p_pos):\n        energy = p_pos.energy\n        harmonic = int(self.harmonic.get())\n        if harmonic < 0 or ((harmonic % 2) == 0 and harmonic != 0):\n            raise RuntimeError(\n                f\"The harmonic must be 0 or odd and positive, you set {harmonic}. \"\n                \"Set `energy.harmonic` to a positive odd integer or 0.\"\n            )\n        detune = self.detune.get()\n        if energy <= 4.4:\n            raise ValueError(\n                \"The energy you entered is too low ({} keV). \"\n                \"Minimum energy = 4.4 keV\".format(energy)\n            )\n        if energy > 25.0:\n            if (energy < 4400.0) or (energy > 25000.0):\n                # Energy is invalid\n                raise ValueError(\n                    \"The requested photon energy is invalid ({} keV). \"\n                    \"Values must be in the range of 4.4 - 25 keV\".format(energy)\n                )\n            else:\n                # Energy is in eV\n                energy = energy / 1000.0\n\n        # harmonic cannot be None, it is an undesired datatype\n        # Previously, we were finding the harmonic with the highest flux, this\n        # was always done during energy change since harmonic was returned to\n        # None\n        # Here, we are programming it in\n        # if harmonic is None:\n        if harmonic < 3:\n            harmonic = 3\n            # Choose the right harmonic\n            braggcal, c2xcal, ugapcal = self.energy_to_positions(\n                energy, harmonic, detune\n            )\n            # Try higher harmonics until the required gap is too small\n            while True:\n                braggcal, c2xcal, ugapcal = self.energy_to_positions(\n                    energy, harmonic + 2, detune\n                )\n                if ugapcal < self.u_gap.low_limit:\n                    break\n                harmonic += 2\n\n        self.selected_harmonic.put(harmonic)\n\n        # Compute where we would move everything to in a perfect world\n        bragg, c2_x, u_gap = self.energy_to_positions(energy, harmonic, detune)\n\n        # Sometimes move the crystal gap\n        if not self.move_c2_x.get():\n            c2_x = self.c2_x.position\n\n        # Sometimes move the undulator\n        if not self.move_u_gap.get():\n            u_gap = self.u_gap.position\n\n        return self.RealPosition(bragg=bragg, c2_x=c2_x, u_gap=u_gap)\n\n    @real_position_argument\n    def inverse(self, r_pos):\n        bragg = r_pos.bragg\n        e = ANG_OVER_EV / (\n            2 * self._d_111 * math.sin(math.radians(bragg + self._delta_bragg))\n        )\n        return self.PseudoPosition(energy=float(e))\n\n    @pseudo_position_argument\n    def set(self, position):\n        return super().set([float(_) for _ in position])\n\n    def synch_with_epics(self):\n        self.epics_d_spacing.put(self._d_111)\n        self.epics_bragg_offset.put(self._delta_bragg)\n\n    def retune_undulator(self):\n        self.detune.put(0.0)\n        self.move(self.energy.get()[0])\n\n\n# Recalibrated 2021-09-08\ncal_data_2021cycle3 = {\n    \"d_111\": 3.128666195523328,\n    \"delta_bragg\": 0.2167556062528753,\n    \"C2Xcal\": 3.6,\n    \"T2cal\": 15.0347755916,\n    \"xoffset\": 24.65,\n}\n\nenergy = Energy(prefix=\"\", name=\"energy\", **cal_data_2021cycle3)\nenergy.wait_for_connection()\nenergy.synch_with_epics()\nenergy.value = 1.0\n\n\n# Setup front end slits (primary slits)\nclass SRXSlitsFE(Device):\n    top = Cpt(EpicsMotor, \"3-Ax:T}Mtr\")\n    bot = Cpt(EpicsMotor, \"4-Ax:B}Mtr\")\n    inb = Cpt(EpicsMotor, \"3-Ax:I}Mtr\")\n    out = Cpt(EpicsMotor, \"4-Ax:O}Mtr\")\n\n\nfe = SRXSlitsFE(\"FE:C05A-OP{Slt:\", name=\"fe\")\n" ]
[ [ "numpy.arcsin", "numpy.sin", "scipy.interpolate.InterpolatedUnivariateSpline" ] ]
ivanbgd/Coursera-Neural-Networks-for-Machine-Learning
[ "95b4583b1dbb53784fb8ba0381ca5ca87849ae14" ]
[ "AS4/utils.py" ]
[ "import numpy as np\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nfrom collections import namedtuple\nfrom sys import exit\n\n_report_calls_to_sample_bernoulli = True\n\n\ndef load_data(file_name = \".\\\\data_set.mat\"):\n \"\"\" Loads data from a .mat file. MATLAB 5.0 MAT-file\n This method loads the training, validation and test sets.\n They are already split in the data file.\n Columns represent data samples, where rows contain individual pixels or one-hot encoded classes (target digits).\n Input:\n file_name: A .mat file name. The extension is not needed.\n Returns:\n training: A namedtuple of training 'inputs' and 'targets'. Respective shapes are: (256, 1000), (10, 1000).\n validation: A namedtuple of validation 'inputs' and 'targets'. Respective shapes are: (256, 1000), (10, 1000).\n test: A namedtuple of test 'inputs' and 'targets'. Respective shapes are: (256, 9000), (10, 9000).\n \"\"\"\n mat_contents = sio.loadmat(file_name)\n data = mat_contents['data']\n\n #print(mat_contents)\n #print(data)\n #print(data.dtype) # [('training', 'O'), ('validation', 'O'), ('test', 'O')] -->\n # --> dtype=[('inputs', 'O'), ('targets', 'O')]), dtype=[('targets', 'O'), ('inputs', 'O')]), dtype=[('inputs', 'O'), ('targets', 'O')])\n\n #print(data['training'][0][0][0][0][0].shape) # (256, 1000)\n #print(data['training'][0][0][0][0][1].shape) # (10, 1000)\n #print(data['validation'][0][0][0][0][0].shape) # (10, 1000)\n #print(data['validation'][0][0][0][0][1].shape) # (256, 1000)\n #print(data['test'][0][0][0][0][0].shape) # (256, 9000)\n #print(data['test'][0][0][0][0][1].shape) # (10, 9000)\n\n train_input = data['training'][0][0][0][0][0] # (256, 1000)\n train_target = data['training'][0][0][0][0][1] # (10, 1000)\n valid_input = data['validation'][0][0][0][0][1] # (256, 1000)\n valid_target = data['validation'][0][0][0][0][0] # (10, 1000)\n test_input = data['test'][0][0][0][0][0] # (256, 9000)\n test_target = data['test'][0][0][0][0][1] # (10, 9000)\n\n Train = namedtuple('Train', ['inputs', 'targets'])\n Valid = namedtuple('Valid', ['inputs', 'targets'])\n Test = namedtuple('Test', ['inputs', 'targets'])\n\n training = Train(train_input, train_target)\n validation = Valid(valid_input, valid_target)\n test = Test(test_input, test_target)\n \n return training, validation, test\n\n\ndef a4_rand(requested_size, seed):\n \"\"\"\n Returns 'random' data of the requested shape.\n The data are sampled from the file 'a4_randomness_source.mat'.\n Seed is used to calculate the starting point in the file.\n \"\"\"\n mat_contents = sio.loadmat(\".\\\\a4_randomness_source.mat\")\n randomness_source = mat_contents['randomness_source'] # (1, 350381)\n\n requested_size = list(requested_size)\n start_i = int(np.round(seed)) % int(np.round(randomness_source.shape[1] / 10)) + 0\n if (start_i + np.prod(requested_size)) >= (randomness_source.shape[1] + 0):\n exit('a4_rand failed to generate an array of that size (too big)')\n ret = np.reshape(randomness_source[:, start_i : start_i+np.prod(requested_size)-0], newshape=tuple(requested_size), order='F')\n return ret\n\n\ndef log_sum_exp_over_rows(a):\n \"\"\"\n This computes log(sum(exp(a), 0)) in a numerically stable way.\n \"\"\"\n maxs_small = a.max(axis=0)\n maxs_big = np.tile(maxs_small, (a.shape[0], 1))\n ret = np.log(np.sum(np.exp(a - maxs_big), 0)) + maxs_small\n return ret\n\n\ndef classification_phi_gradient(input_to_class, data):\n \"\"\"\n This is about a very simple model: there's an input layer, and a softmax output layer. 
There are no hidden layers, and no biases.\n This returns the gradient of phi (a.k.a. negative the loss) for the <input_to_class> matrix.\n <input_to_class> is a matrix of size <number of classes> by <number of input units>.\n <data> has fields .inputs (matrix of size <number of input units> by <number of data cases>) and .targets (matrix of size <number of classes> by <number of data cases>).\n \"\"\"\n # first: forward pass\n class_input = np.dot(input_to_class, data.inputs) # input to the components of the softmax. size: <number of classes> by <number of data cases>\n class_normalizer = log_sum_exp_over_rows(class_input) # log(sum(exp)) is what we subtract to get normalized log class probabilities. size: <1> by <number of data cases>\n log_class_prob = class_input - np.tile(class_normalizer, (class_input.shape[0], 1)) # log of probability of each class. size: <number of classes> by <number of data cases>\n class_prob = np.exp(log_class_prob) # probability of each class. Each column (i.e. each case) sums to 1. size: <number of classes> by <number of data cases>\n # now: gradient computation\n d_loss_by_d_class_input = -(data.targets - class_prob) / data.inputs.shape[1] # size: <number of classes> by <number of data cases>\n d_loss_by_d_input_to_class = np.dot(d_loss_by_d_class_input, data.inputs.T) # size: <number of classes> by <number of input units>\n d_phi_by_d_input_to_class = -d_loss_by_d_input_to_class\n return d_phi_by_d_input_to_class\n\n \ndef argmax_over_rows(matrix):\n indices = np.argmax(matrix, axis=0)\n return indices\n\n\ndef describe_matrix(matrix):\n print('Describing a matrix of size {} by {}. The mean of the elements is {}. The sum of the elements is {}.'\\\n .format(matrix.shape[0], matrix.shape[1], np.mean(matrix), np.sum(matrix)))\n\n\ndef extract_mini_batch(data_set, start_i, n_cases):\n \"\"\"\n Use Python indexing for start_i, i.e. indexing that starts from 0.\n \"\"\"\n mini_batch_inputs = data_set.inputs[:, start_i : start_i + n_cases - 0]\n mini_batch_targets = data_set.targets[:, start_i : start_i + n_cases - 0]\n Mini_batch = namedtuple('Mini_batch', ['inputs', 'targets'])\n mini_batch = Mini_batch(mini_batch_inputs, mini_batch_targets)\n return mini_batch\n\n\ndef logistic(input):\n return 1. / (1. + np.exp(-input))\n\n\ndef optimize(model_shape, gradient_function, training_data, learning_rate, n_iterations):\n \"\"\"\n This trains a model that's defined by a single matrix of weights.\n <model_shape> is the shape of the array of weights.\n <gradient_function> is a function that takes parameters <model> and <data> and returns the gradient (or approximate gradient in the case of CD-1) of the function that we're maximizing.\n Note the contrast with the loss function that we saw in PA3, which we were minimizing. 
The returned gradient is an array of the same shape as the provided <model> parameter.\n This uses mini-batches of size 100, momentum of 0.9, no weight decay, and no early stopping.\n This returns the matrix of weights of the trained model.\n \"\"\"\n model = (a4_rand(model_shape, np.prod(model_shape)) * 2 - 1) * 0.1\n momentum_speed = np.zeros(model_shape)\n mini_batch_size = 100\n start_of_next_mini_batch = 1 - 1\n for iteration_number in range(n_iterations):\n mini_batch = extract_mini_batch(training_data, start_of_next_mini_batch, mini_batch_size)\n start_of_next_mini_batch = (start_of_next_mini_batch + mini_batch_size) % training_data.inputs.shape[1]\n gradient = gradient_function(model, mini_batch)\n momentum_speed = 0.9 * momentum_speed + gradient # ascent\n model = model + momentum_speed * learning_rate\n return model\n\n\ndef show_rbm(rbm_w):\n n_hid = rbm_w.shape[0]\n n_rows = int(np.ceil(np.sqrt(n_hid)))\n blank_lines = 4\n distance = 16 + blank_lines\n to_show = np.zeros([n_rows * distance + blank_lines, n_rows * distance + blank_lines])\n for i in range(n_hid):\n row_i = int(np.floor(i / n_rows))\n col_i = i % n_rows\n pixels = np.reshape(rbm_w[i+0, :], [16, 16]).T\n row_base = row_i*distance + blank_lines\n col_base = col_i*distance + blank_lines\n to_show[row_base+0:row_base+16, col_base+0:col_base+16] = pixels\n extreme = np.max(np.abs(to_show))\n try:\n plt.imshow(to_show, cmap='gray')\n plt.title('Hidden units of the RBM')\n plt.show()\n except:\n print('Failed to display the RBM. No big deal (you do not need the display to finish the assignment), but you are missing out on an interesting picture.');\n return\n\n\ndef _sample_bernoulli(probabilities):\n \"\"\"\n Returns a \"binary\" matrix of the same shape as 'probabilities'.\n It is binary in the sense that it contains only 0s and 1s.\n This creates a Bernoulli distribution over samples from the file 'a4_randomness_source.mat'.\n \"\"\"\n if _report_calls_to_sample_bernoulli:\n print('_sample_bernoulli() was called with a matrix of size {} by {}.'.format(probabilities.shape[0], probabilities.shape[1]))\n seed = np.sum(probabilities)\n binary = 1 * (probabilities > a4_rand(probabilities.shape, seed)) # The \"1*\" is to avoid the \"logical\" data type, which just confuses things.\n return binary\n\n\n######################\n### OUR CODE BELOW ###\n######################\n\ndef visible_state_to_hidden_probabilities(rbm_w, visible_state):\n \"\"\"\n <rbm_w> is a matrix of size <number of hidden units> by <number of visible units>\n <visible_state> is a binary matrix of size <number of visible units> by <number of configurations that we're handling in parallel>.\n The returned value is a matrix of size <number of hidden units> by <number of configurations that we're handling in parallel>.\n This takes in the (binary) states of the visible units, and returns the activation probabilities of the hidden units conditional on those states.\n \"\"\"\n raise NotImplementedError\n return hidden_probability\n\n\ndef hidden_state_to_visible_probabilities(rbm_w, hidden_state):\n \"\"\"\n <rbm_w> is a matrix of size <number of hidden units> by <number of visible units>\n <hidden_state> is a binary matrix of size <number of hidden units> by <number of configurations that we're handling in parallel>.\n The returned value is a matrix of size <number of visible units> by <number of configurations that we're handling in parallel>.\n This takes in the (binary) states of the hidden units, and returns the activation probabilities of the visible 
units, conditional on those states.\n \"\"\"\n raise NotImplementedError\n return visible_probability\n\n\ndef configuration_goodness(rbm_w, visible_state, hidden_state):\n \"\"\"\n <rbm_w> is a matrix of size <number of hidden units> by <number of visible units>\n <visible_state> is a binary matrix of size <number of visible units> by <number of configurations that we're handling in parallel>.\n <hidden_state> is a binary matrix of size <number of hidden units> by <number of configurations that we're handling in parallel>.\n This returns a scalar: the mean over cases of the goodness (negative energy) of the described configurations.\n \"\"\"\n raise NotImplementedError\n return G\n\n\ndef configuration_goodness_gradient(visible_state, hidden_state):\n \"\"\"\n <visible_state> is a binary matrix of size <number of visible units> by <number of configurations that we're handling in parallel>.\n <hidden_state> is a (possibly but not necessarily binary) matrix of size <number of hidden units> by <number of configurations that we're handling in parallel>.\n You don't need the model parameters for this computation.\n This returns the gradient of the mean configuration goodness (negative energy, as computed by function <configuration_goodness>) with respect to the model parameters.\n Thus, the returned value is of the same shape as the model parameters, which by the way are not provided to this function.\n Notice that we're talking about the mean over data cases (as opposed to the sum over data cases).\n \"\"\"\n raise NotImplementedError\n return d_G_by_rbm_w\n\n\ndef cd1(rbm_w, visible_data):\n \"\"\"\n This is an implementation of Contrastive Divergence gradient estimator with 1 full Gibbs update, a.k.a. CD-1.\n <rbm_w> is a matrix of size <number of hidden units> by <number of visible units>\n <visible_data> is a (possibly but not necessarily binary) matrix of size <number of visible units> by <number of data cases>\n The returned value is the gradient approximation produced by CD-1 (Contrastive Divergence 1). It's of the same shape as <rbm_w>.\n \"\"\"\n raise NotImplementedError\n return ret\n\n\n" ]
[ [ "numpy.dot", "numpy.reshape", "numpy.zeros", "numpy.round", "numpy.sum", "scipy.io.loadmat", "numpy.tile", "numpy.exp", "matplotlib.pyplot.title", "numpy.mean", "numpy.prod", "numpy.argmax", "numpy.abs", "numpy.sqrt", "numpy.floor", "matplotlib.pyplot.show", "matplotlib.pyplot.imshow" ] ]
cvut/pyrocon
[ "c2a65bdc2e178c968eb9847aaf45b2fd47239aaa", "c2a65bdc2e178c968eb9847aaf45b2fd47239aaa" ]
[ "interpolation/poly.py", "robCRSikt.py" ]
[ "# Coordinated Spline Motion and Robot Control Project\n# \n# Copyright (c) 2017 Olga Petrova <olga.petrova@cvut.cz>\n# Advisor: Pavel Pisa <pisa@cmp.felk.cvut.cz>\n# FEE CTU Prague, Czech Republic\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# In 2017, project funded by PiKRON s.r.o. http://www.pikron.com/\n\n''' Module provides functions for points interpolation using splines of 3rd order '''\n\nimport numpy as np\nfrom utils import param_correction\n\n# Tri Diagonal Matrix Algorithm solver\ndef TDMAsolver(d):\n \"\"\"\n Tri Diagonal Matrix Algorithm solver.\n :param d: Tri Diagonal Matrix.\n :return: Solution of linear system.\n \"\"\"\n\n n = len(d)\n b = 4*np.ones_like(d)\n for i in range(1, n):\n mc = 1/b[i-1]\n b[i] -= mc\n d[i] -= mc*d[i-1]\n\n x = np.empty_like(d)\n x[-1] = d[-1]/b[-1]\n\n for il in range(n-2, -1, -1):\n x[il] = (d[il]-x[il+1])/b[il]\n\n return x\n\n\ndef _cubic_spline(x):\n \"\"\"\n Internal function for interpolation using polynomial splines of 3rd order.\n :param x: Points to interpolate.\n :return: Spline parameters.\n \"\"\"\n n, dim = x.shape\n\n v0 = (4 * x[1] - 3 * x[0] - x[2]) / 2\n vn = (-4 * x[-2] + x[-3] + 3 * x[-1]) / 2\n\n b = -3 * x[1:-3] + 3 * x[3:-1]\n b0 = -3 * x[0] + 3 * x[2] - v0\n bn = -3 * x[-3] + 3 * x[-1] - vn\n b = np.vstack((b0, b, bn))\n\n v1ton = TDMAsolver(b)\n v = np.vstack((v0, v1ton, vn))\n\n k0 = [[0]*dim, x[1] - x[0] - v0, v0, x[0]]\n kn = [[0]*dim, x[-1] - x[-2] - v[-2], v[-2], x[-2]]\n A = 2 * x[1:-2] - 2 * x[2:-1] + v[2:-1] + v[1:-2]\n B = - 3 * x[1:-2] + 3 * x[2:-1] - v[2:-1] - 2 * v[1:-2]\n C = v[1:-2]\n D = x[1:-2]\n\n lst = [A, B, C, D]\n\n for i, m in enumerate(lst):\n lst[i] = np.vstack((k0[i], lst[i], kn[i]))\n\n return lst\n\n\ndef interpolate(points):\n \"\"\"\n Interpolation of points using polynomial splines of 3rd order.\n :param points: Points to interpolate.\n :return: Spline parameters.\n \"\"\"\n [A, B, C, _] = _cubic_spline(points)\n\n param_lst = []\n\n for i in range(points.shape[0] - 1):\n param = np.vstack((C[i], B[i], A[i]))\n param = np.reshape(param.T, [points.shape[1]*3], order='C')\n param_lst.append(param)\n\n param_lst = param_correction(points[0], np.array(param_lst), 3)\n\n return param_lst\n", "# Coordinated Spline Motion and Robot Control Project\n# \n# Copyright (c) 2017 Olga Petrova <olga.petrova@cvut.cz>\n# Advisor: Pavel Pisa <pisa@cmp.felk.cvut.cz>\n# FEE CTU Prague, Czech Republic\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of 
this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# In 2017, project funded by PiKRON s.r.o. http://www.pikron.com/\n\nimport numpy as np\n\n# based on ROBCRSIKT by Pavel Krsek, Michal Havlena\n# for BlueBot and Bosch Toolbox\n\ndef robCRSikt(robot, pos):\n \"\"\"\n Inverse kinematic task - robot CRS.\n :param robot: CRS robot instance.\n :param pos: Coordinates of robot position in world coordinates.\n :return: Coordinates of robot position in joint coordinates (degrees).\n \"\"\"\n \n pos = np.array(pos).astype(float)\n pos[3:] = pos[3:] / 180.0 * np.pi\n\n myeps = 10000 * 2.2204e-16 # equality tolerance\n par1 = 0 # if infinite number of solutions, theta1=par1\n par4 = 0 # if infinite number of solutions, theta4=par4\n\n # T = base * A01 * A12 * A23 * A34 * A45 * A56 * A76 * tool\n A76 = np.eye(4)\n A76[2][3] = robot.d[5]\n T = np.array([[np.cos(pos[3]) * np.cos(pos[4]), - np.sin(pos[3]) * np.cos(pos[5]) + np.cos(pos[3]) * np.sin(pos[4]) * np.sin(pos[5]),\n np.sin(pos[3]) * np.sin(pos[5]) + np.cos(pos[3]) * np.sin(pos[4]) * np.cos(pos[5]), pos[0]],\n [np.sin(pos[3]) * np.cos(pos[4]), np.cos(pos[3]) * np.cos(pos[5]) + np.sin(pos[3]) * np.sin(pos[4]) * np.sin(pos[5]),\n - np.cos(pos[3]) * np.sin(pos[5]) + np.sin(pos[3]) * np.sin(pos[4]) * np.cos(pos[5]), pos[1]],\n [- np.sin(pos[4]), np.cos(pos[4]) * np.sin(pos[5]), np.cos(pos[4]) * np.cos(pos[5]), pos[2]],\n [0, 0, 0, 1]])\n W = np.linalg.inv(robot.base).dot(T.dot(np.linalg.inv(robot.tool).dot(np.linalg.inv(A76))))\n # X = A01 * A12 * A23 * [0 0 0 1]' because A34*A45*A57==R34*R45*R56 is pure rotation\n X = W.dot(np.array([0, 0, 0, 1])[np.newaxis].T).T[0]\n\n # solve joints 1, 2, 3\n J = []\n b = X[2] - robot.d[0]\n if abs(X[0]) < myeps and abs(X[1]) < myeps: # arm going straight up\n if abs(b - robot.d[3] - robot.a[1]) < myeps: # full length\n J.append([par1, 0, 0])\n elif b < robot.d[3] + robot.a[1]: # can reach\n J.append([ par1, - np.arccos((robot.a[1] ** 2 + b ** 2 - robot.d[3] ** 2) / (2 * robot.a[1] * b)),\n np.pi - np.arccos((robot.a[1] ** 2 + robot.d[3] ** 2 - b ** 2) / (2 * robot.a[1] * robot.d[3]))])\n J.append([par1, np.arccos((robot.a[1] ** 2 + b ** 2 - robot.d[3] ** 2) / (2 * robot.a[1] * b)), - np.pi + np.arccos(\n (robot.a[1] ** 2 + robot.d[3] ** 2 - b ** 2) / (2 * robot.a[1] * robot.d[3]))])\n else: # cannot reach\n J = [np.nan, np.nan, np.nan]\n\n else:\n c = np.sqrt(b ** 2 + X[0] ** 2 + X[1] ** 2)\n if abs(c - robot.d[3] - robot.a[1]) < myeps: # full length\n J.append([np.arctan2(X[1], X[0]), - np.pi / 2 + np.arcsin(b / c), 0])\n J.append([np.arctan2(-X[1], -X[0]), np.pi / 2 - np.arcsin(b / 
c), 0])\n elif c < robot.d[3] + robot.a[1]: # can reach\n theta2 = np.pi / 2 - np.arcsin(b / c) + np.arccos((robot.a[1] ** 2 + c ** 2 - robot.d[3] ** 2) / (2 * robot.a[1] * c))\n # can be bigger than np.pi!!! \n if theta2 > np.pi:\n theta2 = theta2-2 * np.pi\n\n J.append(np.array([np.arctan2(X[1], X[0]), - theta2, np.pi - np.arccos((robot.a[1] ** 2 + robot.d[3] ** 2 - c ** 2) / (2 * robot.a[1] * robot.d[3]))]))\n J.append(np.array([np.arctan2(X[1], X[0]), - np.pi / 2 + np.arcsin(b / c) + np.arccos((robot.a[1] ** 2 + c ** 2 - robot.d[3] ** 2) / (2 * robot.a[1] * c)),\n - np.pi + np.arccos((robot.a[1] ** 2 + robot.d[3] ** 2 - c ** 2) / (2 * robot.a[1] * robot.d[3]))]))\n J.append(np.array([np.arctan2(-X[1], -X[0]), theta2, - np.pi + np.arccos((robot.a[1] ** 2 + robot.d[3] ** 2 - c ** 2) / (2 * robot.a[1] * robot.d[3]))]))\n J.append(np.array([np.arctan2(-X[1], -X[0]), np.pi / 2 - np.arcsin(b / c) - np.arccos((robot.a[1] ** 2 + c ** 2 - robot.d[3] ** 2) / (2 * robot.a[1] * c)),\n np.pi - np.arccos((robot.a[1] ** 2 + robot.d[3] ** 2 - c ** 2) / (2 * robot.a[1] * robot.d[3]))]))\n else: # cannot reach\n J = [np.nan, np.nan, np.nan]\n\n\n deg = []\n toolJ = np.eye(4)\n toolJ[2][3] = robot.d[3]\n for j in range(np.array(J).shape[0]):\n nnn = [np.isnan(a) for a in J]\n if not np.any(nnn):\n # direct kinematics for first 3 joints; inversed\n dif = (J[j] -robot.offset[:3])\n robot.theta = dif*robot.sign[:3]\n P = W\n for i in range(3):\n M = [[np.cos(robot.theta[i]), -np.sin(robot.theta[i]) * np.cos(robot.alpha[i]), np.sin(robot.theta[i]) * np.sin(robot.alpha[i]), robot.a[i] * np.cos(robot.theta[i])],\n [np.sin(robot.theta[i]), np.cos(robot.theta[i]) * np.cos(robot.alpha[i]), -np.cos(robot.theta[i]) * np.sin(robot.alpha[i]), robot.a[i] * np.sin(robot.theta[i])],\n [0, np.sin(robot.alpha[i]), np.cos(robot.alpha[i]), robot.d[i]],\n [0, 0, 0, 1]]\n P = np.linalg.inv(M).dot(P)\n # P = R34 * R45 * R56\n P = np.linalg.inv(toolJ).dot(P)\n\n # Euler Z - Y Z for joints 4, 5, 6\n if abs(P[2][2] - 1) < myeps: # np.cos(theta5) == 1\n deg.append(J[j].tolist() + [par4, 0, np.arctan2(P[1][0], P[0][0]) - par4])\n elif abs(P[2][2] + 1) < myeps: # np.cos(theta5) == -1\n deg.append(J[j].tolist() + [par4, np.pi, np.arctan2(P[1][0], -P[0][0]) + par4])\n else: # non - degenerate\n theta5 = np.arccos(P[2][2])\n deg.append(J[j].tolist() +[np.arctan2(P[1][2] * np.sign(np.sin(theta5)), P[0][2] * np.sign(np.sin(theta5))), - theta5,\n np.arctan2(P[2][1] * np.sign(np.sin(theta5)), -P[2][0] * np.sign(np.sin(theta5)))])\n deg.append(J[j].tolist() + [np.arctan2(P[1][2] * np.sign(np.sin(-theta5)), P[0][2] * np.sign(np.sin(-theta5))), theta5,\n np.arctan2(P[2][1] * np.sign(np.sin(-theta5)), -P[2][0] * np.sign(np.sin(-theta5)))])\n else:\n deg = J + [np.nan, np.nan, np.nan]\n\n deg = np.array(deg) * 180 / np.pi\n return deg" ]
[ [ "numpy.array", "numpy.ones_like", "numpy.reshape", "numpy.empty_like", "numpy.vstack" ], [ "numpy.array", "numpy.isnan", "numpy.sin", "numpy.arccos", "numpy.arcsin", "numpy.eye", "numpy.any", "numpy.arctan2", "numpy.sqrt", "numpy.cos", "numpy.linalg.inv" ] ]
zazula/talos
[ "4a2a2c1c16310a2158692808cb0a6cfe4e4be326", "4a2a2c1c16310a2158692808cb0a6cfe4e4be326" ]
[ "tests/commands/test_latest.py", "talos/parameters/ParamSpace.py" ]
[ "def test_latest():\n\n print('\\n >>> start Latest Features... \\n')\n\n import talos\n from tensorflow.keras.models import Sequential\n from tensorflow.keras.layers import Dense\n\n x, y = talos.templates.datasets.iris()\n\n p = {'activation': ['relu', 'elu'],\n 'optimizer': ['Nadam', 'Adam'],\n 'losses': ['logcosh'],\n 'shapes': ['brick'],\n 'first_neuron': [16, 32, 64, 128],\n 'hidden_layers': [0, 1, 2, 3],\n 'dropout': [.2, .3, .4],\n 'batch_size': [20, 30, 40, 50],\n 'epochs': [10]}\n\n def iris_model(x_train, y_train, x_val, y_val, params):\n\n model = Sequential()\n model.add(Dense(params['first_neuron'],\n input_dim=4,\n activation=params['activation']))\n\n talos.utils.hidden_layers(model, params, 3)\n\n model.add(Dense(3, activation='softmax'))\n model.compile(optimizer=params['optimizer'],\n loss=params['losses'], metrics=['acc'])\n\n out = model.fit(x_train,\n y_train,\n callbacks=[talos.utils.ExperimentLogCallback('test_latest', params)],\n batch_size=params['batch_size'],\n epochs=params['epochs'],\n validation_data=(x_val, y_val),\n verbose=0)\n\n return out, model\n\n scan_object = talos.Scan(x, y,\n model=iris_model,\n params=p,\n experiment_name='test_latest',\n round_limit=5,\n reduction_method='gamify',\n save_weights=False)\n\n print('finised Latest Features \\n')\n", "import inspect\n\nimport numpy as np\nimport itertools as it\nfrom datetime import datetime\n\n\nclass ParamSpace:\n\n def __init__(self,\n params,\n param_keys,\n random_method='uniform_mersenne',\n fraction_limit=None,\n round_limit=None,\n time_limit=None,\n boolean_limit=None):\n\n # set all the arguments\n self.params = params\n self.param_keys = param_keys\n self.fraction_limit = fraction_limit\n self.round_limit = round_limit\n self.time_limit = time_limit\n self.boolean_limit = boolean_limit\n self.random_method = random_method\n\n # set a counter\n self.round_counter = 0\n\n # handle tuple conversion to discrete values\n self.p = self._param_input_conversion()\n\n # create list of list from the params dictionary\n self._params_temp = [list(self.p[key]) for key in self.param_keys]\n\n # establish max dimensions\n self.dimensions = np.prod([len(l) for l in self._params_temp])\n\n # apply all the set limits\n self.param_index = self._param_apply_limits()\n\n # create the parameter space\n self.param_space = self._param_space_creation()\n\n # handle the boolean limits separately\n if self.boolean_limit is not None:\n index = self._convert_lambda(self.boolean_limit)(self.param_space)\n self.param_space = self.param_space[index]\n\n # reset index\n self.param_index = list(range(len(self.param_index)))\n\n def _param_input_conversion(self):\n\n '''Parameters may be input as lists of single or\n multiple values (discrete values) or tuples\n (range of values). 
This helper checks the format of\n each input and handles it accordingly.'''\n\n out = {}\n\n # go through each parameter type\n for param in self.param_keys:\n\n # deal with range (tuple) values\n if isinstance(self.params[param], tuple):\n out[param] = self._param_range_expansion(self.params[param])\n\n # deal with range (list) values\n elif isinstance(self.params[param], list):\n out[param] = self.params[param]\n\n return out\n\n def _param_apply_limits(self):\n\n from talos.reducers.sample_reducer import sample_reducer\n\n if self.boolean_limit is not None:\n # NOTE: this is handled in __init__\n pass\n\n # a time limit is set\n if self.time_limit is not None:\n # NOTE: this is handled in _time_left\n pass\n\n # a fractional limit is set\n if self.fraction_limit is not None:\n return sample_reducer(self.fraction_limit,\n self.dimensions,\n self.random_method)\n\n # a round limit is set\n if self.round_limit is not None:\n return sample_reducer(self.round_limit,\n self.dimensions,\n self.random_method)\n\n # no limits are set\n return list(range(self.dimensions))\n\n def _param_range_expansion(self, param_values):\n\n '''Expands a range (tuple) input into discrete\n values. Helper for _param_input_conversion.\n Expects to have a input as (start, end, steps).\n '''\n\n start = param_values[0]\n end = param_values[1]\n steps = param_values[2]\n\n out = np.arange(start, end, (end - start) / steps, dtype=float)\n\n # inputs are all ints\n if isinstance(start, int) and isinstance(end, int):\n out = out.astype(int)\n out = np.unique(out)\n\n return out\n\n def _param_space_creation(self):\n\n '''Expand params dictionary to permutations\n\n Takes the input params dictionary and expands it to\n actual parameter permutations for the experiment.\n '''\n\n # handle the cases where parameter space is still large\n if len(self.param_index) > 100000:\n\n final_grid = list(it.product(*self._params_temp))\n out = np.array(final_grid, dtype='object')\n\n # handle the cases where parameter space is already smaller\n else:\n final_grid = []\n for i in self.param_index:\n p = []\n for l in reversed(self._params_temp):\n i, s = divmod(int(i), len(l))\n p.insert(0, l[s])\n final_grid.append(tuple(p))\n\n out = np.array(final_grid, dtype='object')\n\n return out\n\n def _check_time_limit(self):\n\n if self.time_limit is None:\n return True\n\n stop = datetime.strptime(self.time_limit, \"%Y-%m-%d %H:%M\")\n\n return stop > datetime.now()\n\n def round_parameters(self):\n\n # permutations remain in index\n if len(self.param_index) > 0:\n\n # time limit has not been met yet\n if self._check_time_limit():\n self.round_counter += 1\n\n # get current index\n index = self.param_index.pop(0)\n\n # get the values based on the index\n values = self.param_space[index]\n round_parameters = self._round_parameters_todict(values)\n\n # pass the parameters to Scan\n return round_parameters\n\n # the experiment is finished\n return False\n\n def _round_parameters_todict(self, values):\n\n round_parameters = {}\n\n for i, key in enumerate(self.param_keys):\n round_parameters[key] = values[i]\n\n return round_parameters\n\n def _convert_lambda(self, fn):\n\n '''Converts a lambda function into a format\n where parameter labels are changed to the column\n indexes in parameter space.'''\n\n # get the source code for the lambda function\n fn_string = inspect.getsource(fn)\n fn_string = fn_string.replace('\"', '\\'')\n\n # look for column/label names\n for i, name in enumerate(self.param_keys):\n index = ':,' + str(i)\n fn_string = 
fn_string.replace(name, index)\n\n # cleanup the string\n fn_string = fn_string.split('lambda')[1]\n fn_string = fn_string.replace('[\\':', '[:')\n fn_string = fn_string.replace('\\']', ']')\n fn_string = 'lambda ' + fn_string\n\n # pass it back as a function\n return eval(fn_string)\n\n def remove_is_not(self, label, value):\n\n '''Removes baesd on exact match but reversed'''\n\n col = self.param_keys.index(label)\n drop = np.where(self.param_space[:, col] != value)[0].tolist()\n self.param_index = [x for x in self.param_index if x not in drop]\n\n def remove_is(self, label, value):\n\n '''Removes based on exact match'''\n\n col = self.param_keys.index(label)\n drop = np.where(self.param_space[:, col] == value)[0].tolist()\n self.param_index = [x for x in self.param_index if x not in drop]\n\n def remove_ge(self, label, value):\n\n '''Removes based on greater-or-equal'''\n\n col = self.param_keys.index(label)\n drop = np.where(self.param_space[:, col] >= value)[0].tolist()\n self.param_index = [x for x in self.param_index if x not in drop]\n\n def remove_le(self, label, value):\n\n '''Removes based on lesser-or-equal'''\n\n col = self.param_keys.index(label)\n drop = np.where(self.param_space[:, col] <= value)[0].tolist()\n self.param_index = [x for x in self.param_index if x not in drop]\n\n def remove_lambda(self, function):\n\n '''Removes based on a lambda function'''\n\n index = self._convert_lambda(function)(self.param_space)\n self.param_space = self.param_space[index]\n self.param_index = list(range(len(self.param_space)))\n" ]
[ [ "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Dense" ], [ "numpy.where", "numpy.array", "numpy.arange", "numpy.unique" ] ]
rochesterxugroup/csc_249_final_proj_a2d_det
[ "0f742981699352181e94a001f3d6f4a5b1824a54" ]
[ "mask_rcnn/utils/resnet_weights_helper.py" ]
[ "\"\"\"\nHelper functions for converting resnet pretrained weights from other formats\n\"\"\"\nimport os\nimport pickle\n\nimport torch\n\nimport mask_rcnn.nn as mynn\nimport mask_rcnn.utils.detectron_weight_helper as dwh\nfrom mask_rcnn.core.config import cfg\n\n\ndef load_pretrained_imagenet_weights(model):\n \"\"\"Load pretrained weights\n Args:\n num_layers: 50 for res50 and so on.\n model: the generalized rcnnn module\n \"\"\"\n _, ext = os.path.splitext(cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS)\n if ext == '.pkl':\n with open(cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS, 'rb') as fp:\n src_blobs = pickle.load(fp, encoding='latin1')\n if 'blobs' in src_blobs:\n src_blobs = src_blobs['blobs']\n pretrianed_state_dict = src_blobs\n else:\n weights_file = os.path.join(cfg.ROOT_DIR, cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS)\n pretrianed_state_dict = convert_state_dict(torch.load(weights_file))\n\n # Convert batchnorm weights\n for name, mod in model.named_modules():\n if isinstance(mod, mynn.AffineChannel2d):\n if cfg.FPN.FPN_ON:\n pretrianed_name = name.split('.', 2)[-1]\n else:\n pretrianed_name = name.split('.', 1)[-1]\n bn_mean = pretrianed_state_dict[pretrianed_name + '.running_mean']\n bn_var = pretrianed_state_dict[pretrianed_name + '.running_var']\n scale = pretrianed_state_dict[pretrianed_name + '.weight']\n bias = pretrianed_state_dict[pretrianed_name + '.bias']\n std = torch.sqrt(bn_var + 1e-5)\n new_scale = scale / std\n new_bias = bias - bn_mean * scale / std\n pretrianed_state_dict[pretrianed_name + '.weight'] = new_scale\n pretrianed_state_dict[pretrianed_name + '.bias'] = new_bias\n\n model_state_dict = model.state_dict()\n\n pattern = dwh.resnet_weights_name_pattern()\n\n name_mapping, _ = model.detectron_weight_mapping\n\n for k, v in name_mapping.items():\n if isinstance(v, str): # maybe a str, None or True\n if pattern.match(v):\n if cfg.FPN.FPN_ON:\n pretrianed_key = k.split('.', 2)[-1]\n else:\n pretrianed_key = k.split('.', 1)[-1]\n if ext == '.pkl':\n model_state_dict[k].copy_(torch.Tensor(pretrianed_state_dict[v]))\n else:\n model_state_dict[k].copy_(pretrianed_state_dict[pretrianed_key])\n\n\ndef convert_state_dict(src_dict):\n \"\"\"Return the correct mapping of tensor name and value\n\n Mapping from the names of torchvision model to our resnet conv_body and box_head.\n \"\"\"\n dst_dict = {}\n for k, v in src_dict.items():\n toks = k.split('.')\n if k.startswith('layer'):\n assert len(toks[0]) == 6\n res_id = int(toks[0][5]) + 1\n name = '.'.join(['res%d' % res_id] + toks[1:])\n dst_dict[name] = v\n elif k.startswith('fc'):\n continue\n else:\n name = '.'.join(['res1'] + toks)\n dst_dict[name] = v\n return dst_dict\n" ]
[ [ "torch.sqrt", "torch.Tensor", "torch.load" ] ]
EiffL/NaMaster
[ "41cc7839083511fe6be2eb20e93b8916c5c1f1f3" ]
[ "test/testutils.py" ]
[ "import numpy as np\n\ndef normdiff(v1,v2) :\n return np.amax(np.fabs(v1-v2))\n\ndef read_flat_map(filename,i_map=0) :\n \"\"\"\n Reads a flat-sky map and the details of its pixelization scheme.\n The latter are returned as a FlatMapInfo object.\n i_map : map to read. If -1, all maps will be read.\n \"\"\"\n from astropy.io import fits\n from astropy.wcs import WCS\n \n hdul=fits.open(filename)\n w=WCS(hdul[0].header)\n\n maps=hdul[i_map].data\n ny,nx=maps.shape\n\n return w,maps\n" ]
[ [ "numpy.fabs" ] ]
Widbskdh/Opai
[ "f04c5281783f54c46be060428d9e58f3a6e15bb4" ]
[ "utils/logs/hooks_test.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for hooks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nimport tensorflow as tf # pylint: disable=g-bad-import-order\n\nfrom utils.logs import hooks\nfrom utils.testing import mock_lib\n\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)\n\n\nclass ExamplesPerSecondHookTest(tf.test.TestCase):\n \"\"\"Tests for the ExamplesPerSecondHook.\n\n In the test, we explicitly run global_step tensor after train_op in order to\n keep the global_step value and the train_op (which increase the glboal_step\n by 1) consistent. This is to correct the discrepancies in reported global_step\n value when running on GPUs.\n \"\"\"\n\n def setUp(self):\n \"\"\"Mock out logging calls to verify if correct info is being monitored.\"\"\"\n self._logger = mock_lib.MockBenchmarkLogger()\n\n self.graph = tf.Graph()\n with self.graph.as_default():\n tf.compat.v1.train.create_global_step()\n self.train_op = tf.compat.v1.assign_add(\n tf.compat.v1.train.get_global_step(), 1)\n self.global_step = tf.compat.v1.train.get_global_step()\n\n def test_raise_in_both_secs_and_steps(self):\n with self.assertRaises(ValueError):\n hooks.ExamplesPerSecondHook(\n batch_size=256,\n every_n_steps=10,\n every_n_secs=20,\n metric_logger=self._logger)\n\n def test_raise_in_none_secs_and_steps(self):\n with self.assertRaises(ValueError):\n hooks.ExamplesPerSecondHook(\n batch_size=256,\n every_n_steps=None,\n every_n_secs=None,\n metric_logger=self._logger)\n\n def _validate_log_every_n_steps(self, every_n_steps, warm_steps):\n hook = hooks.ExamplesPerSecondHook(\n batch_size=256,\n every_n_steps=every_n_steps,\n warm_steps=warm_steps,\n metric_logger=self._logger)\n\n with tf.compat.v1.train.MonitoredSession(\n tf.compat.v1.train.ChiefSessionCreator(), [hook]) as mon_sess:\n for _ in range(every_n_steps):\n # Explicitly run global_step after train_op to get the accurate\n # global_step value\n mon_sess.run(self.train_op)\n mon_sess.run(self.global_step)\n # Nothing should be in the list yet\n self.assertFalse(self._logger.logged_metric)\n\n mon_sess.run(self.train_op)\n global_step_val = mon_sess.run(self.global_step)\n\n if global_step_val > warm_steps:\n self._assert_metrics()\n else:\n # Nothing should be in the list yet\n self.assertFalse(self._logger.logged_metric)\n\n # Add additional run to verify proper reset when called multiple times.\n prev_log_len = len(self._logger.logged_metric)\n mon_sess.run(self.train_op)\n global_step_val = mon_sess.run(self.global_step)\n\n if every_n_steps == 1 and global_step_val > warm_steps:\n # Each time, we log two additional metrics. 
Did exactly 2 get added?\n self.assertEqual(len(self._logger.logged_metric), prev_log_len + 2)\n else:\n # No change in the size of the metric list.\n self.assertEqual(len(self._logger.logged_metric), prev_log_len)\n\n def test_examples_per_sec_every_1_steps(self):\n with self.graph.as_default():\n self._validate_log_every_n_steps(1, 0)\n\n def test_examples_per_sec_every_5_steps(self):\n with self.graph.as_default():\n self._validate_log_every_n_steps(5, 0)\n\n def test_examples_per_sec_every_1_steps_with_warm_steps(self):\n with self.graph.as_default():\n self._validate_log_every_n_steps(1, 10)\n\n def test_examples_per_sec_every_5_steps_with_warm_steps(self):\n with self.graph.as_default():\n self._validate_log_every_n_steps(5, 10)\n\n def _validate_log_every_n_secs(self, every_n_secs):\n hook = hooks.ExamplesPerSecondHook(\n batch_size=256,\n every_n_steps=None,\n every_n_secs=every_n_secs,\n metric_logger=self._logger)\n\n with tf.compat.v1.train.MonitoredSession(\n tf.compat.v1.train.ChiefSessionCreator(), [hook]) as mon_sess:\n # Explicitly run global_step after train_op to get the accurate\n # global_step value\n mon_sess.run(self.train_op)\n mon_sess.run(self.global_step)\n # Nothing should be in the list yet\n self.assertFalse(self._logger.logged_metric)\n time.sleep(every_n_secs)\n\n mon_sess.run(self.train_op)\n mon_sess.run(self.global_step)\n self._assert_metrics()\n\n def test_examples_per_sec_every_1_secs(self):\n with self.graph.as_default():\n self._validate_log_every_n_secs(1)\n\n def test_examples_per_sec_every_5_secs(self):\n with self.graph.as_default():\n self._validate_log_every_n_secs(5)\n\n def _assert_metrics(self):\n metrics = self._logger.logged_metric\n self.assertEqual(metrics[-2][\"name\"], \"average_examples_per_sec\")\n self.assertEqual(metrics[-1][\"name\"], \"current_examples_per_sec\")\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.compat.v1.train.create_global_step", "tensorflow.Graph", "tensorflow.compat.v1.train.get_global_step", "tensorflow.compat.v1.logging.set_verbosity", "tensorflow.test.main", "tensorflow.compat.v1.train.ChiefSessionCreator" ] ]
jorana/verne
[ "df9ed569fe6716db74e3e594b989e5c9e9c8983c" ]
[ "verne/CalcLimits.py" ]
[ "import numpy as np\nfrom scipy.interpolate import interp1d\n\nimport argparse\n\n# Parse the arguments!\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='...')\n parser.add_argument('-exp', '--exp',\n help='Calculate limits for which experiment? \"CDMS\" or \"nucleus\"', type=str,\n required=True)\n args = parser.parse_args()\n exp = args.exp\n\n if (exp == \"CDMS\"):\n loc = \"SUF\"\n N_lim = 34.96 # Upper limit for 27 observed events\n lmlist = [\"1.0\", \"1.5\", \"2.0\", \"3.0\", \"4.0\", \"5.0\"]\n if (exp == \"nucleus\"):\n loc = \"MPI\"\n N_lim = 541.204 # Upper limit for 511 observed events\n lmlist = [\"0.0\", \"1.0\", \"1.5\", \"2.0\", \"3.0\", \"4.0\", \"5.0\"]\n\n masses = np.zeros(len(lmlist))\n limits = np.zeros(len(lmlist))\n\n # Loop over the masses, calculating the upper limit\n # From the Nevent files\n for i, lmstring in enumerate(lmlist):\n lsig, Ne = np.loadtxt(\"../results/Nevents/N_\" + loc + \"_lmx\" + lmstring + \".txt\",\n unpack=True)\n Ne = Ne[lsig.argsort()]\n lsig = np.sort(lsig)\n\n Ne += (-lsig / 1e20)\n\n # Generate an interpolating function\n lsig_interp = interp1d(np.log10(Ne), lsig)\n\n lsig_limit = lsig_interp(np.log10(N_lim))\n\n print(lmstring, lsig_limit)\n\n masses[i] = 10 ** float(lmstring)\n limits[i] = 10 ** lsig_limit\n\n # Add on the final datapoint for very large masses\n if (exp == \"CDMS\"):\n m_max = 1e15\n if (exp == \"nucleus\"):\n m_max = 1e8\n\n limits = np.append(limits, limits[-1] * m_max / masses[-1])\n masses = np.append(masses, m_max)\n\n np.savetxt(\"../results/constraints/\" + exp + \"_thiswork.txt\", zip(masses, limits))\n" ]
[ [ "numpy.log10", "numpy.loadtxt", "numpy.sort", "numpy.append" ] ]
AmberCrafter/pythonlib_meteo
[ "78593a85b09e7afd07bc1cc45c31566efcb14c60" ]
[ "basic.py" ]
[ "# =================================================================== #\r\n# Author: AmberCraft\r\n# License: MIT\r\n# Version: 1.0.0\r\n# Type: toolbox\r\n# Publish date: 2020-09-06\r\n# =================================================================== #\r\n# <ChangeLog>\r\n# <Version: 1.0.0>\r\n# This is the first public version, maybe has some bug. If yuo find any\r\n# bug, please let me known to fix it.\r\n# =================================================================== #\r\n# Declear Parameter/Variable\r\n# variableName: type <unit> [option] -- description and note\r\n# ------------------------------------------------------------------- #\r\n# @basic variable\r\n# longitude: float <deg>\r\n# latitude: float <deg>\r\n# altitude: float <deg>\r\n# temperature: float <degC>\r\n# dTemperature: float <degC> -- dry temperature\r\n# wTemperature: float <degC> -- wet temperature\r\n# rHumidity: float <%> -- relative humidity\r\n# pressure: float <hPa>\r\n# windSpeed: float <m/s>\r\n# windDir: float <deg>\r\n# rainfall: float <mm>\r\n# ------------------------------------------------------------------- #\r\n# @derive variable\r\n# eTemperature: float <K> -- equivalent temperature\r\n# epTemperature: float <K> -- equivalent potential temperature\r\n# dewTemperature: float <degC> -- dewpoint temperature\r\n# mixRatio: float <1> -- mixing ratio\r\n# pTemperature: float <K> -- potential temperature\r\n# spHumidity: float <g/kg> -- specific humidity\r\n# vapor: float <hPa> -- water vapor pressure\r\n# vTemperature: float <K> -- virtural temperature\r\n# vpTemperature: float <K> -- virtural potential temperature\r\n# ------------------------------------------------------------------- #\r\n# @other parameter\r\n# AGL: float <m> -- above ground level height\r\n# density_air <kg/m3>\r\n# spLatent: float <kJ/kg> -- specific latent heat\r\n# triTemperature_water: float <K> -- water triple point temperature\r\n# =================================================================== #\r\n# Declear function\r\n# functionName -- discription and not\r\n# ------------------------------------------------------------------- #\r\n# dewpoint_temperature\r\n# mixing_ratio\r\n# equivalent_potential_temperature\r\n# exp_air_density\r\n# exp_gravity\r\n# equivalent_temperature\r\n# exp_specific_latent_evap\r\n# moist_static_energy\r\n# potential_temperature\r\n# saturation_vapor_pressure\r\n# specific_humidity\r\n# vapor_pressure\r\n# virtural_potential_temperature\r\n# virtural_temperature\r\n# =================================================================== #\r\n# Function description formation\r\n'''\r\n# Input Parameter\r\n@type variable: type <unit> [option] -- discription and note\r\n# Option Input Parameter\r\n@type variable -> {\r\n @type variable: type <unit> [option] -- discription and not\r\n}\r\n# Output Parameter\r\n@type variable: type <unit> [option] -- discription and not\r\n@result\r\n\r\n# Function\r\n# Discription\r\n# Ref and Src\r\n'''\r\n# @type = [\r\n# basi = basic variable, \r\n# deri = derive variable,\r\n# disc = discription parameter,\r\n# para = other parameter\r\n# ]\r\n# =================================================================== #\r\n\r\ndef _ndarray_float(x):\r\n import numpy as np\r\n return np.array(x,dtype=float)\r\n\r\ndef dewpoint_temperature(temperautre,rHumidity):\r\n '''\r\n # Input Parameter:\r\n @basi temperature: float <degC>\r\n @basi rHumidity: float <%>\r\n\r\n # Output Parameter:\r\n @deri dewTemperature: float <degC>\r\n\r\n # Discription\r\n 1. 
effective range\r\n 0℃ < T < 60℃\r\n 1% < RH < 100%\r\n 0℃ < T_{d} < 50℃\r\n 2. depend on numpy to calculate array\r\n\r\n # Src: wiki\r\n '''\r\n import numpy as np\r\n temperautre=_ndarray_float(temperautre)\r\n rHumidity=_ndarray_float(rHumidity)\r\n\r\n a=17.27\r\n b=237.7\r\n gamma=a*temperautre/(b+temperautre)+np.log(rHumidity/100)\r\n return b*gamma/(a-gamma)\r\n\r\ndef saturation_vapor_pressure(temperature,phase='water'):\r\n '''\r\n # Input Parameter:\r\n @basi temperature: float <degC>\r\n @disc phase: str <1> ['water','ice'] -- defined what the water phase at 0℃\r\n\r\n # Output Parameter:\r\n @deri satVapor: float <hPa> -- saturation vapor pressure\r\n\r\n # Discription\r\n 1. effective range\r\n -80℃ < T < 50℃\r\n 2. depend on numpy to calculate array\r\n\r\n # Formula\r\n Arden Buck equations -> es = 6.1121*exp((18.678-(temperature/234.5))*(temperature/(257.14+temperature)))\r\n Goff Gratch equations -> # Not implement\r\n phase='water' -> es = 10**(\r\n C1*(1-triTemperature_water/(temperature+273.15)) +\r\n C2*log((temperature+273.15)/triTemperature_water) +\r\n C3*10**(-4)*(1-10**(C4*(temperature/triTemperature_water)**(-1))) +\r\n C5*10**(-3)*(10**(C6*(1-triTemperature_water/temperature))-1) +\r\n C7)\r\n C1 = 10.79586\r\n C2 = -5.02808\r\n C3 = 1.50474\r\n C4 = -8.20602\r\n C5 = 0.42873\r\n C6 = 4.76955\r\n C7 = 0.7861183\r\n phase='ice' -> es = 10**(\r\n C1*(triTemperature_water/temperature-1) +\r\n C2*log(triTemperature_water/temperature) +\r\n C3(1-temperature/triTemperature_water) +\r\n C4\r\n )\r\n C1 = -9.906936\r\n C2 = -3.56654\r\n C3 = 0.876817\r\n C4 = 0.7861183\r\n triTemperature_water = 273.16 <K>\r\n # Ref: \r\n Arden Buck equations\r\n Goff Gratch equation(1946)\r\n '''\r\n import numpy as np\r\n def liquid(temperature):\r\n return 6.1121*np.exp((18.678-(temperature/234.5))*(temperature/(257.14+temperature)))\r\n def solid(temperature):\r\n # incorrect\r\n return 6.1121*np.exp((18.678-(temperature/234.5))*(temperature/(257.14+temperature)))\r\n \r\n phase_list={\r\n 'solid':0,\r\n 's':0,\r\n 'ice':0,\r\n\r\n 'liquid':1,\r\n 'l':1,\r\n 'water':1\r\n }\r\n \r\n temperature=_ndarray_float(temperature)\r\n\r\n if phase_list[phase]==1:\r\n Tl=np.array(temperature,dtype=float)\r\n Tl[Tl<0]=np.nan\r\n Ts=np.array(temperature,dtype=float)\r\n Ts[Ts>=0]=np.nan\r\n else:\r\n Tl=np.array(temperature,dtype=float)\r\n Tl[Tl<=0]=np.nan\r\n Ts=np.array(temperature,dtype=float)\r\n Ts[Ts>0]=np.nan\r\n Tl=liquid(Tl); Ts=solid(Ts)\r\n return np.nansum([Tl,Ts],axis=0)\r\n\r\ndef vapor_pressure(temperature=None,rHumidity=None,method='rh',dTemperature=None,wTemperature=None,Pressure=None,*args,**keywords):\r\n '''\r\n # Input Parameter\r\n @basi temperature: float <degC>\r\n @basi rHumidity: float <%> -- relative humidity\r\n @disc method: str <1> [default='rh', 'dw'] -- defined the calculation method, dw mean dry and wet bulb temperature measurement method\r\n {\r\n if method=='dw' -> keywords:{\r\n @basi dTemperature: float <degC>\r\n @basi wTemperature: float <degC>\r\n @basi pressure: float <hPa>\r\n }\r\n }\r\n @disc phase: str <1> [default='water','ice'] -- defined what the water phase at 0℃\r\n\r\n # Output Parameter\r\n @deri vapor pressure: float <hPa>\r\n\r\n # Formula\r\n method='rh' ->\r\n e=es*rh\r\n method='wd' ->\r\n phase='water' -> e=es-0.5*(dTemperature-wTemperature)*Pressure/1013.25 (es is water saturation pressure)\r\n phase='ice' -> e=es-0.44*(dTemperature-wTemperature)*Pressure/1013.25 (es is ice saturation pressure)\r\n method='Ferrel' # Not implement\r\n 
phase='water' -> e=es+C1*Pressure*(dTemperature-wTemperature)*(1+C2*(wTemperature+273.15))\r\n C1 = -6.6*10**(-4)\r\n C2 = 0.00115\r\n phase='ice' -> e=es+C1*Pressure*(dTemperature-wTemperature)*(1+C2*(wTemperature+273.15))\r\n C1 = -5.973*10**(-4)\r\n C2 = 0.00115\r\n\r\n # Src: https://zhidao.baidu.com/question/373004649658108244.html\r\n '''\r\n if method in ['rh']:\r\n return saturation_vapor_pressure(temperature,*args,**keywords)*rHumidity/100\r\n if method in ['dw','wd']:\r\n if 'phase' in keywords.keys():\r\n if keywords['phase']=='water': return saturation_vapor_pressure(dTemperature,*args,**keywords)-0.5*(dTemperature-wTemperature)*Pressure/1013.25\r\n if keywords['phase']=='ice': return saturation_vapor_pressure(dTemperature,*args,**keywords)-0.44*(dTemperature-wTemperature)*Pressure/1013.25\r\n else:\r\n return saturation_vapor_pressure(dTemperature,*args,**keywords)-0.5*(dTemperature-wTemperature)*Pressure/1013.25\r\n \r\n\r\ndef mixing_ratio(pressure,vapor=None,*args,**keywords):\r\n '''\r\n # Input Parameter:\r\n @basi pressure: float <hPa>\r\n @deri vapor: float <hPa>\r\n\r\n #Option Input Parameter\r\n @deri vapor->keywords:{\r\n @basi temperature: float <degC>\r\n @basi rHumidity: float <%>\r\n @basi phase: str [default='water']\r\n }\r\n \r\n # Output Parameter:\r\n @deri mixRatio: float <1> -- mixing ratio\r\n\r\n # Function\r\n mixRatio = 0.622 * (vapor/(pressure-vapor))\r\n\r\n # Src: AMS Glossary\r\n '''\r\n if vapor is None: vapor=vapor_pressure(**keywords)\r\n return 0.622*(vapor/(pressure-vapor))\r\n\r\ndef specific_humidity(pressure,vapor=None,*args,**keywords):\r\n '''\r\n # Input Parameter:\r\n @basi pressure: float <hPa>\r\n @deri vapor: float <hPa>\r\n \r\n # Option Input Parameter\r\n @deri vapor -> {\r\n @basi temperature: float\r\n @basi rHumidity: float <%>\r\n @disc phase: str <1> [default='water','ice'] -- defined what the water phase at 0℃\r\n }\r\n \r\n # output:\r\n @deri specific humidity: float <g/kg>\r\n \r\n depend on numpy to calculate array\r\n src: wiki\r\n '''\r\n # return 0.622*(vapor/pressure)\r\n if vapor is None: vapor=vapor_pressure(*args,**keywords)\r\n q = mixing_ratio(pressure,vapor)\r\n return q/(1+q)*1000\r\n\r\ndef potential_temperature(temperature,pressure):\r\n '''\r\n # Input Parameter\r\n @basi temperature: float <K>\r\n @basi pressure: float <hPa>\r\n\r\n # Output Parameter\r\n @deri pTemperature: float <K> -- potential temperature\r\n\r\n #Formula\r\n theta = T*(P0/P)**(R/Cp)\r\n P0=1000 <hPa>\r\n '''\r\n R =0.287 #unit: kJ/(kg*K)\r\n Cp=1.005 #unit: kJ/(kg*K)\r\n P0=1000 #unit: hPa\r\n return temperature*(P0/pressure)**(R/Cp)\r\n\r\ndef virtural_temperature(temperature,pressure,vapor=None,*args,**keywords):\r\n '''\r\n # Input Parameter\r\n @basi temperature: float <degC>\r\n @basi pressure: float <hPa>\r\n @deri vapor: float <hPa> -- vapor pressure\r\n\r\n # Option Input Parameter\r\n @deri vapor -> {\r\n *@basi temperature: float <degC>\r\n @basi rHumidity: float <%>\r\n @disc phase: str <1> [default='water','ice'] -- defined what the water phase at 0℃\r\n }\r\n\r\n # Output Parameter\r\n @deri vTemperature: float <K> -- virtual temperature\r\n\r\n #Formula\r\n Tv = T/(1-(e/p)*(1-epsilon))\r\n e: vapor pressure\r\n epsilon=Rd/Rv=Mv/Md~=0.622\r\n\r\n # Ref: https://en.wikipedia.org/wiki/Virtual_temperature\r\n '''\r\n epsilon=0.622\r\n if vapor is None: vapor=vapor_pressure(temperature,**keywords)\r\n temperature = temperature+273.15\r\n return temperature/(1-(vapor/pressure)*(1-epsilon))\r\n\r\ndef 
virtural_potential_temperature(*args,**keywords):\r\n '''\r\n # Input Parameter\r\n @basi temperature: float <degC>\r\n @basi pressure: float <hPa>\r\n @deri vapor: float <hPa> -- vapor pressure\r\n\r\n # Option Input Parameter\r\n @deri vapor -> {\r\n @basi temperature: float <degC>\r\n @basi rHumidity: float <%>\r\n }\r\n\r\n # Ouput Parameter\r\n @deri vpTemperature: float <K> -- virture potential temperature\r\n\r\n #Formula\r\n theta = Tv*(P0/P)**(R/Cp)\r\n Tv = T/(1-(e/p)*(1-epsilon))\r\n e: vapor pressure\r\n epsilon=Rd/Rv=Mv/Md~=0.622\r\n\r\n # Ref: https://en.wikipedia.org/wiki/Virtual_temperature\r\n '''\r\n return potential_temperature(temperature=virtural_temperature(**keywords),pressure=keywords['pressure'])\r\n\r\ndef exp_specific_latent_evap(temperature,phase='water'):\r\n '''\r\n # Input Parameter\r\n @basi temperature: float <degC>\r\n @desc phase: str <1> [default='water','ice']\r\n\r\n #Output Parameter\r\n @para spLatent: float <kJ/kg> -- specific latent heat\r\n\r\n # Ref: https://en.wikipedia.org/wiki/Latent_heat\r\n '''\r\n if phase=='water':\r\n return (2500.8-2.36*temperature+0.0016*temperature**2-0.00006*temperature**3)\r\n if phase=='ice':\r\n return (2831.1-0.29*temperature-0.004*temperature**2)\r\n\r\ndef exp_gravity(latitude,altitude):\r\n '''\r\n # Input Parameter\r\n @basi latitude: float <deg>\r\n @basi altitude: float <m>\r\n\r\n # Ouput Parameter\r\n @para gravity: float <m/s2>\r\n\r\n # Formula\r\n g ~= g0 * (1 + 0.0052884*(sin(latitude)**2) - 0.0000059*(sin(2*latitude)**2)) - 0.000003086*altitude\r\n g0 ~= 9.78046 <m/s2>\r\n\r\n # Discription\r\n 1. Depend on numpy\r\n\r\n # Ref\r\n https://zh.wikipedia.org/wiki/%E9%87%8D%E5%8A%9B%E5%8A%A0%E9%80%9F%E5%BA%A6\r\n '''\r\n import numpy as np\r\n latitude=np.deg2rad(latitude)\r\n g0 = 9.78046\r\n return g0 * (1 + 0.0052884*(np.sin(latitude)**2) - 0.0000059*(np.sin(2*latitude)**2)) - 0.000003086*altitude\r\n\r\ndef exp_air_density(temperautre,pressure):\r\n '''\r\n # Input Parameter\r\n @basi temperature: float <degC>\r\n @basi pressure: float <hPa>\r\n\r\n # Output Parameter\r\n @para density_air: float <kg/m3>\r\n\r\n # Formula\r\n density_air = 1.293 * (pressure/1033.6) * (273.15/(273.15+temperature))\r\n '''\r\n return 1.293*(pressure/1033.6)*(273.15/(273.15+temperautre))\r\n\r\ndef moist_static_energy(temperature,pressure,AGL,mixRatio=None,*args,**keywords):\r\n '''\r\n # Input Parameter\r\n @basi temperature: float <degC>\r\n @basi pressure: float <hPa>\r\n @para AGL: float <m> -- above ground level height\r\n @deri mixRatio: float <1>\r\n\r\n # Option Input Parameter\r\n @deri mixRatio -> {\r\n @basi temperature: float <degC>\r\n @basi rHumidity: float <%>\r\n @basi pressure: float <hPa>\r\n }\r\n\r\n # Ouput Parameter\r\n @result moist static energy: float <kJ/kg>\r\n\r\n # Formula\r\n S = Cp*T + g*z + Lv*q\r\n z: above ground level heigh (AGL) <m>\r\n q: spHumidity <g/kg>\r\n Lv: latent heat of vaporation <kJ/g>\r\n\r\n # Ref\r\n https://en.wikipedia.org/wiki/Moist_static_energy\r\n '''\r\n rho = lambda Tair, Press: 1.293*(Press/1033.6)*(273.15/(273.15+Tair))\r\n Cp=1.005 #unit: kJ/(kg*K)\r\n Lv=exp_specific_latent_evap(temperature)/1000\r\n g=exp_gravity(24.968498,132)\r\n if mixRatio==None: mixRatio=mixing_ratio(temperature=temperature,pressure=pressure,*args,**keywords)\r\n return Cp*temperature + g*AGL/rho(temperature,pressure) + Lv*mixRatio\r\n\r\ndef equivalent_temperature(temperature,rHumidity,pressure):\r\n '''\r\n # Input Parameter\r\n @basi temperature: float <degC>\r\n @basi rHumidity: 
float <%>\r\n @basi pressure: float <hPa>\r\n\r\n # Ouput Parameter\r\n @deri relative temperature: float <K>\r\n\r\n # Formula\r\n Te = T*exp((Lv*mixingRatio)/(Cp*Td))\r\n Lv: latent heat of vaporation <kJ/g>\r\n Td: dewpoint temperature\r\n\r\n # Discription\r\n 1. Depend on numpy\r\n \r\n # Ref\r\n https://zh.wikipedia.org/wiki/%E7%9B%B8%E7%95%B6%E4%BD%8D%E6%BA%AB\r\n '''\r\n import numpy as np\r\n Cp=1.005 #unit: kJ/(kg*K)\r\n Lv=exp_specific_latent_evap(temperature)/1000\r\n dewTemperature = dewpoint_temperature(temperature,rHumidity)\r\n mixRatio=mixing_ratio(temperature=temperature,rHumidity=rHumidity,pressure=pressure)\r\n temperature+=273.15\r\n return temperature*np.exp((Lv*mixRatio)/(Cp*dewTemperature))\r\n\r\ndef equivalent_potential_temperature(*args,**keywords):\r\n '''\r\n # Input Parameter\r\n @basi temperature: float <degC>\r\n @basi rHumidity: float <%>\r\n @basi pressure: float <hPa>\r\n\r\n # Ouput Parameter\r\n @deri epTemperature: float <K> -- equivalent potential temperature\r\n\r\n # Formula\r\n theta_e = Te*(P0/P)**(R/Cp)\r\n\r\n # Ref\r\n https://zh.wikipedia.org/wiki/%E7%9B%B8%E7%95%B6%E4%BD%8D%E6%BA%AB\r\n '''\r\n return potential_temperature(temperature=equivalent_temperature(**keywords),pressure=keywords['pressure'])\r\n\r\nif __name__ == \"__main__\":\r\n Temperature = 25\r\n \r\n\r\n satPressure = saturation_vapor_pressure(25)\r\n vapPressure = vapor_pressure(dTemperature=25, wTemperature=20, method='dw', Pressure=1000)\r\n\r\n print('stop')\r\n" ]
[ [ "numpy.array", "numpy.sin", "numpy.log", "numpy.nansum", "numpy.exp", "numpy.deg2rad" ] ]
depp/ultrafxr
[ "e52408beb84de32fc3d439c3827f59e32260fb7d" ]
[ "math/coeffs/calc.py" ]
[ "import argparse\nimport math\nimport numpy\nimport numpy.polynomial.polynomial as polynomial\nimport pathlib\nimport sys\n\nfrom typing import List, TextIO, Tuple\n\nclass SolverError(Exception):\n pass\n\ndef chebyshev_nodes(n: int) -> numpy.ndarray:\n \"\"\"Generate N Chebyshev nodes in the range [-1,1].\"\"\"\n d = math.pi * 0.5\n return numpy.sin(numpy.linspace(-d, d, 2 * n + 1)[1::2])\n\ndef rescale(x, xrange):\n \"\"\"Rescale the x array so it covers xrange exactly.\"\"\"\n x0, x1 = xrange\n xmin = numpy.min(x)\n xmax = numpy.max(x)\n xspan = xmax - xmin\n return (x - xmin) * (x1 / xspan) + (xmax - x) * (x0 / xspan)\n\nFUNCTIONS = {}\ndef function(*, name, min_order):\n def wrap(f):\n assert name not in FUNCTIONS\n FUNCTIONS[name] = (f, min_order)\n return f\n return wrap\n\n@function(name='exp2', min_order=2)\ndef exp2_coeffs(order: int) -> numpy.ndarray:\n \"\"\"Coefficients for 2^x on (-0.5, 0.5).\n \n Coefficients are chosen to minimize maximum equivalent input error.\n \"\"\"\n xrange = -0.5, 0.5\n x0, x1 = xrange\n # Remez algorithm, adapted to minimize relative error.\n # Signs: alternating +1, -1\n signs = numpy.zeros((order + 2,))\n signs[0::2] = 1\n signs[1::2] = -1\n # X: initial set of sample points\n # Chebyshev nodes, to avoid Runge's phenomenon\n x = chebyshev_nodes(order + 2)\n x = rescale(x, xrange)\n # x = 0.5 * x + 0.5 * (numpy.power(2, x) - 1)\n\n y = numpy.exp2(x)\n last_rel_err = math.inf\n last_poly_coeffs = None\n for i in range(100):\n # Solve equation: a_j * x_i^j + (-1)^i * E * 2^x_i = 2^x_i\n # This gives us coeffs for polynomial, followed by E\n error_coeffs = signs * y\n lin_coeffs = numpy.append(\n numpy.power(x[:, None], numpy.arange(0, order + 1)[None, :]),\n error_coeffs.reshape((order + 2, 1)),\n axis=1,\n )\n poly_coeffs = numpy.linalg.solve(lin_coeffs, y)\n # The [:-1] strips off E, and gives us just the coeffs.\n poly_coeffs = poly_coeffs[:-1]\n\n # Find extrema of (p(x) - 2^x) / 2^x\n # Which are extrema of p(x) 2^-x - 1\n # Which we find by solving p'(x) 2^-x - log 2 2^-x p(x) = 0\n # Which has the same roots as log2 p(x) - p'(x)\n rel_coeffs = numpy.log(2) * poly_coeffs\n rel_coeffs[:-1] -= numpy.arange(1, order + 1) * poly_coeffs[1:]\n roots = numpy.roots(rel_coeffs[::-1])\n if numpy.any(numpy.iscomplex(roots)):\n raise SolverError('Roots are complex')\n roots.sort()\n if numpy.min(roots) <= x0 or x1 <= numpy.max(roots):\n raise SolverError('Roots are too large')\n x[0] = x0\n x[1 : -1] = roots\n x[-1] = x1\n\n # Calculate maximum relative error\n y = numpy.exp2(x)\n rel_err = numpy.max(\n numpy.abs((polynomial.Polynomial(poly_coeffs)(x) - y) / y))\n if not math.isinf(last_rel_err):\n improvement = (last_rel_err - rel_err) / last_rel_err\n if improvement <= 0:\n rel_err, poly_coeffs = last_rel_err, last_poly_coeffs\n break\n elif improvement < 1e-6:\n break\n last_rel_err = rel_err\n last_poly_coeffs = poly_coeffs\n\n return poly_coeffs\n\n@function(name='sin1_smooth', min_order=1)\ndef sin1_smooth_coeffs(order: int) -> numpy.ndarray:\n \"\"\"Coefficients for sin(2 pi x) on (-0.25, 0.25).\n\n Coefficients are chosen to make higher order derivatives smooth. 
Only\n odd-numbered coefficients are included.\n \"\"\"\n # We solve for an odd polynomial p(x) where the odd derivatives are 0 at\n # x=1, and then fix p(1) = 1.\n mat_coeffs = numpy.zeros((order, order))\n vec_coeffs = numpy.zeros((order))\n poly = numpy.ones((order,))\n powers = numpy.arange(order) * 2 + 1\n for n in range(order - 1):\n poly *= powers\n powers -= 1\n # 2n+1-th derivative of f is 0 at x=1\n mat_coeffs[n] = poly\n poly *= powers\n powers -= 1\n # f(1) = 1\n mat_coeffs[order - 1] = 1\n vec_coeffs[order - 1] = 1\n poly_coeffs = numpy.linalg.solve(mat_coeffs, vec_coeffs)\n # Above coefficients are for sin(pi x / 2), rescale for sin(2 pi x).\n poly_coeffs *= 4 ** numpy.arange(1, 2 * order + 1, 2)\n return poly_coeffs\n\n@function(name='sin1_l1', min_order=2)\ndef sin1_l1_coeffs(order: int) -> numpy.ndarray:\n \"\"\"Coefficients for sin(2 pi x) on (0, 0.25).\n\n Constant coefficient is chosen to be zero, and omitted from result. Maximum\n error is minimized.\n \"\"\"\n # We create for a polynomial for a quarter wave of a sine.\n # Remez algorithm.\n # Signs: alternating +1, -1\n signs = numpy.zeros((order + 2,))\n signs[0::2] = 1\n signs[1::2] = -1\n # Fix f(0) = 0\n signs[0] = 0\n # X: initial set of sample points\n # Chebyshev nodes, to avoid Runge's phenomenon\n x = chebyshev_nodes(order + 2)\n x = rescale(x, (0.0, 0.25))\n\n tau = 2 * numpy.pi\n last_error = math.inf\n last_poly_coeffs = None\n for _ in range(100):\n # Solve for polynomial coefficients.\n lin_coeffs = numpy.append(\n numpy.power(x[:, None], numpy.arange(0, order + 1)[None, :]),\n signs[:,None],\n axis=1,\n )\n poly_coeffs = numpy.linalg.solve(lin_coeffs, numpy.sin(tau * x))[:-1]\n poly_coeffs[0] = 0 # Should be 0 anyway.\n\n # Find X values of maximum error.\n # Which are solutions to d/dx p(x) - 2 pi cos(2 pi x) = 0\n extrema = x[1:-1].copy()\n dpoly_coeffs = poly_coeffs[1:] * numpy.arange(1, order + 1)\n ddpoly_coeffs = dpoly_coeffs[1:] * numpy.arange(1, order)\n for _ in range(10):\n # Newton's method: x <- x - f(x) / f'(x)\n powers = numpy.power(extrema[:, None],\n numpy.arange(0, order)[None, :])\n fx = powers @ dpoly_coeffs - tau * numpy.cos(tau * extrema)\n dfx = (powers[:,:-1] @ ddpoly_coeffs\n + tau * tau * numpy.sin(tau * extrema))\n deltax = fx / dfx\n extrema -= deltax\n maxdelta = numpy.max(numpy.abs(deltax))\n if maxdelta < 1e-10:\n break\n # Use these x values for next iteration.\n x[1:-1] = extrema\n if not numpy.all(x[:-1] < x[1:]):\n raise SolverError('extrema not ascending')\n\n # Calculate maximum error\n powers = numpy.power(extrema[:, None],\n numpy.arange(0, order + 1)[None, :])\n error = numpy.max(numpy.abs(\n powers @ poly_coeffs - numpy.sin(tau * extrema)))\n if not math.isinf(last_error):\n improvement = (last_error - error) / last_error\n if improvement <= 0:\n error, poly_coeffs = last_error, last_poly_coeffs\n break\n elif improvement < 1e-6:\n break\n last_error = error\n last_poly_coeffs = poly_coeffs\n\n return poly_coeffs[1:]\n\ndef write_data(data: List[Tuple[int, numpy.ndarray]], fp: TextIO) -> None:\n for n, coeffs in data:\n cells = [str(n)]\n for coeff in coeffs:\n cells.append(str(coeff))\n fp.write(','.join(cells) + '\\n')\n\ndef main(argv: List[str]) -> None:\n p = argparse.ArgumentParser('calc.py')\n p.add_argument('-o', '--output')\n p.add_argument('-n', '--order', type=int, default=8)\n p.add_argument('function', choices=FUNCTIONS)\n args = p.parse_args(argv)\n\n function, min_order = FUNCTIONS[args.function]\n data = [(n, function(n)) for n in range(min_order, 
args.order + 1)]\n\n output = args.output\n if output is None:\n output = (pathlib.Path(__file__).resolve().parent\n / (args.function + '.csv'))\n print('Writing', output, file=sys.stderr)\n with output.open('w') as fp:\n write_data(data, fp)\n elif output == '-':\n write_data(data, sys.stdout)\n else:\n print('Writing', output, file=sys.stderr)\n with open(output, 'w') as fp:\n write_data(data, fp)\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n" ]
[ [ "numpy.max", "numpy.iscomplex", "numpy.sin", "numpy.zeros", "numpy.log", "numpy.exp2", "numpy.roots", "numpy.ones", "numpy.min", "numpy.arange", "numpy.linalg.solve", "numpy.abs", "numpy.all", "numpy.cos", "numpy.linspace", "numpy.polynomial.polynomial.Polynomial" ] ]
emarteca/gatsby
[ "144412f9a5a7d94e26407eff450a238e76a6a98a" ]
[ "get_failing_tests.py" ]
[ "import pandas as pd \nimport xml.etree.ElementTree as ET\nimport sys\n\n# before this, process with bash to get just the fails\ndef process_fails_file( filename):\n\tto_ret = pd.read_csv( filename, sep = ' ', header=None)\n\tto_ret = to_ret[to_ret[0] == \"FAIL\"]\n\tto_ret.drop([0], inplace=True, axis=1) # delete the garbage initial columns\n\tto_ret.columns = [\"suite\"]\n\treturn( to_ret)\n\n\ndef get_failing_tests( filename):\n\troot = ET.parse( filename).getroot()\n\t# we already know the shape of the tree so we can just hardcode this\n\tfailing_tests = []\n\tfor ts in root:\n\t\tfor t in ts:\n\t\t\tfor e in t:\n\t\t\t\tif e.tag == \"failure\":\n\t\t\t\t\tfailing_tests += [t.attrib[\"classname\"]]\n\treturn( list(set(failing_tests)))\n\ndef print_DF_to_file( df, filename):\n\tf = open(filename, 'w');\n\tf.write(df.to_csv(index = False, header=False))\n\tf.close()\n\ndef main():\n\tnew_root = \"/home/ellen/Documents/ASJProj/TESTING_reordering/gatsby/\"\n\tfailing_suites = process_fails_file( \"fails.csv\")\n\tprint_DF_to_file( failing_suites.apply(lambda row: new_root + row.suite, axis=1), \"test_list.txt\")\n\tfailing_tests = get_failing_tests(\"junit.xml\")\n\tprint_DF_to_file(pd.DataFrame(failing_tests).drop_duplicates(), \"affected_test_descs.txt\")\n\nmain()\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
kavanase/DefectsWithTheBoys
[ "041c7569d13c331719a740067924cc464f0a6d9e" ]
[ "doped/pycdt/corrections/tests/test_kumagai_correction.py" ]
[ "# coding: utf-8\n\nfrom __future__ import division\n\n__status__ = \"Development\"\n\nimport os\nimport numpy as np\nimport unittest\n\nfrom pymatgen.io.vasp.outputs import Locpot\nfrom doped.pycdt.corrections.kumagai_correction import *\nfrom pymatgen.util.testing import PymatgenTest\n\n# Paths to files we are testing on\nbl_path = os.path.abspath(os.path.join(\n __file__, '..', '..', '..', '..', 'test_files', 'bLOCPOT.gz'))\ndl_path = os.path.abspath(os.path.join(\n __file__, '..', '..', '..', '..', 'test_files', 'dLOCPOT.gz'))\nkad_path = os.path.abspath(os.path.join(\n __file__, '..', '..', '..', '..', 'test_files', 'testKumagaiData.json'))\n\nclass KumagaiBulkInitANDCorrectionTest(PymatgenTest):\n #TODO: add test for outcar Kumagai method...\n def setUp(self):\n self.bl = Locpot.from_file(bl_path)\n self.dl = Locpot.from_file(dl_path)\n self.bs = self.bl.structure\n self.ds = self.dl.structure\n self.kbi = KumagaiBulkInit(self.bs, self.bl.dim, 15,\n optgamma=3.49423226983)\n self.kc = KumagaiCorrection(15, -3, 3.49423226983, self.kbi.g_sum,\n self.bs, self.ds, bulk_locpot=self.bl,\n defect_locpot=self.dl)\n\n def test_find_optimal_gamma(self):\n self.assertEqual(self.kbi.find_optimal_gamma(), 3.4942322698305639)\n\n def test_reciprocal_sum(self):\n self.assertEqual(self.kbi.g_sum.size, 884736)\n self.assertAlmostEqual(self.kbi.g_sum[0][0][0], 0.050661706751775192)\n\n def test_pc(self):\n self.assertAlmostEqual(self.kc.pc(), 2.1315841582145407)\n\n def test_potalign(self):\n self.assertAlmostEqual(self.kc.potalign(), 2.1091426308966001)\n\n def test_correction(self):\n self.assertAlmostEqual(self.kc.correction(), 4.24073)\n\n def test_plot(self):\n tmpforplot = {'C': {'r': [1, 2, 3], 'Vqb': [0.1, 0.2, 0.3],\n 'Vpc': [-0.05, -0.15, -0.25]},\n 'EXTRA': {'wsrad': 1, 'potalign': 0.05,\n 'lengths': (3, 3, 3)}}\n KumagaiCorrection.plot(tmpforplot, 'TMP')\n self.assertTrue(os.path.exists('TMP_kumagaisiteavgPlot.pdf'))\n os.system('rm TMP_kumagaisiteavgPlot.pdf')\n\n def test_plot_from_datfile(self):\n KumagaiCorrection.plot_from_datfile(name=kad_path, title='TMP')\n self.assertTrue(os.path.exists('TMP_kumagaisiteavgPlot.pdf'))\n os.system('rm TMP_kumagaisiteavgPlot.pdf')\n\n #NOTE there are here because g_sum is pre computed for it\n def test_get_sum_at_r(self):\n val = get_g_sum_at_r(self.kbi.g_sum, self.bs, self.bl.dim,\n [0.1, 0.1, 0.1])\n self.assertAlmostEqual(val, 0.04795055159361078)\n\n def test_anisotropic_madelung_potential(self):\n val = anisotropic_madelung_potential(\n self.bs, self.bl.dim, self.kbi.g_sum, [0.1, 0.1, 0.1],\n [[15, 0.1, -0.1], [0.1, 13, 0], [-0.1, 0, 20]], -3,\n self.kbi.gamma, self.kbi.tolerance)\n self.assertAlmostEqual(val, -4.2923511216202419)\n\n def test_anisotropic_pc_energy(self):\n val = anisotropic_pc_energy(\n self.bs, self.kbi.g_sum,\n [[15, 0.1, -0.1], [0.1, 13, 0], [-0.1, 0, 20]], -3,\n self.kbi.gamma, self.kbi.tolerance)\n self.assertAlmostEqual(val, 1.5523329679084736)\n\n\nclass KumagaiSetupFunctionsTest(PymatgenTest):\n def setUp(self):\n self.bl = Locpot.from_file(bl_path)\n self.dl = Locpot.from_file(dl_path)\n self.bs = self.bl.structure\n self.ds = self.dl.structure\n\n def test_kumagai_init(self):\n angset, bohrset, vol, determ, invdiel = kumagai_init(\n self.bs, [[15, 0.1, -0.1], [0.1, 13, 0], [-0.1, 0, 20]])\n newangset = [list(row) for row in angset]\n newbohrset = [list(row) for row in bohrset]\n newinvdiel = [list(row) for row in invdiel]\n self.assertEqual(newangset, [[5.750183, 0, 0], [0, 5.750183, 0],\n [0, 0, 5.750183]])\n 
self.assertEqual(newbohrset, [[10.866120815099999, 0, 0],\n [0, 10.866120815099999, 0],\n [0, 0, 10.866120815099999]])\n self.assertAlmostEqual(vol, 1282.9909362724345)\n self.assertAlmostEqual(determ, 3899.6699999999969)\n tmpinvdiel = [[0.066672308169665628, -0.00051286390899742801, 0.00033336154084832821],\n [-0.00051286390899742801, 0.076927022030069209, -0.0000025643195449871406],\n [0.00033336154084832826, -0.0000025643195449871406, 0.050001666807704244]]\n np.testing.assert_array_almost_equal(newinvdiel, tmpinvdiel)\n\n def test_real_sum(self):\n a = self.bs.lattice.matrix[0]\n b = self.bs.lattice.matrix[1]\n c = self.bs.lattice.matrix[2]\n tmpdiel = [[15, 0.1, -0.1], [0.1, 13, 0], [-0.1, 0, 20]]\n val = real_sum(a, b, c, np.array([0.1, 0.1, 0.1]), -1, tmpdiel, 3, 1)\n self.assertAlmostEqual(val, -0.0049704211394050414)\n\n def test_getgridind(self):\n triv_ans = getgridind(self.bs, (96,96,96), [0,0,0])\n self.assertArrayEqual(triv_ans, [0,0,0])\n diff_ans = getgridind(self.bs, (96,96,96), [0.2,0.3,0.4])\n self.assertArrayEqual(diff_ans, [19,29,38])\n # test atomic site averaging approach\n asa_ans = getgridind(self.bs, (96,96,96), [0,0,0], gridavg=0.08)\n correct_avg = [(95, 0, 0), (0, 95, 0), (0, 0, 95), (0, 0, 0),\n (0, 0, 1), (0, 1, 0), (1, 0, 0)]\n self.assertArrayEqual(asa_ans, correct_avg)\n\n def test_disttrans(self):\n nodefpos = disttrans(self.bs, self.ds)\n self.assertArrayEqual(list(nodefpos.keys()), [1, 2, 3, 4, 5, 6, 7])\n self.assertArrayEqual(nodefpos[3]['cart_reldef'], [ 2.8750915, 2.8750915, 0.])\n self.assertEqual(nodefpos[3]['bulk_site_index'], 3)\n self.assertEqual(nodefpos[3]['dist'], 4.0659933923636054)\n self.assertEqual(nodefpos[3]['def_site_index'], 2)\n self.assertArrayEqual(nodefpos[3]['cart'], [ 2.8750915, 2.8750915, 0.])\n self.assertArrayEqual(nodefpos[3]['siteobj'][0], [ 2.8750915, 2.8750915, 0.])\n self.assertArrayEqual(nodefpos[3]['siteobj'][1], [ 0.5, 0.5, 0.])\n self.assertEqual(nodefpos[3]['siteobj'][2], 'Ga')\n\n def test_wigner_seitz_radius(self):\n self.assertAlmostEqual(wigner_seitz_radius(self.bs), 2.8750914999999999)\n\n def test_read_ES_avg_fromlocpot(self):\n potdict = read_ES_avg_fromlocpot(self.bl)\n correct_potential = [-12.275644645892712, -12.275648494671156, -12.275640119791278,\n -12.275643801037788, -24.350725695796118, -24.350725266618685,\n -24.350723873595474, -24.350723145563087]\n self.assertArrayEqual(potdict['potential'], correct_potential)\n self.assertArrayEqual(potdict['ngxf_dims'], (96, 96, 96))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.testing.assert_array_almost_equal", "numpy.array" ] ]
ankitshah009/sonnet
[ "a07676192c6d0f2ed5967d6bc367d62e55835baf" ]
[ "sonnet/python/modules/conv.py" ]
[ "# Copyright 2017 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Implementation of convolutional Sonnet modules.\n\nClasses defining convolutional operations, inheriting from `snt.Module`, with\neasy weight sharing.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport math\nimport numbers\n\n# Dependency imports\n\nimport numpy as np\nimport six\nfrom sonnet.python.modules import base\nfrom sonnet.python.modules import util\nimport tensorflow as tf\n\n\n# Strings for TensorFlow convolution padding modes. See the following\n# documentation for an explanation of VALID versus SAME:\n# https://www.tensorflow.org/api_docs/python/tf/nn/convolution\nSAME = \"SAME\"\nVALID = \"VALID\"\nFULL = \"FULL\"\nCAUSAL = \"CAUSAL\"\nREVERSE_CAUSAL = \"REVERSE_CAUSAL\"\nCONV_OP_ALLOWED_PADDINGS = {SAME, VALID}\nALLOWED_PADDINGS = {SAME, VALID, FULL, CAUSAL, REVERSE_CAUSAL}\n\nDATA_FORMAT_NCW = \"NCW\"\nDATA_FORMAT_NWC = \"NWC\"\nSUPPORTED_1D_DATA_FORMATS = {DATA_FORMAT_NCW, DATA_FORMAT_NWC}\n\nDATA_FORMAT_NCHW = \"NCHW\"\nDATA_FORMAT_NHWC = \"NHWC\"\nSUPPORTED_2D_DATA_FORMATS = {DATA_FORMAT_NCHW, DATA_FORMAT_NHWC}\n\nDATA_FORMAT_NDHWC = \"NDHWC\"\nDATA_FORMAT_NCDHW = \"NCDHW\"\nSUPPORTED_3D_DATA_FORMATS = {DATA_FORMAT_NDHWC, DATA_FORMAT_NCDHW}\n\n\ndef _default_transpose_size(input_shape, stride, kernel_shape=None,\n padding=SAME):\n \"\"\"Returns default (maximal) output shape for a transpose convolution.\n\n In general, there are multiple possible output shapes that a transpose\n convolution with a given `input_shape` can map to. This function returns the\n output shape which evenly divides the stride to produce the input shape in\n a forward convolution, i.e. 
the maximal valid output shape with the given\n configuration:\n\n if the padding type is SAME then: output_shape = input_shape * stride\n if the padding type is VALID then: output_shape = input_shape * stride +\n kernel_shape - 1\n\n See the following documentation for an explanation of VALID versus SAME\n padding modes:\n https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#convolution\n\n Args:\n input_shape: Sequence of sizes of each dimension of the input, excluding\n batch and channel dimensions.\n stride: Sequence or integer of kernel strides, excluding batch and channel\n dimension strides.\n kernel_shape: Sequence or integer of kernel sizes.\n padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.\n\n Returns:\n output_shape: A tuple of sizes for a transposed convolution that divide\n evenly with the given strides, kernel shapes, and padding algorithm.\n \"\"\"\n if not input_shape:\n raise TypeError(\"input_shape is None; if using Sonnet, are you sure you \"\n \"have connected the module to inputs?\")\n input_length = len(input_shape)\n stride = _fill_and_verify_parameter_shape(stride, input_length, \"stride\")\n padding = _verify_conv_op_supported_padding(padding)\n\n output_shape = tuple(x * y for x, y in zip(input_shape, stride))\n\n if padding == VALID:\n kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, input_length,\n \"kernel\")\n output_shape = tuple(x + y - 1 for x, y in zip(output_shape, kernel_shape))\n\n return output_shape\n\n\ndef _fill_shape(x, n):\n \"\"\"Converts a dimension to a tuple of dimensions of a given size.\n\n This is used to allow shorthand notation for various configuration parameters.\n A user can provide either, for example, `2` or `[2, 2]` as a kernel shape, and\n this function returns `(2, 2)` in both cases. 
Passing `[1, 2]` will return\n `(1, 2)`.\n\n Args:\n x: An integer, tf.Dimension, or an iterable of them.\n n: An integer, the size of the desired output list\n\n Returns:\n If `x` is an integer, a tuple of size `n` containing `n` copies of `x`.\n If `x` is an iterable of integers or tf.Dimension of size `n`, it returns\n `tuple(x)`.\n\n Raises:\n TypeError: If n is not a positive integer;\n or if x is neither integer nor an iterable of size n.\n \"\"\"\n if not isinstance(n, numbers.Integral) or n < 1:\n raise TypeError(\"n must be a positive integer\")\n\n if (isinstance(x, numbers.Integral) or isinstance(x, tf.Dimension)) and x > 0:\n return (x,) * n\n\n try:\n if len(x) == n and all(v > 0 for v in x):\n return tuple(x)\n except TypeError:\n pass\n\n raise TypeError(\"x is {}, must be either a positive integer \"\n \"or an iterable of positive integers of size {}\"\n .format(x, n))\n\n\ndef _fill_and_verify_parameter_shape(x, n, parameter_label):\n \"\"\"Expands x if necessary into a `n`-D kernel shape and reports errors.\"\"\"\n try:\n return _fill_shape(x, n)\n except TypeError as e:\n raise base.IncompatibleShapeError(\"Invalid \" + parameter_label + \" shape: \"\n \"{}\".format(e))\n\n\ndef _verify_conv_op_supported_padding(padding):\n \"\"\"Verifies that the given padding type is supported for conv ops.\n\n Args:\n padding: One of CONV_OP_ALLOWED_PADDINGS.\n\n Returns:\n padding.\n\n Raises:\n ValueError: If padding is not one of CONV_OP_ALLOWED_PADDINGS.\n \"\"\"\n if padding not in CONV_OP_ALLOWED_PADDINGS:\n raise ValueError(\n \"Padding must be member of '{}', not {}\".format(\n CONV_OP_ALLOWED_PADDINGS, padding))\n return padding\n\n\ndef _fill_and_verify_padding(padding, n):\n \"\"\"Verifies that the provided padding is supported and expands to size n.\n\n Args:\n padding: One of ALLOWED_PADDINGS, or an iterable of them.\n n: An integer, the size of the desired output list.\n\n Returns:\n If `padding` is one of ALLOWED_PADDINGS, a tuple of size `n` containing `n`\n copies of `padding`.\n If `padding` is an iterable of ALLOWED_PADDINGS of size `n`, it returns\n `padding(x)`.\n\n Raises:\n TypeError: If n is not a positive integer; if padding is neither one of\n ALLOWED_PADDINGS nor an iterable of ALLOWED_PADDINGS of size n.\n \"\"\"\n if not isinstance(n, numbers.Integral) or n < 1:\n raise TypeError(\"n must be a positive integer\")\n\n if isinstance(padding, six.string_types) and padding in ALLOWED_PADDINGS:\n return (padding,) * n\n\n try:\n if len(padding) == n and all(p in ALLOWED_PADDINGS for p in padding):\n return tuple(padding)\n except TypeError:\n pass\n\n raise TypeError(\"padding is {}, must be member of '{}' or an iterable of \"\n \"these of size {}\".format(padding, ALLOWED_PADDINGS, n))\n\n\ndef _padding_to_conv_op_padding(padding):\n \"\"\"Whether to use SAME or VALID for the underlying convolution op.\n\n Args:\n padding: A tuple of members of ALLOWED_PADDINGS, e.g. 
as returned from\n `_fill_and_verify_padding`.\n\n Returns:\n One of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the\n underlying convolution op.\n\n Raises:\n ValueError: If padding is not a tuple.\n \"\"\"\n if not isinstance(padding, tuple):\n raise ValueError(\"padding should be a tuple.\")\n if all(p == SAME for p in padding):\n # If we want SAME padding for all dimensions then we can use SAME for the\n # conv and avoid doing any extra padding.\n return SAME\n else:\n # Otherwise we prefer to use VALID, since we can implement all the other\n # padding types just by adding some extra padding before doing a VALID conv.\n # (We could use SAME but then we'd also have to crop outputs in some cases).\n return VALID\n\n\ndef _fill_and_one_pad_stride(stride, n, data_format=DATA_FORMAT_NHWC):\n \"\"\"Expands the provided stride to size n and pads it with 1s.\"\"\"\n if isinstance(stride, numbers.Integral) or (\n isinstance(stride, collections.Iterable) and len(stride) <= n):\n if data_format.startswith(\"NC\"):\n return (1, 1,) + _fill_shape(stride, n)\n elif data_format.startswith(\"N\") and data_format.endswith(\"C\"):\n return (1,) + _fill_shape(stride, n) + (1,)\n else:\n raise ValueError(\n \"Invalid data_format {:s}. Must start with N and have a channel dim \"\n \"either follow the N dim or come at the end\".format(data_format))\n elif isinstance(stride, collections.Iterable) and len(stride) == n + 2:\n return stride\n else:\n raise base.IncompatibleShapeError(\n \"stride is {} ({}), must be either a positive integer or an iterable of\"\n \" positive integers of size {}\".format(stride, type(stride), n))\n\n\ndef _verify_inputs(inputs, channel_index, data_format):\n \"\"\"Verifies `inputs` is semantically correct.\n\n Args:\n inputs: An input tensor provided by the user.\n channel_index: The index of the channel dimension.\n data_format: The format of the data in `inputs`.\n\n Raises:\n base.IncompatibleShapeError: If the shape of `inputs` doesn't match\n `data_format`.\n base.UnderspecifiedError: If the channel dimension of `inputs` isn't\n defined.\n TypeError: If input Tensor dtype is not compatible with either\n `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.\n \"\"\"\n # Check shape.\n input_shape = tuple(inputs.get_shape().as_list())\n if len(input_shape) != len(data_format):\n raise base.IncompatibleShapeError((\n \"Input Tensor must have rank {} corresponding to \"\n \"data_format {}, but instead was {} of rank {}.\").format(\n len(data_format), data_format, input_shape, len(input_shape)))\n\n # Check type.\n if not (tf.float16.is_compatible_with(inputs.dtype) or\n tf.bfloat16.is_compatible_with(inputs.dtype) or\n tf.float32.is_compatible_with(inputs.dtype) or\n tf.float64.is_compatible_with(inputs.dtype)):\n raise TypeError(\n \"Input must have dtype tf.float16, tf.bfloat16, tf.float32 or \"\n \"tf.float64, but dtype was {}\".format(inputs.dtype))\n\n # Check channel dim.\n input_channels = input_shape[channel_index]\n if input_channels is None:\n raise base.UnderspecifiedError(\n \"Number of input channels must be known at module build time\")\n\n\ndef create_weight_initializer(fan_in_shape, dtype=tf.float32):\n \"\"\"Returns a default initializer for the weights of a convolutional module.\"\"\"\n stddev = 1 / math.sqrt(np.prod(fan_in_shape))\n return tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)\n\n\ndef create_bias_initializer(unused_bias_shape, dtype=tf.float32):\n \"\"\"Returns a default initializer for the biases of a convolutional 
module.\"\"\"\n return tf.zeros_initializer(dtype=dtype)\n\n\ndef _find_channel_index(data_format):\n \"\"\"Returns the index of the channel dimension.\n\n Args:\n data_format: A string of characters corresponding to Tensor dimensionality.\n\n Returns:\n channel_index: An integer indicating the channel dimension.\n\n Raises:\n ValueError: If no channel dimension was found.\n \"\"\"\n for i, c in enumerate(data_format):\n if c == \"C\":\n return i\n raise ValueError(\"data_format requires a channel dimension. Got: {}\"\n .format(data_format))\n\n\ndef _apply_bias(inputs, outputs, channel_index, data_format, output_channels,\n initializers, partitioners, regularizers):\n \"\"\"Initialize and apply a bias to the outputs.\n\n Figures out the shape of the bias vector, initialize it, and applies it.\n\n Args:\n inputs: A Tensor of shape `data_format`.\n outputs: A Tensor of shape `data_format`.\n channel_index: The index of the channel dimension in `inputs`.\n data_format: Format of `inputs`.\n output_channels: Channel dimensionality for `outputs`.\n initializers: Optional dict containing ops to initialize the biases\n (with key 'b').\n partitioners: Optional dict containing partitioners to partition the\n biases (with key 'b').\n regularizers: Optional dict containing regularizers for the biases\n (with key 'b').\n\n Returns:\n b: The constructed bias variable.\n outputs: The `outputs` argument that has had a bias applied.\n \"\"\"\n bias_shape = (output_channels,)\n if \"b\" not in initializers:\n initializers[\"b\"] = create_bias_initializer(bias_shape,\n dtype=inputs.dtype)\n b = tf.get_variable(\"b\",\n shape=bias_shape,\n dtype=inputs.dtype,\n initializer=initializers[\"b\"],\n partitioner=partitioners.get(\"b\", None),\n regularizer=regularizers.get(\"b\", None))\n\n # tf.nn.bias_add only supports 2 data formats.\n if data_format in (DATA_FORMAT_NHWC, DATA_FORMAT_NCHW):\n # Supported as-is.\n outputs = tf.nn.bias_add(outputs, b, data_format=data_format)\n else:\n # Create our own bias vector.\n bias_correct_dim = [1] * len(data_format)\n bias_correct_dim[channel_index] = output_channels\n outputs += tf.reshape(b, bias_correct_dim)\n\n return b, outputs\n\n\nclass _ConvND(base.AbstractModule):\n \"\"\"N-dimensional convolution and dilated convolution module, including bias.\n\n This acts as a light wrapper around the TensorFlow ops `tf.nn.convolution`\n abstracting away variable creation and sharing.\n \"\"\"\n\n def __init__(self, output_channels, kernel_shape, stride=1, rate=1,\n padding=SAME, use_bias=True, initializers=None,\n partitioners=None, regularizers=None, mask=None,\n data_format=DATA_FORMAT_NHWC,\n custom_getter=None, name=\"conv_nd\"):\n \"\"\"Constructs a _ConvND module.\n\n Args:\n output_channels: Number of output channels. `output_channels` can be\n either a number or a callable. In the latter case, since the function\n invocation is deferred to graph construction time, the user must only\n ensure that output_channels can be called, returning an integer,\n when `build` is called.\n kernel_shape: Sequence of kernel sizes (up to size N), or an integer.\n `kernel_shape` will be expanded to define a kernel size in all\n dimensions.\n stride: Sequence of strides (up to size N), or an integer.\n `stride` will be expanded to define stride in all dimensions.\n rate: Sequence of dilation rates (of size N), or integer that is used to\n define dilation rate in all dimensions. 1 corresponds to standard ND\n convolution, `rate > 1` corresponds to dilated convolution. 
Cannot be\n > 1 if any of `stride` is also > 1.\n padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,\n `snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings\n (up to size N).\n * snt.SAME and snt.VALID are explained in the Tensorflow docs at\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n * snt.FULL pre- and post-pads with the maximum padding which does not\n result in a convolution over just padded elements.\n * snt.CAUSAL pre-pads to ensure that each output value only depends on\n input values at the same or preceding indices (\"no dependence on the\n future\").\n * snt.REVERSE_CAUSAL post-pads to ensure that each output value only\n depends on input values at the same or *greater* indices (\"no\n dependence on the past\").\n If you use the same padding for all dimensions, and it is one of SAME\n or VALID, then this is supported directly by the underlying\n convolution op. In all other cases, the input data will be padded\n using tf.pad before calling the convolution op.\n use_bias: Whether to include bias parameters. Default `True`.\n initializers: Optional dict containing ops to initialize the filters (with\n key 'w') or biases (with key 'b'). The default initializer for the\n weights is a truncated normal initializer, which is commonly used\n when the inputs are zero centered (see\n https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for\n the bias is a zero initializer.\n partitioners: Optional dict containing partitioners to partition\n weights (with key 'w') or biases (with key 'b'). As a default, no\n partitioners are used.\n regularizers: Optional dict containing regularizers for the filters\n (with key 'w') and the biases (with key 'b'). As a default, no\n regularizers are used. A regularizer should be a function that takes\n a single `Tensor` as an input and returns a scalar `Tensor` output,\n e.g. the L1 and L2 regularizers in `tf.contrib.layers`.\n mask: A convertible to a ND tensor which is multiplied\n component-wise with the weights (Optional).\n data_format: The data format of the input.\n custom_getter: Callable or dictionary of callables to use as\n custom getters inside the module. If a dictionary, the keys\n correspond to regexes to match variable names. 
See the\n `tf.get_variable` documentation for information about the\n custom_getter API.\n name: Name of the module.\n\n Raises:\n base.IncompatibleShapeError: If the given kernel shape is not an integer;\n or if the given kernel shape is not a sequence of two integers.\n base.IncompatibleShapeError: If the given stride is not an integer; or if\n the given stride is not a sequence of two integers.\n base.IncompatibleShapeError: If the given rate is not an integer; or if\n the given rate is not a sequence of two integers.\n base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with\n a not fully defined shape.\n base.NotSupportedError: If rate in any dimension and the stride in any\n dimension are simultaneously > 1.\n ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,\n `snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.\n KeyError: If `initializers`, `partitioners` or `regularizers` contain any\n keys other than 'w' or 'b'.\n TypeError: If any of the given initializers, partitioners or regularizers\n are not callable.\n TypeError: If mask is given and it is not convertible to a Tensor.\n ValueError: If the passed-in data_format doesn't have a channel dimension.\n \"\"\"\n super(_ConvND, self).__init__(custom_getter=custom_getter, name=name)\n\n self._n = len(data_format) - 2\n self._input_channels = None\n self._output_channels = output_channels\n self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, self._n,\n \"kernel\")\n self._data_format = data_format\n\n # The following is for backwards-compatibility from when we used to accept\n # N-strides of the form [1, ..., 1].\n if (isinstance(stride, collections.Iterable) and\n len(stride) == len(data_format)):\n self._stride = tuple(stride)[1:-1]\n else:\n self._stride = _fill_and_verify_parameter_shape(stride, self._n, \"stride\")\n\n self._rate = _fill_and_verify_parameter_shape(rate, self._n, \"rate\")\n\n if any(x > 1 for x in self._stride) and any(x > 1 for x in self._rate):\n raise base.NotSupportedError(\"Cannot have stride > 1 with rate > 1\")\n\n self._padding = _fill_and_verify_padding(padding, self._n)\n self._conv_op_padding = _padding_to_conv_op_padding(self._padding)\n\n self._use_bias = use_bias\n self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)\n self._initializers = util.check_initializers(\n initializers, self.possible_keys)\n self._partitioners = util.check_partitioners(\n partitioners, self.possible_keys)\n self._regularizers = util.check_regularizers(\n regularizers, self.possible_keys)\n\n if mask is not None:\n if isinstance(mask, (tf.Tensor, list, tuple, np.ndarray)):\n self._mask = tf.convert_to_tensor(mask)\n if not (tf.float16.is_compatible_with(self._mask.dtype) or\n tf.bfloat16.is_compatible_with(self._mask.dtype) or\n tf.float32.is_compatible_with(self._mask.dtype) or\n tf.float64.is_compatible_with(self._mask.dtype)):\n raise TypeError(\n \"Mask needs to have dtype float16, bfloat16, float32 or float64\")\n if not self._mask.get_shape().is_fully_defined():\n base.IncompatibleShapeError(\n \"Mask needs to have a statically defined shape\")\n else:\n raise TypeError(\"Invalid type for mask: {}\".format(type(mask)))\n else:\n self._mask = None\n\n self._channel_index = _find_channel_index(self._data_format)\n\n @classmethod\n def get_possible_initializer_keys(cls, use_bias=True):\n return {\"w\", \"b\"} if use_bias else {\"w\"}\n\n def _build(self, inputs):\n \"\"\"Connects the _ConvND module into the graph, with input Tensor 
`inputs`.\n\n If this is not the first time the module has been connected to the graph,\n the input Tensor provided here must have the same number of channels, in\n order for the existing variables to be the correct size for the\n multiplication; the batch size and input spatial dimensions may differ for\n each connection.\n\n Args:\n inputs: A ND Tensor of the same rank as `data_format`, and either of types\n `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.\n\n Returns:\n A ND Tensor of shape [batch_size, output_dim_1, output_dim_2, ...,\n output_channels].\n\n Raises:\n ValueError: If connecting the module into the graph any time after the\n first time and the inferred size of the input does not match previous\n invocations.\n base.IncompatibleShapeError: If the input tensor has the wrong number\n of dimensions.\n base.UnderspecifiedError: If the channel dimension of `inputs` isn't\n defined.\n base.IncompatibleShapeError: If a mask is present and its shape is\n incompatible with the shape of the weights.\n TypeError: If input Tensor dtype is not compatible with either\n `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.\n \"\"\"\n _verify_inputs(inputs, self._channel_index, self._data_format)\n self._input_shape = tuple(inputs.get_shape().as_list())\n self._input_channels = self._input_shape[self._channel_index]\n\n self._w = self._construct_w(inputs)\n\n if self._mask is not None:\n w = self._apply_mask()\n else:\n w = self._w\n\n inputs = self._pad_input(inputs)\n outputs = self._apply_conv(inputs, w)\n\n if self._use_bias:\n self._b, outputs = _apply_bias(\n inputs, outputs, self._channel_index, self._data_format,\n self.output_channels, self._initializers, self._partitioners,\n self._regularizers)\n\n return outputs\n\n def _pad_input(self, inputs):\n \"\"\"Pad input in case the desired padding type requires it.\n\n VALID and SAME padding types are directly supported by tensorflow\n convolution ops, so don't require us to pad input ourselves, at least\n in cases where the same method is used for all dimensions.\n\n Other padding types (FULL, CAUSAL, REVERSE_CAUSAL) aren't directly supported\n by conv ops but can be implemented by using VALID and padding the input\n appropriately ourselves.\n\n If different padding types are used for different dimensions, we use VALID\n but pad the input ourselves along any dimensions that require other padding\n types.\n\n Args:\n inputs: A Tensor of shape `data_format` and of type `tf.float16`,\n `tf.bfloat16`, `tf.float32` or `tf.float64`.\n\n Returns:\n inputs: The `inputs` argument that has had any required padding added.\n \"\"\"\n if all(p == self._conv_op_padding for p in self._padding):\n # All axes require the same padding type that we're going to use for the\n # underlying convolution op, so nothing needs to be done:\n return inputs\n\n # In all other cases we use VALID as the underlying padding type, and for\n # the axes which require something other than VALID, we pad inputs ourselves\n # before the convolution.\n assert self._conv_op_padding == VALID\n\n def pad_amount(kernel_size, rate, padding):\n \"\"\"Pre- and post-padding required for a particular axis before conv op.\"\"\"\n # The effective kernel size includes any holes/gaps introduced by the\n # dilation rate. 
It's equal to kernel_size when rate == 1.\n effective_kernel_size = int((kernel_size - 1) * rate + 1)\n if padding == FULL:\n return [effective_kernel_size - 1, effective_kernel_size - 1]\n if padding == CAUSAL:\n return [effective_kernel_size - 1, 0]\n if padding == REVERSE_CAUSAL:\n return [0, effective_kernel_size - 1]\n if padding == SAME:\n return [(effective_kernel_size - 1) // 2, effective_kernel_size // 2]\n # padding == VALID\n return [0, 0]\n\n paddings = map(pad_amount, self._kernel_shape, self._rate, self._padding)\n if self._data_format.startswith(\"NC\"): # N, C, ...\n paddings = [[0, 0], [0, 0]] + list(paddings)\n else: # N, ..., C\n paddings = [[0, 0]] + list(paddings) + [[0, 0]]\n\n return tf.pad(inputs, paddings)\n\n def _apply_conv(self, inputs, w):\n \"\"\"Apply a convolution operation on `inputs` using variable `w`.\n\n Args:\n inputs: A Tensor of shape `data_format` and of type `tf.float16`,\n `tf.bfloat16`, `tf.float32` or `tf.float64`.\n w: A weight matrix of the same type as `inputs`.\n\n Returns:\n outputs: The result of the convolution operation on `inputs`.\n \"\"\"\n outputs = tf.nn.convolution(inputs, w, strides=self._stride,\n padding=self._conv_op_padding,\n dilation_rate=self._rate,\n data_format=self._data_format)\n return outputs\n\n def _construct_w(self, inputs):\n \"\"\"Construct the convolution weight matrix.\n\n Figures out the shape of the weight matrix, initialize it, and return it.\n\n Args:\n inputs: A Tensor of shape `data_format` and of type `tf.float16`,\n `tf.bfloat16`, `tf.float32` or `tf.float64`.\n\n Returns:\n w: A weight matrix of the same type as `inputs`.\n \"\"\"\n weight_shape = self._kernel_shape + (self._input_channels,\n self.output_channels)\n\n if \"w\" not in self._initializers:\n self._initializers[\"w\"] = create_weight_initializer(weight_shape[:-1],\n dtype=inputs.dtype)\n\n w = tf.get_variable(\"w\",\n shape=weight_shape,\n dtype=inputs.dtype,\n initializer=self._initializers[\"w\"],\n partitioner=self._partitioners.get(\"w\", None),\n regularizer=self._regularizers.get(\"w\", None))\n\n return w\n\n def _apply_mask(self):\n \"\"\"Applies the passed-in mask to the convolution matrix.\n\n Returns:\n w: A copy of the convolution matrix that has had the mask applied.\n\n Raises:\n base.IncompatibleShapeError: If the mask shape has more dimensions than\n the weight matrix.\n base.IncompatibleShapeError: If the mask and the weight matrix don't\n match on shape.\n \"\"\"\n w = self._w\n w_shape = w.get_shape()\n mask_shape = self._mask.get_shape()\n\n if mask_shape.ndims > w_shape.ndims:\n raise base.IncompatibleShapeError(\n \"Invalid mask shape: {}. Max shape: {}\".format(\n mask_shape.ndims, len(self._data_format)\n )\n )\n if mask_shape != w_shape[:mask_shape.ndims]:\n raise base.IncompatibleShapeError(\n \"Invalid mask shape: {}. 
Weight shape: {}\".format(\n mask_shape, w_shape\n )\n )\n # TF broadcasting is a bit fragile.\n # Expand the shape of self._mask by one dim at a time to the right\n # until the rank matches `weight_shape`.\n while self._mask.get_shape().ndims < w_shape.ndims:\n self._mask = tf.expand_dims(self._mask, -1)\n\n # tf.Variable & tf.ResourceVariable don't support *=.\n w = w * self._mask # pylint: disable=g-no-augmented-assignment\n\n return w\n\n @property\n def output_channels(self):\n \"\"\"Returns the number of output channels.\"\"\"\n if callable(self._output_channels):\n self._output_channels = self._output_channels()\n # Channel must be integer.\n self._output_channels = int(self._output_channels)\n return self._output_channels\n\n @property\n def kernel_shape(self):\n \"\"\"Returns the kernel shape.\"\"\"\n return self._kernel_shape\n\n @property\n def stride(self):\n \"\"\"Returns the stride.\"\"\"\n # Backwards compatibility with old stride format.\n\n return _fill_and_one_pad_stride(self._stride, self._n, self._data_format)\n\n @property\n def rate(self):\n \"\"\"Returns the dilation rate.\"\"\"\n return self._rate\n\n @property\n def padding(self):\n \"\"\"Returns the padding algorithm used, if this is the same for all dims.\n\n Use `.paddings` if you want a tuple with the padding algorithm used for each\n dimension.\n\n Returns:\n The padding algorithm used, if this is the same for all dimensions.\n\n Raises:\n ValueError: If different padding algorithms are used for different\n dimensions.\n \"\"\"\n # This is for backwards compatibility -- previously only a single\n # padding setting was supported across all dimensions.\n if all(p == self._padding[0] for p in self._padding):\n return self._padding[0]\n else:\n raise ValueError(\"This layer uses different paddings for different \"\n \"dimensions. 
Use .paddings if you want a tuple of \"\n \"per-dimension padding settings.\")\n\n @property\n def paddings(self):\n \"\"\"Returns a tuple with the padding algorithm used for each dimension.\"\"\"\n return self._padding\n\n @property\n def conv_op_padding(self):\n \"\"\"Returns the padding algorithm used for the underlying convolution op.\"\"\"\n return self._conv_op_padding\n\n @property\n def w(self):\n \"\"\"Returns the Variable containing the weight matrix.\"\"\"\n self._ensure_is_connected()\n return self._w\n\n @property\n def b(self):\n \"\"\"Returns the Variable containing the bias.\n\n Returns:\n Variable object containing the bias, from the most recent __call__.\n\n Raises:\n base.NotConnectedError: If the module has not been connected to the graph\n yet, meaning the variables do not exist.\n AttributeError: If the module does not use bias.\n \"\"\"\n self._ensure_is_connected()\n if not self._use_bias:\n raise AttributeError(\n \"No bias Variable in Conv2D Module when `use_bias=False`.\")\n return self._b\n\n @property\n def has_bias(self):\n \"\"\"Returns `True` if bias Variable is present in the module.\"\"\"\n return self._use_bias\n\n @property\n def initializers(self):\n \"\"\"Returns the initializers dictionary.\"\"\"\n return self._initializers\n\n @property\n def partitioners(self):\n \"\"\"Returns the partitioners dictionary.\"\"\"\n return self._partitioners\n\n @property\n def regularizers(self):\n \"\"\"Returns the regularizers dictionary.\"\"\"\n return self._regularizers\n\n @property\n def mask(self):\n \"\"\"Returns the mask.\"\"\"\n return self._mask\n\n @property\n def data_format(self):\n \"\"\"Returns the data format.\"\"\"\n return self._data_format\n\n # Implements Transposable interface.\n @property\n def input_shape(self):\n \"\"\"Returns the input shape.\"\"\"\n self._ensure_is_connected()\n return self._input_shape\n\n @property\n def input_channels(self):\n \"\"\"Returns the number of input channels.\"\"\"\n if self._input_channels is None:\n self._ensure_is_connected()\n return self._input_channels\n\n def clone(self, name=None):\n \"\"\"Returns a cloned `_ConvND` module.\n\n Args:\n name: Optional string assigning name of cloned module. The default name\n is constructed by appending \"_clone\" to `self.module_name`.\n\n Returns:\n A copy of the current class.\n \"\"\"\n if name is None:\n name = self.module_name + \"_clone\"\n\n return type(self)(output_channels=self.output_channels,\n kernel_shape=self._kernel_shape,\n stride=self._stride,\n rate=self._rate,\n padding=self._padding,\n use_bias=self._use_bias,\n initializers=self._initializers,\n partitioners=self._partitioners,\n regularizers=self._regularizers,\n mask=self._mask,\n data_format=self._data_format,\n custom_getter=self._custom_getter,\n name=name)\n\n\nclass _ConvNDTranspose(base.AbstractModule):\n \"\"\"Spatial transposed / reverse / up ND convolution module, including bias.\n\n This acts as a light wrapper around the TensorFlow `conv_nd_transpose` ops,\n abstracting away variable creation and sharing.\n \"\"\"\n\n def __init__(self, output_channels, output_shape=None, kernel_shape=None,\n stride=1, padding=SAME, use_bias=True, initializers=None,\n partitioners=None, regularizers=None,\n data_format=DATA_FORMAT_NHWC, custom_getter=None,\n name=\"conv_nd_transpose\"):\n \"\"\"Constructs a `ConvNDTranspose module`. 
Support for N = (1, 2, 3).\n\n See the following documentation for an explanation of VALID versus SAME\n padding modes:\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution\n\n Args:\n output_channels: Number of output channels.\n Can be either a number or a callable. In the latter case, since the\n function invocation is deferred to graph construction time, the user\n must only ensure `output_channels` can be called, returning an\n integer, when build is called.\n output_shape: Output shape of transpose convolution.\n Can be either an iterable of integers or `Dimension`s, a\n `TensorShape`, or a callable. In the latter case, since the function\n invocation is deferred to graph construction time, the user must only\n ensure that `output_shape` can be called, returning an iterable of\n output shapes when `build` is called. Note that `output_shape` defines\n the size of output signal domain, as opposed to the shape of the\n output `Tensor`. If a None value is given, a default shape is\n automatically calculated (see docstring of\n `_default_transpose_size` function for more details).\n kernel_shape: Sequence of kernel sizes (of size N), or integer that is\n used to define kernel size in all dimensions.\n stride: Sequence of kernel strides (of size N), or integer that is used\n to define stride in all dimensions.\n padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.\n use_bias: Whether to include bias parameters. Default `True`.\n initializers: Optional dict containing ops to initialize the filters (with\n key 'w') or biases (with key 'b').\n partitioners: Optional dict containing partitioners to partition\n weights (with key 'w') or biases (with key 'b'). As a default, no\n partitioners are used.\n regularizers: Optional dict containing regularizers for the filters\n (with key 'w') and the biases (with key 'b'). As a default, no\n regularizers are used. A regularizer should be a function that takes\n a single `Tensor` as an input and returns a scalar `Tensor` output,\n e.g. the L1 and L2 regularizers in `tf.contrib.layers`.\n data_format: The data format of the input.\n custom_getter: Callable or dictionary of callables to use as\n custom getters inside the module. If a dictionary, the keys\n correspond to regexes to match variable names. See the\n `tf.get_variable` documentation for information about the\n custom_getter API.\n name: Name of the module.\n\n Raises:\n base.IncompatibleShapeError: If the given kernel shape is neither an\n integer nor a sequence of two integers.\n base.IncompatibleShapeError: If the given stride is neither an integer nor\n a sequence of two or four integers.\n ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.\n ValueError: If the given kernel_shape is `None`.\n KeyError: If `initializers`, `partitioners` or `regularizers` contain any\n keys other than 'w' or 'b'.\n TypeError: If any of the given initializers, partitioners or regularizers\n are not callable.\n ValueError: If the passed-in data_format doesn't have a channel dimension.\n \"\"\"\n super(_ConvNDTranspose, self).__init__(custom_getter=custom_getter,\n name=name)\n self._data_format = data_format\n self._n = len(self._data_format) - 2\n if self._n > 3:\n raise base.NotSupportedError(\n \"We only support (1, 2, 3) convolution transpose operations. 
\"\n \"Received data format of: {}\".format(self._data_format))\n self._output_channels = output_channels\n\n if output_shape is None:\n self._output_shape = None\n self._use_default_output_shape = True\n else:\n self._use_default_output_shape = False\n if callable(output_shape):\n self._output_shape = output_shape\n else:\n self._output_shape = _fill_and_verify_parameter_shape(output_shape,\n self._n,\n \"output_shape\")\n if kernel_shape is None:\n raise ValueError(\"`kernel_shape` cannot be None.\")\n self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, self._n,\n \"kernel\")\n if (isinstance(stride, collections.Iterable) and\n len(stride) == len(data_format)):\n if self._data_format.startswith(\"N\") and self._data_format.endswith(\"C\"):\n if not stride[0] == stride[-1] == 1:\n raise base.IncompatibleShapeError(\n \"Invalid stride: First and last element must be 1.\")\n elif self._data_format.startswith(\"NC\"):\n if not stride[0] == stride[1] == 1:\n raise base.IncompatibleShapeError(\n \"Invalid stride: First and second element must be 1.\")\n self._stride = tuple(stride)\n else:\n self._stride = _fill_and_one_pad_stride(stride, self._n,\n self._data_format)\n\n self._padding = _verify_conv_op_supported_padding(padding)\n self._use_bias = use_bias\n self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)\n self._initializers = util.check_initializers(\n initializers, self.possible_keys)\n self._partitioners = util.check_partitioners(\n partitioners, self.possible_keys)\n self._regularizers = util.check_regularizers(\n regularizers, self.possible_keys)\n\n self._channel_index = _find_channel_index(self._data_format)\n\n @classmethod\n def get_possible_initializer_keys(cls, use_bias=True):\n return {\"w\", \"b\"} if use_bias else {\"w\"}\n\n def _build(self, inputs):\n \"\"\"Connects the _ConvNDTranspose module into the graph.\n\n If this is not the first time the module has been connected to the graph,\n the input Tensor provided here must have the same final N dimensions, in\n order for the existing variables to be the correct size for the\n multiplication. 
The batch size may differ for each connection.\n\n Args:\n inputs: A Tensor of shape `data_format` and of type\n `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.\n\n Returns:\n A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16`,\n `tf.float32` or `tf.float64`.\n\n Raises:\n ValueError: If connecting the module into the graph any time after the\n first time and the inferred size of the input does not match previous\n invocations.\n base.IncompatibleShapeError: If the input tensor has the wrong number\n of dimensions.\n base.UnderspecifiedError: If the channel dimension of `inputs` isn't\n defined.\n base.IncompatibleShapeError: If `output_shape` is an iterable and is not\n in the format `(out_height, out_width)`.\n TypeError: If input Tensor dtype is not compatible with either\n `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.\n \"\"\"\n _verify_inputs(inputs, self._channel_index, self._data_format)\n self._input_shape = tuple(inputs.get_shape().as_list())\n self._input_channels = self._input_shape[self._channel_index]\n\n # First, figure out what the non-(N,C) dims will be.\n if self._use_default_output_shape:\n def _default_transpose_size_wrapper():\n if self._data_format.startswith(\"NC\"):\n input_size = self._input_shape[2:]\n stride = self.stride[2:]\n else: # self._data_format == N*WC\n input_size = self._input_shape[1:-1]\n stride = self.stride[1:-1]\n return _default_transpose_size(input_size,\n stride,\n kernel_shape=self._kernel_shape,\n padding=self._padding)\n\n self._output_shape = _default_transpose_size_wrapper\n\n if len(self.output_shape) != self._n:\n raise base.IncompatibleShapeError(\n \"Output shape must have rank {}, but instead was {}\".format(\n self._n, len(self.output_shape)))\n\n # Now, construct the size of the output, including the N + C dims.\n output_shape = self._infer_all_output_dims(inputs)\n\n self._w = self._construct_w(inputs)\n\n if self._n == 1:\n # Add a dimension for the height.\n if self._data_format == DATA_FORMAT_NWC:\n h_dim = 1\n two_dim_conv_data_format = DATA_FORMAT_NHWC\n else: # self._data_format == DATA_FORMAT_NCW\n h_dim = 2\n two_dim_conv_data_format = DATA_FORMAT_NCHW\n inputs = tf.expand_dims(inputs, h_dim)\n two_dim_conv_stride = self.stride[:h_dim] + (1,) + self.stride[h_dim:]\n outputs = tf.nn.conv2d_transpose(inputs,\n self._w,\n output_shape,\n strides=two_dim_conv_stride,\n padding=self._padding,\n data_format=two_dim_conv_data_format)\n # Remove the height dimension to return a 3D tensor.\n outputs = tf.squeeze(outputs, [h_dim])\n elif self._n == 2:\n outputs = tf.nn.conv2d_transpose(inputs,\n self._w,\n output_shape,\n strides=self._stride,\n padding=self._padding,\n data_format=self._data_format)\n else:\n outputs = tf.nn.conv3d_transpose(inputs,\n self._w,\n output_shape,\n strides=self._stride,\n padding=self._padding,\n data_format=self._data_format)\n\n if self._use_bias:\n self._b, outputs = _apply_bias(\n inputs, outputs, self._channel_index, self._data_format,\n self._output_channels, self._initializers, self._partitioners,\n self._regularizers)\n\n outputs = self._recover_shape_information(inputs, outputs)\n return outputs\n\n def _construct_w(self, inputs):\n \"\"\"Construct the convolution weight matrix.\n\n Figures out the shape of the weight matrix, initialize it, and return it.\n\n Args:\n inputs: A Tensor of shape `data_format` and of type `tf.float16`,\n `tf.bfloat16`, `tf.float32` or `tf.float64`.\n\n Returns:\n w: A weight matrix of the same type as `inputs`.\n \"\"\"\n 
    # Height dim needs to be added to everything for 1D Conv
    # as we'll be using the 2D Conv Transpose op.
    if self._n == 1:
      weight_shape = (1,) + self._kernel_shape + (self.output_channels,
                                                  self._input_channels)
    else:
      weight_shape = self._kernel_shape + (self.output_channels,
                                           self._input_channels)

    if "w" not in self._initializers:
      fan_in_shape = self._kernel_shape + (self._input_channels,)
      self._initializers["w"] = create_weight_initializer(fan_in_shape,
                                                          dtype=inputs.dtype)
    w = tf.get_variable("w",
                        shape=weight_shape,
                        dtype=inputs.dtype,
                        initializer=self._initializers["w"],
                        partitioner=self._partitioners.get("w", None),
                        regularizer=self._regularizers.get("w", None))
    return w

  def _infer_all_output_dims(self, inputs):
    """Calculate the output shape for `inputs` after a deconvolution.

    Args:
      inputs: A Tensor of shape `data_format` and of type `tf.float16`,
        `tf.bfloat16`, `tf.float32` or `tf.float64`.

    Returns:
      output_shape: A tensor of shape (`batch_size`, `conv_output_shape`).
    """
    # Use the TensorFlow shape op to manipulate the inputs shape, so that an
    # unknown batch size - which can happen when using input placeholders - is
    # handled correctly.
    batch_size = tf.expand_dims(tf.shape(inputs)[0], 0)
    out_channels = (self.output_channels,)

    # Height dim needs to be added to everything for 1D Conv
    # as we'll be using the 2D Conv Transpose op.
    if self._n == 1:
      out_shape = (1,) + self.output_shape
    else:
      out_shape = self.output_shape

    if self._data_format.startswith("NC"):
      out_shape_tuple = out_channels + out_shape
    elif self._data_format.startswith("N") and self._data_format.endswith("C"):
      out_shape_tuple = out_shape + out_channels

    output_shape = tf.concat([batch_size, out_shape_tuple], 0)
    return output_shape

  def _recover_shape_information(self, inputs, outputs):
    """Recover output tensor shape value to enable shape inference.

    The batch size of `inputs` isn't preserved by the convolution op. Calculate
    what the proper output shape will be for `outputs`.

    Args:
      inputs: A Tensor of shape `data_format` and of type `tf.float16`,
        `tf.bfloat16`, `tf.float32` or `tf.float64`.
      outputs: A Tensor of shape `data_format` and of type `tf.float16`,
        `tf.bfloat16`, `tf.float32` or `tf.float64`.
The output of `inputs`\n from a transpose convolution op.\n\n Returns:\n outputs: The passed-in `outputs` with all shape information filled in.\n \"\"\"\n batch_size_value = inputs.get_shape()[0]\n if self._data_format.startswith(\"NC\"):\n output_shape_value = ((batch_size_value, self.output_channels) +\n self.output_shape)\n elif self._data_format.startswith(\"N\") and self._data_format.endswith(\"C\"):\n output_shape_value = ((batch_size_value,) + self.output_shape +\n (self.output_channels,))\n outputs.set_shape(output_shape_value)\n return outputs\n\n @property\n def output_channels(self):\n \"\"\"Returns the number of output channels.\"\"\"\n if callable(self._output_channels):\n self._output_channels = self._output_channels()\n # Channel must be integer.\n self._output_channels = int(self._output_channels)\n return self._output_channels\n\n @property\n def kernel_shape(self):\n \"\"\"Returns the kernel shape.\"\"\"\n return self._kernel_shape\n\n @property\n def stride(self):\n \"\"\"Returns the stride.\"\"\"\n return self._stride\n\n @property\n def output_shape(self):\n \"\"\"Returns the output shape.\"\"\"\n if self._output_shape is None:\n self._ensure_is_connected()\n if callable(self._output_shape):\n self._output_shape = tuple(self._output_shape())\n return self._output_shape\n\n @property\n def padding(self):\n \"\"\"Returns the padding algorithm.\"\"\"\n return self._padding\n\n @property\n def conv_op_padding(self):\n \"\"\"Returns the padding algorithm used for the underlying convolution op.\"\"\"\n return self._padding\n\n @property\n def w(self):\n \"\"\"Returns the Variable containing the weight matrix.\"\"\"\n self._ensure_is_connected()\n return self._w\n\n @property\n def b(self):\n \"\"\"Returns the Variable containing the bias.\n\n Returns:\n Variable object containing the bias, from the most recent __call__.\n\n Raises:\n base.NotConnectedError: If the module has not been connected to the graph\n yet, meaning the variables do not exist.\n AttributeError: If the module does not use bias.\n \"\"\"\n self._ensure_is_connected()\n if not self._use_bias:\n raise AttributeError(\n \"No bias Variable in Conv2DTranspose Module when `use_bias=False`.\")\n return self._b\n\n @property\n def has_bias(self):\n \"\"\"Returns `True` if bias Variable is present in the module.\"\"\"\n return self._use_bias\n\n @property\n def initializers(self):\n \"\"\"Returns the initializers dictionary.\"\"\"\n return self._initializers\n\n @property\n def partitioners(self):\n \"\"\"Returns the partitioners dictionary.\"\"\"\n return self._partitioners\n\n @property\n def regularizers(self):\n \"\"\"Returns the regularizers dictionary.\"\"\"\n return self._regularizers\n\n @property\n def input_shape(self):\n \"\"\"Returns the input shape.\"\"\"\n self._ensure_is_connected()\n return self._input_shape\n\n @property\n def input_channels(self):\n \"\"\"Returns the number of input channels.\"\"\"\n self._ensure_is_connected()\n return self._input_channels\n\n\nclass Conv1D(_ConvND, base.Transposable):\n \"\"\"1D convolution module, including optional bias.\n\n This acts as a light wrapper around the class `_ConvND`.\n \"\"\"\n\n def __init__(self, output_channels, kernel_shape, stride=1, rate=1,\n padding=SAME, use_bias=True, initializers=None,\n partitioners=None, regularizers=None, mask=None,\n data_format=DATA_FORMAT_NWC, custom_getter=None,\n name=\"conv_1d\"):\n \"\"\"Constructs a Conv1D module.\n\n See the following documentation for an explanation of VALID versus SAME\n padding modes:\n 
https://www.tensorflow.org/api_docs/python/tf/nn/convolution\n\n Args:\n output_channels: Number of output channels. `output_channels` can be\n either a number or a callable. In the latter case, since the function\n invocation is deferred to graph construction time, the user must only\n ensure that output_channels can be called, returning an integer,\n when `build` is called.\n kernel_shape: Sequence of kernel sizes (of size 1), or integer that is\n used to define kernel size in all dimensions.\n stride: Sequence of kernel strides (of size 1), or integer that is used to\n define stride in all dimensions.\n rate: Sequence of dilation rates (of size 1), or integer that is used to\n define dilation rate in all dimensions. 1 corresponds to standard\n convolution, `rate > 1` corresponds to dilated convolution. Cannot be\n > 1 if any of `stride` is also > 1.\n padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,\n `snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings\n of length 1.\n * snt.SAME and snt.VALID are explained in the Tensorflow docs at\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n * snt.FULL pre- and post-pads with the maximum padding which does not\n result in a convolution over just padded elements.\n * snt.CAUSAL pre-pads to ensure that each output value only depends on\n input values at the same or preceding indices (\"no dependence on the\n future\").\n * snt.REVERSE_CAUSAL post-pads to ensure that each output value only\n depends on input values at the same or *greater* indices (\"no\n dependence on the past\").\n If you use the same padding for all dimensions, and it is one of SAME\n or VALID, then this is supported directly by the underlying\n convolution op. In all other cases, the input data will be padded\n using tf.pad before calling the convolution op.\n use_bias: Whether to include bias parameters. Default `True`.\n initializers: Optional dict containing ops to initialize the filters (with\n key 'w') or biases (with key 'b'). The default initializer for the\n weights is a truncated normal initializer, which is commonly used\n when the inputs are zero centered (see\n https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for\n the bias is a zero initializer.\n partitioners: Optional dict containing partitioners to partition\n weights (with key 'w') or biases (with key 'b'). As a default, no\n partitioners are used.\n regularizers: Optional dict containing regularizers for the filters\n (with key 'w') and the biases (with key 'b'). As a default, no\n regularizers are used. A regularizer should be a function that takes\n a single `Tensor` as an input and returns a scalar `Tensor` output,\n e.g. the L1 and L2 regularizers in `tf.contrib.layers`.\n mask: A convertible to a 3D tensor which is multiplied\n component-wise with the weights (Optional).\n data_format: A string. Specifies whether the channel dimension\n of the input and output is the last dimension (default, NWC), or the\n second dimension (NCW).\n custom_getter: Callable or dictionary of callables to use as\n custom getters inside the module. If a dictionary, the keys\n correspond to regexes to match variable names. 
        See the `tf.get_variable` documentation for information about the
        custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an integer;
        or if the given kernel shape is not a sequence of one integer.
      base.IncompatibleShapeError: If the given stride is not an integer; or if
        the given stride is not a sequence of one integer.
      base.IncompatibleShapeError: If the given rate is not an integer; or if
        the given rate is not a sequence of one integer.
      base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with
        a not fully defined shape.
      base.NotSupportedError: If rate in any dimension and the stride in any
        dimension are simultaneously > 1.
      ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
        `snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain any
        keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
      TypeError: If mask is given and it is not convertible to a Tensor.
      ValueError: If the passed-in data_format doesn't have a channel dimension.
      ValueError: If the given data_format is not a supported format (see
        `SUPPORTED_1D_DATA_FORMATS`).
    """
    if data_format not in SUPPORTED_1D_DATA_FORMATS:
      raise ValueError("Invalid data_format {:s}. Allowed formats "
                       "{}".format(data_format, SUPPORTED_1D_DATA_FORMATS))
    super(Conv1D, self).__init__(
        output_channels=output_channels, kernel_shape=kernel_shape,
        stride=stride, rate=rate, padding=padding, use_bias=use_bias,
        initializers=initializers, partitioners=partitioners,
        regularizers=regularizers, mask=mask, data_format=data_format,
        custom_getter=custom_getter, name=name)

  # Implement Transposable interface
  def transpose(self, name=None):
    """Returns matching `Conv1DTranspose` module.

    Args:
      name: Optional string assigning name of transpose module.
        The default name is constructed by appending "_transpose" to
        `self.name`.

    Returns:
      `Conv1DTranspose` module.

    Raises:
      base.NotSupportedError: If `rate` in any dimension > 1.
    """
    if any(x > 1 for x in self._rate):
      raise base.NotSupportedError(
          "Cannot transpose a dilated convolution module.")

    if any(p != self._conv_op_padding for p in self._padding):
      raise base.NotSupportedError(
          "Cannot transpose a convolution using mixed paddings or paddings "
          "other than SAME or VALID.")

    def output_shape():
      if self._data_format == DATA_FORMAT_NCW:
        return (self._input_shape[2],)
      else:  # data_format = DATA_FORMAT_NWC
        return (self._input_shape[1],)

    if name is None:
      name = self.module_name + "_transpose"
    return Conv1DTranspose(output_channels=lambda: self._input_channels,
                           output_shape=output_shape,
                           kernel_shape=self._kernel_shape,
                           stride=self._stride,
                           padding=self._conv_op_padding,
                           use_bias=self._use_bias,
                           initializers=self._initializers,
                           partitioners=self._partitioners,
                           regularizers=self._regularizers,
                           data_format=self._data_format,
                           custom_getter=self._custom_getter,
                           name=name)


class Conv1DTranspose(_ConvNDTranspose, base.Transposable):
  """1D transposed / reverse / up convolution module, including bias.

  This performs a 1D transpose convolution by lightly wrapping the TensorFlow
  op `tf.nn.conv2d_transpose`, setting the size of the height dimension of the
  image to 1.
  """

  def __init__(self, output_channels, output_shape=None, kernel_shape=None,
               stride=1, padding=SAME, use_bias=True, initializers=None,
               partitioners=None, regularizers=None,
               data_format=DATA_FORMAT_NWC, custom_getter=None,
               name="conv_1d_transpose"):
    """Constructs a Conv1DTranspose module.

    See the following documentation for an explanation of VALID versus SAME
    padding modes:
    https://www.tensorflow.org/api_docs/python/tf/nn/convolution

    Args:
      output_channels: Number of output channels. Can be either a number or a
        callable. In the latter case, since the function invocation is
        deferred to graph construction time, the user must only ensure
        `output_channels` can be called, returning an integer, when build is
        called.
      output_shape: Output shape of transpose convolution. Can be either a
        number or a callable. In the latter case, since the function
        invocation is deferred to graph construction time, the user must only
        ensure that `output_shape` can be called, returning an iterable of
        format `(out_length,)` when build is called. If a None
        value is given, a default shape is automatically calculated (see
        docstring of _default_transpose_size function for more details).
      kernel_shape: Sequence of kernel sizes (of size 1), or integer that is
        used to define kernel size in all dimensions.
      stride: Sequence of kernel strides (of size 1), or integer that is used
        to define stride in all dimensions.
      padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
      use_bias: Whether to include bias parameters. Default `True`.
      initializers: Optional dict containing ops to initialize the filters
        (with key 'w') or biases (with key 'b').
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b'). As a default, no
        partitioners are used.
      regularizers: Optional dict containing regularizers for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        regularizers are used.
        A regularizer should be a function that takes
        a single `Tensor` as an input and returns a scalar `Tensor` output,
        e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      data_format: A string. Specifies whether the channel dimension
        of the input and output is the last dimension (default, NWC), or the
        second dimension (NCW).
      custom_getter: Callable or dictionary of callables to use as
        custom getters inside the module. If a dictionary, the keys
        correspond to regexes to match variable names. See the
        `tf.get_variable` documentation for information about the
        custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an integer;
        or if the given kernel shape is not a sequence of one integer.
      base.IncompatibleShapeError: If the given stride is not an integer; or if
        the given stride is not a sequence of one or three integers.
      ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
      ValueError: If the given kernel_shape is `None`.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain any
        keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
      ValueError: If the passed-in data_format doesn't have a channel dimension.
      ValueError: If the given data_format is not a supported format (see
        `SUPPORTED_1D_DATA_FORMATS`).
    """
    if data_format not in SUPPORTED_1D_DATA_FORMATS:
      raise ValueError("Invalid data_format {:s}. Allowed formats "
                       "{}".format(data_format, SUPPORTED_1D_DATA_FORMATS))

    super(Conv1DTranspose, self).__init__(
        output_channels=output_channels, output_shape=output_shape,
        kernel_shape=kernel_shape, stride=stride, padding=padding,
        use_bias=use_bias, initializers=initializers,
        partitioners=partitioners, regularizers=regularizers,
        data_format=data_format, custom_getter=custom_getter, name=name
    )

  # Implement Transposable interface.
  def transpose(self, name=None):
    """Returns matching `Conv1D` module.

    Args:
      name: Optional string assigning name of transpose module. The default
        name is constructed by appending "_transpose" to `self.name`.

    Returns:
      `Conv1D` module.
    """
    if name is None:
      name = self.module_name + "_transpose"

    if self._data_format == DATA_FORMAT_NWC:
      stride = self._stride[1:-1]
    else:  # self._data_format == DATA_FORMAT_NCW
      stride = self._stride[2:]

    return Conv1D(output_channels=lambda: self.input_channels,
                  kernel_shape=self.kernel_shape,
                  stride=stride,
                  padding=self.padding,
                  use_bias=self._use_bias,
                  initializers=self.initializers,
                  partitioners=self.partitioners,
                  regularizers=self.regularizers,
                  data_format=self._data_format,
                  custom_getter=self._custom_getter,
                  name=name)


class CausalConv1D(_ConvND):
  """1D convolution module, including optional bias.

  This is deprecated, please use the padding=CAUSAL argument to Conv1D.

  This acts as a light wrapper around _ConvND ensuring that the outputs at
  index `i` only depend on indices smaller than `i` (also known as a causal
  convolution).
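
  A minimal usage sketch of the recommended replacement (illustrative shapes
  and names; assumes Sonnet is imported as `snt`):

      conv = snt.Conv1D(output_channels=16, kernel_shape=3,
                        padding=snt.CAUSAL)
      outputs = conv(inputs)  # inputs shaped [batch, width, channels] (NWC)
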
For further details on the theoretical background, refer to:\n\n https://arxiv.org/abs/1610.10099\n \"\"\"\n\n def __init__(self, output_channels, kernel_shape,\n stride=1, rate=1, use_bias=True, initializers=None,\n partitioners=None, regularizers=None, mask=None,\n padding=CAUSAL, data_format=DATA_FORMAT_NWC,\n custom_getter=None, name=\"causal_conv_1d\"):\n \"\"\"Constructs a CausalConv1D module.\n\n This is deprecated, please use the padding=CAUSAL argument to Conv1D.\n\n Args:\n output_channels: Number of output channels. `output_channels` can be\n either a number or a callable. In the latter case, since the function\n invocation is deferred to graph construction time, the user must only\n ensure that output_channels can be called, returning an integer,\n when `build` is called.\n kernel_shape: Sequence of kernel sizes (of size 1), or integer that is\n used to define kernel size in all dimensions.\n stride: Sequence of kernel strides (of size 1), or integer that is used to\n define stride in all dimensions.\n rate: Sequence of dilation rates (of size 1), or integer that is used to\n define dilation rate in all dimensions. 1 corresponds to standard\n convolution, `rate > 1` corresponds to dilated convolution. Cannot be\n > 1 if any of `stride` is also > 1.\n use_bias: Whether to include bias parameters. Default `True`.\n initializers: Optional dict containing ops to initialize the filters (with\n key 'w') or biases (with key 'b'). The default initializer for the\n weights is a truncated normal initializer, which is commonly used\n when the inputs are zero centered (see\n https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for\n the bias is a zero initializer.\n partitioners: Optional dict containing partitioners to partition\n weights (with key 'w') or biases (with key 'b'). As a default, no\n partitioners are used.\n regularizers: Optional dict containing regularizers for the filters\n (with key 'w') and the biases (with key 'b'). As a default, no\n regularizers are used. A regularizer should be a function that takes\n a single `Tensor` as an input and returns a scalar `Tensor` output,\n e.g. the L1 and L2 regularizers in `tf.contrib.layers`.\n mask: A convertible to a 3D tensor which is multiplied\n component-wise with the weights (Optional).\n padding: Padding algorithm. Should be `snt.CAUSAL`.\n data_format: A string. Specifies whether the channel dimension\n of the input and output is the last dimension (default, NWC), or the\n second dimension (NCW).\n custom_getter: Callable or dictionary of callables to use as\n custom getters inside the module. If a dictionary, the keys\n correspond to regexes to match variable names. 
        See the `tf.get_variable` documentation for information about the
        custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an integer;
        or if the given kernel shape is not a sequence of one integer.
      base.IncompatibleShapeError: If the given stride is not an integer; or if
        the given stride is not a sequence of one integer.
      base.IncompatibleShapeError: If the given rate is not an integer; or if
        the given rate is not a sequence of one integer.
      base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with
        a not fully defined shape.
      base.NotSupportedError: If rate in any dimension and the stride in any
        dimension are simultaneously > 1.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain any
        keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
      TypeError: If mask is given and it is not convertible to a Tensor.
      ValueError: If the passed-in data_format doesn't have a channel dimension.
      ValueError: If the given data_format is not a supported format (see
        `SUPPORTED_1D_DATA_FORMATS`).
    """
    util.deprecation_warning(
        "CausalConv1D is deprecated, please use Conv1D with padding=CAUSAL.")
    if data_format not in SUPPORTED_1D_DATA_FORMATS:
      raise ValueError("Invalid data_format {:s}. Allowed formats "
                       "{}".format(data_format, SUPPORTED_1D_DATA_FORMATS))
    if padding != CAUSAL:
      # This used to be required to be VALID, which is now rather ambiguous.
      # Supporting VALID for now but with a warning:
      util.deprecation_warning(
          "You specified a non-causal padding type for CausalConv1D, this has "
          "been ignored and you will get CAUSAL padding. Note CausalConv1D is "
          "deprecated, please switch to Conv1D with padding=CAUSAL.")
    super(CausalConv1D, self).__init__(
        output_channels=output_channels, kernel_shape=kernel_shape,
        stride=stride, rate=rate, padding=CAUSAL, use_bias=use_bias,
        initializers=initializers, partitioners=partitioners,
        regularizers=regularizers, mask=mask,
        data_format=data_format,
        custom_getter=custom_getter, name=name)


class Conv2D(_ConvND, base.Transposable):
  """Spatial convolution and dilated convolution module, including bias.

  This acts as a light wrapper around the class `_ConvND`.
  """

  def __init__(self, output_channels, kernel_shape, stride=1, rate=1,
               padding=SAME, use_bias=True, initializers=None,
               partitioners=None, regularizers=None, mask=None,
               data_format=DATA_FORMAT_NHWC, custom_getter=None,
               name="conv_2d"):
    """Constructs a Conv2D module.

    See the following documentation for an explanation of VALID versus SAME
    padding modes:
    https://www.tensorflow.org/api_docs/python/tf/nn/convolution

    Args:
      output_channels: Number of output channels. `output_channels` can be
        either a number or a callable. In the latter case, since the function
        invocation is deferred to graph construction time, the user must only
        ensure that output_channels can be called, returning an integer,
        when `build` is called.
      kernel_shape: Sequence of kernel sizes (of size 2), or integer that is
        used to define kernel size in all dimensions.
      stride: Sequence of kernel strides (of size 2), or integer that is used
        to define stride in all dimensions.
      rate: Sequence of dilation rates (of size 2), or integer that is used to
        define dilation rate in all dimensions.
1 corresponds to standard 2D\n convolution, `rate > 1` corresponds to dilated convolution. Cannot be\n > 1 if any of `stride` is also > 1.\n padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,\n `snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings\n of length 2.\n * snt.SAME and snt.VALID are explained in the Tensorflow docs at\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n * snt.FULL pre- and post-pads with the maximum padding which does not\n result in a convolution over just padded elements.\n * snt.CAUSAL pre-pads to ensure that each output value only depends on\n input values at the same or preceding indices (\"no dependence on the\n future\").\n * snt.REVERSE_CAUSAL post-pads to ensure that each output value only\n depends on input values at the same or *greater* indices (\"no\n dependence on the past\").\n If you use the same padding for all dimensions, and it is one of SAME\n or VALID, then this is supported directly by the underlying\n convolution op. In all other cases, the input data will be padded\n using tf.pad before calling the convolution op.\n use_bias: Whether to include bias parameters. Default `True`.\n initializers: Optional dict containing ops to initialize the filters (with\n key 'w') or biases (with key 'b'). The default initializer for the\n weights is a truncated normal initializer, which is commonly used\n when the inputs are zero centered (see\n https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for\n the bias is a zero initializer.\n partitioners: Optional dict containing partitioners to partition\n weights (with key 'w') or biases (with key 'b'). As a default, no\n partitioners are used.\n regularizers: Optional dict containing regularizers for the filters\n (with key 'w') and the biases (with key 'b'). As a default, no\n regularizers are used. A regularizer should be a function that takes\n a single `Tensor` as an input and returns a scalar `Tensor` output,\n e.g. the L1 and L2 regularizers in `tf.contrib.layers`.\n mask: A convertible to a 4D tensor which is multiplied\n component-wise with the weights (Optional).\n data_format: A string. Specifies whether the channel dimension\n of the input and output is the last dimension (default, NHWC), or the\n second dimension (NCHW).\n custom_getter: Callable or dictionary of callables to use as\n custom getters inside the module. If a dictionary, the keys\n correspond to regexes to match variable names. 
See the\n `tf.get_variable` documentation for information about the\n custom_getter API.\n name: Name of the module.\n\n Raises:\n base.IncompatibleShapeError: If the given kernel shape is not an integer;\n or if the given kernel shape is not a sequence of two integers.\n base.IncompatibleShapeError: If the given stride is not an integer; or if\n the given stride is not a sequence of two integers.\n base.IncompatibleShapeError: If the given rate is not an integer; or if\n the given rate is not a sequence of two integers.\n base.IncompatibleShapeError: If a mask is given and its rank is neither 2\n nor 4, or if it is a TensorFlow Tensor with a not fully defined shape.\n base.NotSupportedError: If rate in any dimension and the stride in any\n dimension are simultaneously > 1.\n ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,\n `snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.\n KeyError: If `initializers`, `partitioners` or `regularizers` contain any\n keys other than 'w' or 'b'.\n TypeError: If any of the given initializers, partitioners or regularizers\n are not callable.\n TypeError: If mask is given and it is not convertible to a Tensor.\n ValueError: If the passed-in data_format doesn't have a channel dimension.\n ValueError: If the given data_format is not a supported format (see\n `SUPPORTED_2D_DATA_FORMATS`).\n \"\"\"\n if data_format not in SUPPORTED_2D_DATA_FORMATS:\n raise ValueError(\"Invalid data_format {:s}. Allowed formats \"\n \"{}\".format(data_format, SUPPORTED_2D_DATA_FORMATS))\n super(Conv2D, self).__init__(\n output_channels=output_channels, kernel_shape=kernel_shape,\n stride=stride, rate=rate, padding=padding, use_bias=use_bias,\n initializers=initializers, partitioners=partitioners,\n regularizers=regularizers, mask=mask, data_format=data_format,\n custom_getter=custom_getter, name=name)\n\n # Implements Transposable interface.\n def transpose(self, name=None):\n \"\"\"Returns matching `Conv2DTranspose` module.\n\n Args:\n name: Optional string assigning name of transpose module. 
        The default name is constructed by appending "_transpose" to
        `self.name`.

    Returns:
      `Conv2DTranspose` module.

    Raises:
      base.NotSupportedError: If `rate` in any dimension > 1.
    """
    if any(x > 1 for x in self._rate):
      raise base.NotSupportedError(
          "Cannot transpose a dilated convolution module.")

    if any(p != self._conv_op_padding for p in self._padding):
      raise base.NotSupportedError(
          "Cannot transpose a convolution using mixed paddings or paddings "
          "other than SAME or VALID.")

    if name is None:
      name = self.module_name + "_transpose"

    def output_shape():
      if self._data_format == DATA_FORMAT_NCHW:
        return self.input_shape[2:4]
      else:  # data_format == DATA_FORMAT_NHWC
        return self.input_shape[1:3]

    return Conv2DTranspose(output_channels=lambda: self._input_channels,
                           output_shape=output_shape,
                           kernel_shape=self._kernel_shape,
                           stride=self._stride,
                           padding=self._conv_op_padding,
                           use_bias=self._use_bias,
                           initializers=self._initializers,
                           partitioners=self._partitioners,
                           regularizers=self._regularizers,
                           data_format=self._data_format,
                           custom_getter=self._custom_getter,
                           name=name)


class Conv2DTranspose(_ConvNDTranspose, base.Transposable):
  """Spatial transposed / reverse / up 2D convolution module, including bias.

  This acts as a light wrapper around the TensorFlow op
  `tf.nn.conv2d_transpose` abstracting away variable creation and sharing.
  """

  def __init__(self, output_channels, output_shape=None, kernel_shape=None,
               stride=1, padding=SAME, use_bias=True, initializers=None,
               partitioners=None, regularizers=None,
               data_format=DATA_FORMAT_NHWC, custom_getter=None,
               name="conv_2d_transpose"):
    """Constructs a `Conv2DTranspose` module.

    See the following documentation for an explanation of VALID versus SAME
    padding modes:
    https://www.tensorflow.org/api_docs/python/tf/nn/convolution

    Args:
      output_channels: Number of output channels.
        Can be either a number or a callable. In the latter case, since the
        function invocation is deferred to graph construction time, the user
        must only ensure `output_channels` can be called, returning an
        integer, when build is called.
      output_shape: Output shape of transpose convolution.
        Can be either an iterable of integers or a callable. In the latter
        case, since the function invocation is deferred to graph construction
        time, the user must only ensure that `output_shape` can be called,
        returning an iterable of format `(out_height, out_width)` when `build`
        is called. Note that `output_shape` defines the size of output signal
        domain, as opposed to the shape of the output `Tensor`. If a None
        value is given, a default shape is automatically calculated (see
        docstring of _default_transpose_size function for more details).
      kernel_shape: Sequence of kernel sizes (of size 2), or integer that is
        used to define kernel size in all dimensions.
      stride: Sequence of kernel strides (of size 2), or integer that is used
        to define stride in all dimensions.
      padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
      use_bias: Whether to include bias parameters. Default `True`.
      initializers: Optional dict containing ops to initialize the filters
        (with key 'w') or biases (with key 'b').
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b').
        As a default, no partitioners are used.
      regularizers: Optional dict containing regularizers for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        regularizers are used. A regularizer should be a function that takes
        a single `Tensor` as an input and returns a scalar `Tensor` output,
        e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      data_format: A string. Specifies whether the channel dimension
        of the input and output is the last dimension (default, NHWC), or the
        second dimension (NCHW).
      custom_getter: Callable or dictionary of callables to use as
        custom getters inside the module. If a dictionary, the keys
        correspond to regexes to match variable names. See the
        `tf.get_variable` documentation for information about the
        custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If the given kernel shape is neither an
        integer nor a sequence of two integers.
      base.IncompatibleShapeError: If the given stride is neither an integer
        nor a sequence of two or four integers.
      ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
      ValueError: If the given kernel_shape is `None`.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain any
        keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
      ValueError: If the passed-in data_format doesn't have a channel dimension.
      ValueError: If the given data_format is not a supported format (see
        `SUPPORTED_2D_DATA_FORMATS`).
    """
    if data_format not in SUPPORTED_2D_DATA_FORMATS:
      raise ValueError("Invalid data_format {:s}. Allowed formats "
                       "{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))

    super(Conv2DTranspose, self).__init__(
        output_channels=output_channels, output_shape=output_shape,
        kernel_shape=kernel_shape, stride=stride, padding=padding,
        use_bias=use_bias, initializers=initializers,
        partitioners=partitioners, regularizers=regularizers,
        data_format=data_format, custom_getter=custom_getter, name=name
    )

  # Implements Transposable interface.
  def transpose(self, name=None):
    """Returns matching `Conv2D` module.

    Args:
      name: Optional string assigning name of transpose module.
The default name\n is constructed by appending \"_transpose\" to `self.name`.\n\n Returns:\n `Conv2D` module.\n \"\"\"\n if name is None:\n name = self.module_name + \"_transpose\"\n\n if self._data_format == DATA_FORMAT_NHWC:\n stride = self._stride[1:-1]\n else: # self._data_format == DATA_FORMAT_NCHW\n stride = self._stride[2:]\n\n return Conv2D(output_channels=lambda: self.input_channels,\n kernel_shape=self._kernel_shape,\n stride=stride,\n padding=self._padding,\n use_bias=self._use_bias,\n initializers=self._initializers,\n partitioners=self._partitioners,\n regularizers=self._regularizers,\n data_format=self._data_format,\n custom_getter=self._custom_getter,\n name=name)\n\n\nclass Conv3D(_ConvND, base.Transposable):\n \"\"\"Volumetric convolution module, including optional bias.\n\n This acts as a light wrapper around the class `_ConvND`.\n \"\"\"\n\n def __init__(self, output_channels, kernel_shape, stride=1, rate=1,\n padding=SAME, use_bias=True, initializers=None,\n partitioners=None, regularizers=None, mask=None,\n data_format=DATA_FORMAT_NDHWC, custom_getter=None,\n name=\"conv_3d\"):\n \"\"\"Constructs a Conv3D module.\n\n See the following documentation for an explanation of VALID versus SAME\n padding modes:\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution\n\n Args:\n output_channels: Number of output channels. `output_channels` can be\n either a number or a callable. In the latter case, since the function\n invocation is deferred to graph construction time, the user must only\n ensure that output_channels can be called, returning an integer,\n when `build` is called.\n kernel_shape: Sequence of kernel sizes (of size 3), or integer that is\n used to define kernel size in all dimensions.\n stride: Sequence of kernel strides (of size 3), or integer that is used to\n define stride in all dimensions.\n rate: Sequence of dilation rates (of size 3), or integer that is used to\n define dilation rate in all dimensions. 1 corresponds to standard 3D\n convolution, `rate > 1` corresponds to dilated convolution. Cannot be\n > 1 if any of `stride` is also > 1.\n padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,\n `snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings\n of length 3.\n * snt.SAME and snt.VALID are explained in the Tensorflow docs at\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n * snt.FULL pre- and post-pads with the maximum padding which does not\n result in a convolution over just padded elements.\n * snt.CAUSAL pre-pads to ensure that each output value only depends on\n input values at the same or preceding indices (\"no dependence on the\n future\").\n * snt.REVERSE_CAUSAL post-pads to ensure that each output value only\n depends on input values at the same or *greater* indices (\"no\n dependence on the past\").\n If you use the same padding for all dimensions, and it is one of SAME\n or VALID, then this is supported directly by the underlying\n convolution op. In all other cases, the input data will be padded\n using tf.pad before calling the convolution op.\n use_bias: Whether to include bias parameters. Default `True`.\n initializers: Optional dict containing ops to initialize the filters (with\n key 'w') or biases (with key 'b'). The default initializer for the\n weights is a truncated normal initializer, which is commonly used\n when the inputs are zero centered (see\n https://arxiv.org/pdf/1502.03167v3.pdf). 
        The default initializer for the bias is a zero initializer.
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b'). As a default, no
        partitioners are used.
      regularizers: Optional dict containing regularizers for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        regularizers are used. A regularizer should be a function that takes
        a single `Tensor` as an input and returns a scalar `Tensor` output,
        e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      mask: An object convertible to a 5D tensor which is multiplied
        component-wise with the weights (Optional).
      data_format: A string. Specifies whether the channel dimension
        of the input and output is the last dimension (default, NDHWC), or
        the second dimension (NCDHW).
      custom_getter: Callable or dictionary of callables to use as
        custom getters inside the module. If a dictionary, the keys
        correspond to regexes to match variable names. See the
        `tf.get_variable` documentation for information about the
        custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If the given kernel shape is not an integer;
        or if the given kernel shape is not a sequence of three integers.
      base.IncompatibleShapeError: If the given stride is not an integer; or if
        the given stride is not a sequence of three or five integers.
      base.IncompatibleShapeError: If the given rate is not an integer; or if
        the given rate is not a sequence of three integers.
      base.NotSupportedError: If rate in any dimension and the stride in any
        dimension are simultaneously > 1.
      ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
        `snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain any
        keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
      ValueError: If the passed-in data_format doesn't have a channel dimension.
      ValueError: If the given data_format is not a supported format (see
        `SUPPORTED_3D_DATA_FORMATS`).
    """
    if data_format not in SUPPORTED_3D_DATA_FORMATS:
      raise ValueError("Invalid data_format {:s}. Allowed formats "
                       "{}".format(data_format, SUPPORTED_3D_DATA_FORMATS))
    super(Conv3D, self).__init__(
        output_channels=output_channels, kernel_shape=kernel_shape,
        stride=stride, rate=rate, padding=padding, use_bias=use_bias,
        initializers=initializers, partitioners=partitioners,
        regularizers=regularizers, mask=mask, data_format=data_format,
        custom_getter=custom_getter, name=name)

  # Implements Transposable interface.
  def transpose(self, name=None):
    """Returns matching `Conv3DTranspose` module.

    Args:
      name: Optional string assigning name of transpose module.
        The default name is constructed by appending "_transpose" to
        `self.name`.

    Returns:
      `Conv3DTranspose` module.

    Raises:
      base.NotSupportedError: If `rate` in any dimension > 1.
    """
    if any(x > 1 for x in self._rate):
      raise base.NotSupportedError(
          "Cannot transpose a dilated convolution module.")

    if any(p != self._conv_op_padding for p in self._padding):
      raise base.NotSupportedError(
          "Cannot transpose a convolution using mixed paddings or paddings "
          "other than SAME or VALID.")

    def output_shape():
      if self._data_format == DATA_FORMAT_NCDHW:
        return self.input_shape[2:]
      else:  # data_format == DATA_FORMAT_NDHWC
        return self.input_shape[1:4]

    if name is None:
      name = self.module_name + "_transpose"
    return Conv3DTranspose(output_channels=lambda: self._input_channels,
                           output_shape=output_shape,
                           kernel_shape=self._kernel_shape,
                           stride=self._stride,
                           padding=self._conv_op_padding,
                           use_bias=self._use_bias,
                           initializers=self._initializers,
                           partitioners=self._partitioners,
                           regularizers=self._regularizers,
                           data_format=self._data_format,
                           custom_getter=self._custom_getter,
                           name=name)


class Conv3DTranspose(_ConvNDTranspose, base.Transposable):
  """Volumetric transposed / reverse / up 3D convolution module, including bias.

  This acts as a light wrapper around the TensorFlow op
  `tf.nn.conv3d_transpose` abstracting away variable creation and sharing.
  """

  def __init__(self, output_channels, output_shape=None, kernel_shape=None,
               stride=1, padding=SAME, use_bias=True, initializers=None,
               partitioners=None, regularizers=None,
               data_format=DATA_FORMAT_NDHWC, custom_getter=None,
               name="conv_3d_transpose"):
    """Constructs a `Conv3DTranspose` module.

    See the following documentation for an explanation of VALID versus SAME
    padding modes:
    https://www.tensorflow.org/api_docs/python/tf/nn/convolution

    Args:
      output_channels: Number of output channels. `output_channels` can be
        either a number or a callable. In the latter case, since the function
        invocation is deferred to graph construction time, the user must only
        ensure `output_channels` can be called, returning an integer, when
        `build` is called.
      output_shape: Output shape of transpose convolution.
        Can be either an iterable of integers or a callable. In the latter
        case, since the function invocation is deferred to graph construction
        time, the user must only ensure that `output_shape` can be called,
        returning an iterable of format `(out_depth, out_height, out_width)`
        when `build` is called. Note that `output_shape` defines the size of
        output signal domain, as opposed to the shape of the output `Tensor`.
        If a None value is given, a default shape is automatically calculated
        (see docstring of _default_transpose_size function for more details).
      kernel_shape: Sequence of kernel sizes (of size 3), or integer that is
        used to define kernel size in all dimensions.
      stride: Sequence of kernel strides (of size 3), or integer that is used
        to define stride in all dimensions.
      padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
      use_bias: Whether to include bias parameters. Default `True`.
      initializers: Optional dict containing ops to initialize the filters
        (with key 'w') or biases (with key 'b').
      partitioners: Optional dict containing partitioners to partition
        weights (with key 'w') or biases (with key 'b').
        As a default, no partitioners are used.
      regularizers: Optional dict containing regularizers for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        regularizers are used. A regularizer should be a function that takes
        a single `Tensor` as an input and returns a scalar `Tensor` output,
        e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      data_format: A string. Specifies whether the channel dimension
        of the input and output is the last dimension (default, NDHWC), or the
        second dimension (NCDHW).
      custom_getter: Callable or dictionary of callables to use as
        custom getters inside the module. If a dictionary, the keys
        correspond to regexes to match variable names. See the
        `tf.get_variable` documentation for information about the
        custom_getter API.
      name: Name of the module.

    Raises:
      base.IncompatibleShapeError: If the given kernel shape is neither an
        integer nor a sequence of three integers.
      base.IncompatibleShapeError: If the given stride is neither an integer
        nor a sequence of three or five integers.
      ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
      ValueError: If the given kernel_shape is `None`.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain any
        keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
      ValueError: If the passed-in data_format doesn't have a channel dimension.
      ValueError: If the given data_format is not a supported format (see
        `SUPPORTED_3D_DATA_FORMATS`).
    """
    if data_format not in SUPPORTED_3D_DATA_FORMATS:
      raise ValueError("Invalid data_format {:s}. Allowed formats "
                       "{}".format(data_format, SUPPORTED_3D_DATA_FORMATS))

    super(Conv3DTranspose, self).__init__(
        output_channels=output_channels, output_shape=output_shape,
        kernel_shape=kernel_shape, stride=stride, padding=padding,
        use_bias=use_bias, initializers=initializers,
        partitioners=partitioners, regularizers=regularizers,
        data_format=data_format, custom_getter=custom_getter, name=name
    )

  # Implement Transposable interface
  def transpose(self, name=None):
    """Returns transposed Conv3DTranspose module, i.e. a Conv3D module."""
    if name is None:
      name = self.module_name + "_transpose"

    if self._data_format == DATA_FORMAT_NDHWC:
      stride = self._stride[1:-1]
    else:  # self._data_format == DATA_FORMAT_NCDHW
      stride = self._stride[2:]

    return Conv3D(output_channels=lambda: self.input_channels,
                  kernel_shape=self._kernel_shape,
                  stride=stride,
                  padding=self._padding,
                  use_bias=self._use_bias,
                  initializers=self._initializers,
                  partitioners=self._partitioners,
                  regularizers=self._regularizers,
                  data_format=self._data_format,
                  custom_getter=self._custom_getter,
                  name=name)


class InPlaneConv2D(_ConvND):
  """Applies an in-plane convolution to each channel with tied filter weights.

  This acts as a light wrapper around the TensorFlow op
  `tf.nn.depthwise_conv2d`; it differs from the DepthwiseConv2D module in that
  it has tied weights (i.e.
the same filter) for all the in-out channel pairs.\n \"\"\"\n\n def __init__(self, kernel_shape, stride=1, padding=SAME, use_bias=True,\n initializers=None, partitioners=None, regularizers=None,\n data_format=DATA_FORMAT_NHWC, custom_getter=None,\n name=\"in_plane_conv2d\"):\n \"\"\"Constructs an InPlaneConv2D module.\n\n See the following documentation for an explanation of VALID versus SAME\n padding modes:\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution\n\n Args:\n kernel_shape: Iterable with 2 elements in the layout [filter_height,\n filter_width]; or integer that is used to define the list in all\n dimensions.\n stride: Iterable with 2 or 4 elements of kernel strides, or integer that\n is used to define stride in all dimensions.\n padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,\n `snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings\n of length 2.\n * snt.SAME and snt.VALID are explained in the Tensorflow docs at\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n * snt.FULL pre- and post-pads with the maximum padding which does not\n result in a convolution over just padded elements.\n * snt.CAUSAL pre-pads to ensure that each output value only depends on\n input values at the same or preceding indices (\"no dependence on the\n future\").\n * snt.REVERSE_CAUSAL post-pads to ensure that each output value only\n depends on input values at the same or *greater* indices (\"no\n dependence on the past\").\n If you use the same padding for all dimensions, and it is one of SAME\n or VALID, then this is supported directly by the underlying\n convolution op. In all other cases, the input data will be padded\n using tf.pad before calling the convolution op.\n use_bias: Whether to include bias parameters. Default `True`.\n initializers: Optional dict containing ops to initialize the filters (with\n key 'w') or biases (with key 'b').\n partitioners: Optional dict containing partitioners to partition the\n filters (with key 'w') or biases (with key 'b'). As a default, no\n partitioners are used.\n regularizers: Optional dict containing regularizers for the filters\n (with key 'w') and the biases (with key 'b'). As a default, no\n regularizers are used. A regularizer should be a function that takes\n a single `Tensor` as an input and returns a scalar `Tensor` output,\n e.g. the L1 and L2 regularizers in `tf.contrib.layers`.\n data_format: A string. Specifies whether the channel dimension\n of the input and output is the last dimension (default, NHWC), or the\n second dimension (NCHW).\n custom_getter: Callable or dictionary of callables to use as\n custom getters inside the module. If a dictionary, the keys\n correspond to regexes to match variable names. 
        See the `tf.get_variable` documentation for information about the
        custom_getter API.
      name: Name of the module.

    Raises:
      ValueError: If the given data_format is not a supported format (see
        `SUPPORTED_2D_DATA_FORMATS`).
      base.IncompatibleShapeError: If the given kernel shape is not an integer;
        or if the given kernel shape is not a sequence of two integers.
      base.IncompatibleShapeError: If the given stride is not an integer; or if
        the given stride is not a sequence of two integers.
      ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,
        `snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.
      KeyError: If `initializers`, `partitioners` or `regularizers` contain any
        keys other than 'w' or 'b'.
      TypeError: If any of the given initializers, partitioners or regularizers
        are not callable.
      ValueError: If the passed-in data_format doesn't have a channel dimension.
    """
    if data_format not in SUPPORTED_2D_DATA_FORMATS:
      raise ValueError("Invalid data_format {:s}. Allowed formats "
                       "{}".format(data_format, SUPPORTED_2D_DATA_FORMATS))
    super(InPlaneConv2D, self).__init__(
        output_channels=lambda: self.input_channels,
        kernel_shape=kernel_shape,
        stride=stride, padding=padding, use_bias=use_bias,
        initializers=initializers, partitioners=partitioners,
        regularizers=regularizers, data_format=data_format,
        custom_getter=custom_getter, name=name)

  def _construct_w(self, inputs):
    """Construct the convolution weight matrix.

    Figures out the shape of the weight matrix, initializes it, and returns it.

    Args:
      inputs: A Tensor of shape `data_format` and of type `tf.float16`,
        `tf.bfloat16`, `tf.float32` or `tf.float64`.

    Returns:
      w: A weight matrix of the same type as `inputs` and of shape
        [kernel_shape, 1, 1].
    """
    weight_shape = self._kernel_shape + (1, 1)

    if "w" not in self._initializers:
      self._initializers["w"] = create_weight_initializer(weight_shape[:2],
                                                          dtype=inputs.dtype)

    w = tf.get_variable("w",
                        shape=weight_shape,
                        dtype=inputs.dtype,
                        initializer=self._initializers["w"],
                        partitioner=self._partitioners.get("w", None),
                        regularizer=self._regularizers.get("w", None))
    return w

  def _apply_conv(self, inputs, w):
    """Apply a depthwise_conv2d operation on `inputs` using variable `w`.

    Args:
      inputs: A Tensor of shape `data_format` and of type `tf.float16`,
        `tf.bfloat16`, `tf.float32` or `tf.float64`.
      w: A weight matrix of the same type as `inputs`.

    Returns:
      outputs: The result of the convolution operation on `inputs`.
    """
    tiled_weights = tf.tile(w, [1, 1, self._input_channels, 1])
    outputs = tf.nn.depthwise_conv2d(inputs,
                                     tiled_weights,
                                     strides=self.stride,
                                     padding=self._conv_op_padding,
                                     data_format=self._data_format)
    return outputs


class DepthwiseConv2D(_ConvND):
  """Spatial depthwise 2D convolution module, including bias.

  This acts as a light wrapper around the TensorFlow op
  `tf.nn.depthwise_conv2d`, abstracting away variable creation and sharing.
  """

  def __init__(self,
               channel_multiplier,
               kernel_shape,
               stride=1,
               padding=SAME,
               use_bias=True,
               initializers=None,
               partitioners=None,
               regularizers=None,
               data_format=DATA_FORMAT_NHWC,
               custom_getter=None,
               name="conv_2d_depthwise"):
    """Constructs a DepthwiseConv2D module.

    See the following documentation for an explanation of VALID versus SAME
    padding modes:
    https://www.tensorflow.org/api_docs/python/tf/nn/convolution

    Args:
      channel_multiplier: Number of channels to expand convolution to. Must be
        an integer. Must be > 0. When `channel_multiplier` is set to 1,
        applies a different filter to each input channel, producing one
        output channel per input channel. Numbers larger than 1 cause
        multiple different filters to be applied to each input channel, with
        their outputs being concatenated together, producing
        `channel_multiplier` * `input_channels` output channels.
      kernel_shape: Iterable with 2 elements in the following layout:
        [filter_height, filter_width] or integer that is
        used to define the list in all dimensions.
      stride: Iterable with 2 or 4 elements of kernel strides, or integer that
        is used to define stride in all dimensions. Layout of list:
        In case of 4 elements: `[1, stride_height, stride_width, 1]`
        In case of 2 elements: `[stride_height, stride_width]`.
      padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,
        `snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings
        of length 2.
        * snt.SAME and snt.VALID are explained in the Tensorflow docs at
          https://www.tensorflow.org/api_docs/python/tf/nn/convolution.
        * snt.FULL pre- and post-pads with the maximum padding which does not
          result in a convolution over just padded elements.
        * snt.CAUSAL pre-pads to ensure that each output value only depends on
          input values at the same or preceding indices ("no dependence on the
          future").
        * snt.REVERSE_CAUSAL post-pads to ensure that each output value only
          depends on input values at the same or *greater* indices ("no
          dependence on the past").
        If you use the same padding for all dimensions, and it is one of SAME
        or VALID, then this is supported directly by the underlying
        convolution op. In all other cases, the input data will be padded
        using tf.pad before calling the convolution op.
      use_bias: Whether to include bias parameters. Default `True`.
      initializers: Optional dict containing ops to initialize the filters
        (with key 'w') or biases (with key 'b').
      partitioners: Optional dict containing partitioners for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        partitioners are used.
      regularizers: Optional dict containing regularizers for the filters
        (with key 'w') and the biases (with key 'b'). As a default, no
        regularizers are used. A regularizer should be a function that takes
        a single `Tensor` as an input and returns a scalar `Tensor` output,
        e.g. the L1 and L2 regularizers in `tf.contrib.layers`.
      data_format: A string. Specifies whether the channel dimension
        of the input and output is the last dimension (default, NHWC), or the
        second dimension (NCHW).
      custom_getter: Callable or dictionary of callables to use as
        custom getters inside the module. If a dictionary, the keys
        correspond to regexes to match variable names.
See the\n `tf.get_variable` documentation for information about the\n custom_getter API.\n name: Name of the module.\n\n Raises:\n ValueError: If `channel_multiplier` isn't of type (`numbers.Integral` or\n `tf.Dimension`).\n ValueError: If `channel_multiplier` is less than 1.\n ValueError: If the given data_format is not a supported format (see\n `SUPPORTED_2D_DATA_FORMATS`).\n base.IncompatibleShapeError: If the given kernel shape is not an integer;\n or if the given kernel shape is not a sequence of two integers.\n base.IncompatibleShapeError: If the given stride is not an integer; or if\n the given stride is not a sequence of two integers.\n ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,\n `snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.\n KeyError: If `initializers`, `partitioners` or `regularizers` contain any\n keys other than 'w' or 'b'.\n TypeError: If any of the given initializers, partitioners or regularizers\n are not callable.\n ValueError: If the passed-in data_format doesn't have a channel dimension.\n \"\"\"\n if (not isinstance(channel_multiplier, numbers.Integral) and\n not isinstance(channel_multiplier, tf.Dimension)):\n raise ValueError((\"channel_multiplier ({}), must be of type \"\n \"(`tf.Dimension`, `numbers.Integral`).\").format(\n channel_multiplier))\n if channel_multiplier < 1:\n raise ValueError(\"channel_multiplier ({}), must be >= 1\".format(\n channel_multiplier))\n\n self._channel_multiplier = channel_multiplier\n\n if data_format not in SUPPORTED_2D_DATA_FORMATS:\n raise ValueError(\"Invalid data_format {:s}. Allowed formats \"\n \"{}\".format(data_format, SUPPORTED_2D_DATA_FORMATS))\n\n super(DepthwiseConv2D, self).__init__(\n output_channels=lambda: self._input_channels * self._channel_multiplier,\n kernel_shape=kernel_shape,\n stride=stride, padding=padding, use_bias=use_bias,\n initializers=initializers, partitioners=partitioners,\n regularizers=regularizers, data_format=data_format,\n custom_getter=custom_getter, name=name)\n\n def _construct_w(self, inputs):\n \"\"\"Construct the convolution weight matrix.\n\n Figures out the shape of the weight matrix, initializes it, and returns it.\n\n Args:\n inputs: A Tensor of shape `data_format` and of type `tf.float16`,\n `tf.bfloat16`, `tf.float32` or `tf.float64`.\n\n Returns:\n w: A weight matrix of the same type as `inputs` and of shape\n [kernel_sizes, input_channels, channel_multiplier].\n \"\"\"\n # For depthwise conv, output_channels = in_channels * channel_multiplier.\n # By default, depthwise conv applies a different filter to every input\n # channel. 
If channel_multiplier > 1, one input channel is used to produce\n # `channel_multiplier` outputs, which are then concatenated together.\n # This results in:\n weight_shape = self._kernel_shape + (self._input_channels,\n self._channel_multiplier)\n\n if \"w\" not in self._initializers:\n self._initializers[\"w\"] = create_weight_initializer(weight_shape[:2],\n dtype=inputs.dtype)\n\n w = tf.get_variable(\"w\",\n shape=weight_shape,\n dtype=inputs.dtype,\n initializer=self._initializers[\"w\"],\n partitioner=self._partitioners.get(\"w\", None),\n regularizer=self._regularizers.get(\"w\", None))\n return w\n\n def _apply_conv(self, inputs, w):\n \"\"\"Apply a depthwise_conv2d operation on `inputs` using variable `w`.\n\n Args:\n inputs: A Tensor of shape `data_format` and of type `tf.float16`,\n `tf.bfloat16`, `tf.float32` or `tf.float64`.\n w: A weight matrix of the same type as `inputs`.\n\n Returns:\n outputs: The result of the convolution operation on `inputs`.\n \"\"\"\n outputs = tf.nn.depthwise_conv2d(inputs,\n w,\n strides=self.stride,\n padding=self._conv_op_padding,\n data_format=self._data_format)\n return outputs\n\n @property\n def channel_multiplier(self):\n \"\"\"Returns the channel multiplier argument.\"\"\"\n return self._channel_multiplier\n\n\nclass SeparableConv2D(_ConvND):\n \"\"\"Performs an in-plane convolution to each channel independently.\n\n This acts as a light wrapper around the TensorFlow op\n `tf.nn.separable_conv2d`, abstracting away variable creation and sharing.\n \"\"\"\n\n def __init__(self,\n output_channels,\n channel_multiplier,\n kernel_shape,\n stride=1,\n rate=1,\n padding=SAME,\n use_bias=True,\n initializers=None,\n partitioners=None,\n regularizers=None,\n data_format=DATA_FORMAT_NHWC,\n custom_getter=None,\n name=\"separable_conv2d\"):\n \"\"\"Constructs a SeparableConv2D module.\n\n See the following documentation for an explanation of VALID versus SAME\n padding modes:\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution\n\n Args:\n output_channels: Number of output channels. Must be an integer.\n channel_multiplier: Number of channels to expand pointwise (depthwise)\n convolution to. Must be an integer. Must be > 0.\n When `channel_multiplier` is set to 1, applies a different filter to\n each input channel. Numbers larger than 1 cause the filter to be\n applied to `channel_multiplier` input channels. Outputs are\n concatenated together.\n kernel_shape: List with 2 elements in the following layout:\n [filter_height, filter_width] or integer that is\n used to define the list in all dimensions.\n stride: List with 4 elements of kernel strides, or integer that is used to\n define stride in all dimensions. Layout of list:\n [1, stride_y, stride_x, 1].\n rate: Sequence of dilation rates (of size 2), or integer that is used to\n define dilation rate in all dimensions. 1 corresponds to standard 2D\n convolution, `rate > 1` corresponds to dilated convolution. Cannot be\n > 1 if any of `stride` is also > 1.\n padding: Padding algorithm. 
Either `snt.SAME`, `snt.VALID`, `snt.FULL`,\n `snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings\n of length 2.\n * snt.SAME and snt.VALID are explained in the Tensorflow docs at\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n * snt.FULL pre- and post-pads with the maximum padding which does not\n result in a convolution over just padded elements.\n * snt.CAUSAL pre-pads to ensure that each output value only depends on\n input values at the same or preceding indices (\"no dependence on the\n future\").\n * snt.REVERSE_CAUSAL post-pads to ensure that each output value only\n depends on input values at the same or *greater* indices (\"no\n dependence on the past\").\n If you use the same padding for all dimensions, and it is one of SAME\n or VALID, then this is supported directly by the underlying\n convolution op. In all other cases, the input data will be padded\n using tf.pad before calling the convolution op.\n use_bias: Whether to include bias parameters. Default `True`.\n initializers: Optional dict containing ops to initialize the filters (with\n keys 'w_dw' for depthwise and 'w_pw' for pointwise) or biases\n (with key 'b').\n partitioners: Optional dict containing partitioners to partition the\n filters (with key 'w') or biases (with key 'b'). As a default, no\n partitioners are used.\n regularizers: Optional dict containing regularizers for the filters\n (with keys 'w_dw' for depthwise and 'w_pw' for pointwise) and the\n biases (with key 'b'). As a default, no regularizers are used.\n A regularizer should be a function that takes a single `Tensor` as an\n input and returns a scalar `Tensor` output, e.g. the L1 and L2\n regularizers in `tf.contrib.layers`.\n data_format: A string. Specifies whether the channel dimension\n of the input and output is the last dimension (default, NHWC), or the\n second dimension (\"NCHW\").\n custom_getter: Callable or dictionary of callables to use as\n custom getters inside the module. If a dictionary, the keys\n correspond to regexes to match variable names. 
See the\n `tf.get_variable` documentation for information about the\n custom_getter API.\n name: Name of the module.\n\n Raises:\n ValueError: If `channel_multiplier` isn't of type (`numbers.Integral` or\n `tf.Dimension`).\n ValueError: If `channel_multiplier` is less than 1.\n ValueError: If the given data_format is not a supported format (see\n `SUPPORTED_2D_DATA_FORMATS`).\n base.IncompatibleShapeError: If the given kernel shape is not an integer;\n or if the given kernel shape is not a sequence of two integers.\n base.IncompatibleShapeError: If the given stride is not an integer; or if\n the given stride is not a sequence of two integers.\n base.IncompatibleShapeError: If the given rate is not an integer; or if\n the given rate is not a sequence of two integers.\n base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with\n a not fully defined shape.\n base.NotSupportedError: If rate in any dimension and the stride in any\n dimension are simultaneously > 1.\n ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,\n `snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.\n KeyError: If `initializers`, `partitioners` or `regularizers` contain any\n keys other than 'w_dw', 'w_pw' or 'b'.\n TypeError: If any of the given initializers, partitioners or regularizers\n are not callable.\n TypeError: If mask is given and it is not convertible to a Tensor.\n ValueError: If the passed-in data_format doesn't have a channel dimension.\n \"\"\"\n if (not isinstance(channel_multiplier, numbers.Integral) and\n not isinstance(channel_multiplier, tf.Dimension)):\n raise ValueError((\"channel_multiplier ({}), must be of type \"\n \"(`tf.Dimension`, `numbers.Integral`).\").format(\n channel_multiplier))\n if channel_multiplier < 1:\n raise ValueError(\"channel_multiplier ({}), must be >= 1\".format(\n channel_multiplier))\n\n self._channel_multiplier = channel_multiplier\n\n if data_format not in SUPPORTED_2D_DATA_FORMATS:\n raise ValueError(\"Invalid data_format {:s}. Allowed formats \"\n \"{}\".format(data_format, SUPPORTED_2D_DATA_FORMATS))\n\n super(SeparableConv2D, self).__init__(\n output_channels=output_channels,\n kernel_shape=kernel_shape,\n stride=stride, padding=padding, rate=rate,\n use_bias=use_bias,\n initializers=initializers, partitioners=partitioners,\n regularizers=regularizers, data_format=data_format,\n custom_getter=custom_getter, name=name)\n\n @classmethod\n def get_possible_initializer_keys(cls, use_bias=True):\n return {\"w_dw\", \"w_pw\", \"b\"} if use_bias else {\"w_dw\", \"w_pw\"}\n\n def _construct_w(self, inputs):\n \"\"\"Connects the module into the graph, with input Tensor `inputs`.\n\n Args:\n inputs: A 4D Tensor of shape:\n [batch_size, input_height, input_width, input_channels]\n and of type `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.\n\n Returns:\n A tuple of two 4D Tensors, each with the same dtype as `inputs`:\n 1. w_dw, the depthwise weight matrix, of shape:\n [kernel_size, input_channels, channel_multiplier]\n 2. 
w_pw, the pointwise weight matrix, of shape:\n [1, 1, channel_multiplier * input_channels, output_channels].\n \"\"\"\n depthwise_weight_shape = self._kernel_shape + (self._input_channels,\n self._channel_multiplier)\n pointwise_input_size = self._channel_multiplier * self._input_channels\n pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels)\n\n if \"w_dw\" not in self._initializers:\n fan_in_shape = depthwise_weight_shape[:2]\n self._initializers[\"w_dw\"] = create_weight_initializer(fan_in_shape,\n dtype=inputs.dtype)\n\n if \"w_pw\" not in self._initializers:\n fan_in_shape = pointwise_weight_shape[:3]\n self._initializers[\"w_pw\"] = create_weight_initializer(fan_in_shape,\n dtype=inputs.dtype)\n\n w_dw = tf.get_variable(\n \"w_dw\",\n shape=depthwise_weight_shape,\n dtype=inputs.dtype,\n initializer=self._initializers[\"w_dw\"],\n partitioner=self._partitioners.get(\"w_dw\", None),\n regularizer=self._regularizers.get(\"w_dw\", None))\n\n w_pw = tf.get_variable(\n \"w_pw\",\n shape=pointwise_weight_shape,\n dtype=inputs.dtype,\n initializer=self._initializers[\"w_pw\"],\n partitioner=self._partitioners.get(\"w_pw\", None),\n regularizer=self._regularizers.get(\"w_pw\", None))\n\n return w_dw, w_pw\n\n def _apply_conv(self, inputs, w):\n \"\"\"Apply a `separable_conv2d` operation on `inputs` using `w`.\n\n Args:\n inputs: A Tensor of shape `data_format` and of type `tf.float16`,\n `tf.bfloat16`, `tf.float32` or `tf.float64`.\n w: A tuple of weight matrices of the same type as `inputs`, the first\n being the depthwise weight matrix, and the second being the pointwise\n weight matrix.\n\n Returns:\n outputs: The result of the convolution operation on `inputs`.\n \"\"\"\n w_dw, w_pw = w\n outputs = tf.nn.separable_conv2d(inputs,\n w_dw,\n w_pw,\n rate=self._rate,\n strides=self.stride,\n padding=self._conv_op_padding,\n data_format=self._data_format)\n return outputs\n\n @property\n def channel_multiplier(self):\n \"\"\"Returns the channel multiplier argument.\"\"\"\n return self._channel_multiplier\n\n @property\n def w_dw(self):\n \"\"\"Returns the Variable containing the depthwise weight matrix.\"\"\"\n self._ensure_is_connected()\n return self._w[0]\n\n @property\n def w_pw(self):\n \"\"\"Returns the Variable containing the pointwise weight matrix.\"\"\"\n self._ensure_is_connected()\n return self._w[1]\n\n\nclass SeparableConv1D(_ConvND):\n \"\"\"Performs an in-plane convolution to each channel independently.\n\n This acts as a light wrapper around the TensorFlow op\n `tf.nn.separable_conv2d`, abstracting away variable creation and sharing.\n \"\"\"\n\n def __init__(self,\n output_channels,\n channel_multiplier,\n kernel_shape,\n stride=1,\n rate=1,\n padding=SAME,\n use_bias=True,\n initializers=None,\n partitioners=None,\n regularizers=None,\n data_format=DATA_FORMAT_NWC,\n custom_getter=None,\n name=\"separable_conv1d\"):\n \"\"\"Constructs a SeparableConv1D module.\n\n See the following documentation for an explanation of VALID versus SAME\n padding modes:\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution\n\n Args:\n output_channels: Number of output channels. Must be an integer.\n channel_multiplier: Number of channels to expand pointwise (depthwise)\n convolution to. Must be an integer. Must be > 0.\n When `channel_multiplier` is set to 1, applies a different filter to\n each input channel. Numbers larger than 1 cause the filter to be\n applied to `channel_multiplier` input channels. 
Outputs are\n concatenated together.\n kernel_shape: List with 1 element in the following layout:\n [filter_width] or integer that is\n used to define the list in all dimensions.\n stride: Sequence of kernel strides (of size 1), or integer that is used to\n define stride in all dimensions.\n rate: Sequence of dilation rates (of size 1), or integer that is used to\n define dilation rate in all dimensions. 1 corresponds to standard 1D\n convolution, `rate > 1` corresponds to dilated convolution. Cannot be\n > 1 if any of `stride` is also > 1.\n padding: Padding algorithm. Either `snt.SAME`, `snt.VALID`, `snt.FULL`,\n `snt.CAUSAL`, `snt.REVERSE_CAUSAL`, or a sequence of these paddings\n of length 1.\n * snt.SAME and snt.VALID are explained in the Tensorflow docs at\n https://www.tensorflow.org/api_docs/python/tf/nn/convolution.\n * snt.FULL pre- and post-pads with the maximum padding which does not\n result in a convolution over just padded elements.\n * snt.CAUSAL pre-pads to ensure that each output value only depends on\n input values at the same or preceding indices (\"no dependence on the\n future\").\n * snt.REVERSE_CAUSAL post-pads to ensure that each output value only\n depends on input values at the same or *greater* indices (\"no\n dependence on the past\").\n If you use the same padding for all dimensions, and it is one of SAME\n or VALID, then this is supported directly by the underlying\n convolution op. In all other cases, the input data will be padded\n using tf.pad before calling the convolution op.\n use_bias: Whether to include bias parameters. Default `True`.\n initializers: Optional dict containing ops to initialize the filters (with\n keys 'w_dw' for depthwise and 'w_pw' for pointwise) or biases\n (with key 'b').\n partitioners: Optional dict containing partitioners to partition the\n filters (with key 'w') or biases (with key 'b'). As a default, no\n partitioners are used.\n regularizers: Optional dict containing regularizers for the filters\n (with keys 'w_dw' for depthwise and 'w_pw' for pointwise) and the\n biases (with key 'b'). As a default, no regularizers are used.\n A regularizer should be a function that takes a single `Tensor` as an\n input and returns a scalar `Tensor` output, e.g. the L1 and L2\n regularizers in `tf.contrib.layers`.\n data_format: A string. Specifies whether the channel dimension\n of the input and output is the last dimension (default, NWC), or the\n second dimension (\"NCW\").\n custom_getter: Callable or dictionary of callables to use as\n custom getters inside the module. If a dictionary, the keys\n correspond to regexes to match variable names. 
See the\n `tf.get_variable` documentation for information about the\n custom_getter API.\n name: Name of the module.\n\n Raises:\n ValueError: If `channel_multiplier` isn't of type (`numbers.Integral` or\n `tf.Dimension`).\n ValueError: If `channel_multiplier` is less than 1.\n ValueError: If the given data_format is not a supported format (see\n `SUPPORTED_1D_DATA_FORMATS`).\n base.IncompatibleShapeError: If the given kernel shape is not an integer;\n or if the given kernel shape is not a sequence of one integer.\n base.IncompatibleShapeError: If the given stride is not an integer; or if\n the given stride is not a sequence of one integer.\n base.IncompatibleShapeError: If the given rate is not an integer; or if\n the given rate is not a sequence of one integer.\n base.IncompatibleShapeError: If a mask is a TensorFlow Tensor with\n a not fully defined shape.\n base.NotSupportedError: If rate in any dimension and the stride in any\n dimension are simultaneously > 1.\n ValueError: If the given padding is not `snt.VALID`, `snt.SAME`,\n `snt.FULL`, `snt.CAUSAL`, `snt.REVERSE_CAUSAL` or a sequence of these.\n KeyError: If `initializers`, `partitioners` or `regularizers` contain any\n keys other than 'w_dw', 'w_pw' or 'b'.\n TypeError: If any of the given initializers, partitioners or regularizers\n are not callable.\n TypeError: If mask is given and it is not convertible to a Tensor.\n ValueError: If the passed-in data_format doesn't have a channel dimension.\n \"\"\"\n if (not isinstance(channel_multiplier, numbers.Integral) and\n not isinstance(channel_multiplier, tf.Dimension)):\n raise ValueError((\"channel_multiplier ({}), must be of type \"\n \"(`tf.Dimension`, `numbers.Integral`).\").format(\n channel_multiplier))\n if channel_multiplier < 1:\n raise ValueError(\"channel_multiplier ({}), must be >= 1\".format(\n channel_multiplier))\n\n self._channel_multiplier = channel_multiplier\n\n if data_format not in SUPPORTED_1D_DATA_FORMATS:\n raise ValueError(\"Invalid data_format {:s}. Allowed formats \"\n \"{}\".format(data_format, SUPPORTED_1D_DATA_FORMATS))\n\n super(SeparableConv1D, self).__init__(\n output_channels=output_channels,\n kernel_shape=kernel_shape,\n stride=stride, rate=rate, padding=padding, use_bias=use_bias,\n initializers=initializers, partitioners=partitioners,\n regularizers=regularizers, data_format=data_format,\n custom_getter=custom_getter, name=name)\n\n @classmethod\n def get_possible_initializer_keys(cls, use_bias=True):\n return {\"w_dw\", \"w_pw\", \"b\"} if use_bias else {\"w_dw\", \"w_pw\"}\n\n def _construct_w(self, inputs):\n \"\"\"Connects the module into the graph, with input Tensor `inputs`.\n\n Args:\n inputs: A 3D Tensor of shape:\n [batch_size, input_width, input_channels]\n and of type `tf.float16`, `tf.bfloat16`, `tf.float32` or `tf.float64`.\n\n Returns:\n A tuple of two 4D Tensors, each with the same dtype as `inputs`:\n 1. w_dw, the depthwise weight matrix, of shape:\n [1, kernel_size, input_channels, channel_multiplier]\n 2. 
w_pw, the pointwise weight matrix, of shape:\n [1, 1, channel_multiplier * input_channels, output_channels].\n \"\"\"\n depthwise_weight_shape = ((1,) + self._kernel_shape +\n (self._input_channels, self._channel_multiplier))\n pointwise_input_size = self._channel_multiplier * self._input_channels\n pointwise_weight_shape = (1, 1, pointwise_input_size, self._output_channels)\n\n if \"w_dw\" not in self._initializers:\n fan_in_shape = depthwise_weight_shape[:2]\n self._initializers[\"w_dw\"] = create_weight_initializer(fan_in_shape,\n dtype=inputs.dtype)\n\n if \"w_pw\" not in self._initializers:\n fan_in_shape = pointwise_weight_shape[:3]\n self._initializers[\"w_pw\"] = create_weight_initializer(fan_in_shape,\n dtype=inputs.dtype)\n\n w_dw = tf.get_variable(\n \"w_dw\",\n shape=depthwise_weight_shape,\n dtype=inputs.dtype,\n initializer=self._initializers[\"w_dw\"],\n partitioner=self._partitioners.get(\"w_dw\", None),\n regularizer=self._regularizers.get(\"w_dw\", None))\n\n w_pw = tf.get_variable(\n \"w_pw\",\n shape=pointwise_weight_shape,\n dtype=inputs.dtype,\n initializer=self._initializers[\"w_pw\"],\n partitioner=self._partitioners.get(\"w_pw\", None),\n regularizer=self._regularizers.get(\"w_pw\", None))\n\n return w_dw, w_pw\n\n def _apply_conv(self, inputs, w):\n \"\"\"Apply a `separable_conv2d` operation on `inputs` using `w`.\n\n Args:\n inputs: A Tensor of shape `data_format` and of type `tf.float16`,\n `tf.bfloat16`, `tf.float32` or `tf.float64`.\n w: A tuple of weight matrices of the same type as `inputs`, the first\n being the depthwise weight matrix, and the second being the pointwise\n weight matrix.\n\n Returns:\n outputs: The result of the convolution operation on `inputs`.\n \"\"\"\n if self._data_format == DATA_FORMAT_NWC:\n h_dim = 1\n two_dim_conv_data_format = DATA_FORMAT_NHWC\n else:\n h_dim = 2\n two_dim_conv_data_format = DATA_FORMAT_NCHW\n\n inputs = tf.expand_dims(inputs, axis=h_dim)\n two_dim_conv_stride = self.stride[:h_dim] + (1,) + self.stride[h_dim:]\n\n # Height always precedes width.\n two_dim_conv_rate = (1,) + self._rate\n\n w_dw, w_pw = w\n outputs = tf.nn.separable_conv2d(inputs,\n w_dw,\n w_pw,\n strides=two_dim_conv_stride,\n rate=two_dim_conv_rate,\n padding=self._conv_op_padding,\n data_format=two_dim_conv_data_format)\n outputs = tf.squeeze(outputs, [h_dim])\n return outputs\n\n @property\n def channel_multiplier(self):\n \"\"\"Returns the channel multiplier argument.\"\"\"\n return self._channel_multiplier\n\n @property\n def w_dw(self):\n \"\"\"Returns the Variable containing the depthwise weight matrix.\"\"\"\n self._ensure_is_connected()\n return self._w[0]\n\n @property\n def w_pw(self):\n \"\"\"Returns the Variable containing the pointwise weight matrix.\"\"\"\n self._ensure_is_connected()\n return self._w[1]\n" ]
[ [ "tensorflow.reshape", "tensorflow.nn.conv3d_transpose", "tensorflow.nn.separable_conv2d", "tensorflow.bfloat16.is_compatible_with", "tensorflow.tile", "tensorflow.shape", "tensorflow.concat", "tensorflow.float32.is_compatible_with", "numpy.prod", "tensorflow.squeeze", "tensorflow.nn.conv2d_transpose", "tensorflow.pad", "tensorflow.nn.bias_add", "tensorflow.float16.is_compatible_with", "tensorflow.expand_dims", "tensorflow.nn.convolution", "tensorflow.nn.depthwise_conv2d", "tensorflow.float64.is_compatible_with", "tensorflow.zeros_initializer", "tensorflow.convert_to_tensor", "tensorflow.truncated_normal_initializer" ] ]
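A minimal usage sketch for the two 2D modules documented in the record above (an editor's illustration, not part of the dumped source; it assumes Sonnet v1 in TF1-style graph mode, and the tensor sizes are made up):

import tensorflow as tf
import sonnet as snt

inputs = tf.zeros([8, 32, 32, 3])  # NHWC, 3 input channels

# Depthwise: each of the 3 input channels gets channel_multiplier = 2
# filters, so the output carries 3 * 2 = 6 channels.
depthwise = snt.DepthwiseConv2D(channel_multiplier=2, kernel_shape=3,
                                stride=1, padding=snt.SAME)
dw_out = depthwise(inputs)  # shape [8, 32, 32, 6]

# Separable: the depthwise stage (w_dw) is followed by a 1x1 pointwise
# stage (w_pw) that mixes the 3 * 2 = 6 intermediate channels down to
# output_channels = 16.
separable = snt.SeparableConv2D(output_channels=16, channel_multiplier=2,
                                kernel_shape=3, padding=snt.SAME)
sp_out = separable(inputs)  # shape [8, 32, 32, 16]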
Lechatelia/own_mmdet
[ "eac5db1d1bee8eafe0ed46fa4bb61ca8605b502f" ]
[ "mmdet/ops/upsample.py" ]
[ "import torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom mmcv.cnn import xavier_init\r\n\r\nfrom .carafe import CARAFEPack\r\n\r\n\r\nclass PixelShufflePack(nn.Module):\r\n \"\"\" Pixel Shuffle upsample layer\r\n\r\n Args:\r\n in_channels (int): Number of input channels\r\n out_channels (int): Number of output channels\r\n scale_factor (int): Upsample ratio\r\n upsample_kernel (int): Kernel size of Conv layer to expand the channels\r\n\r\n Returns:\r\n upsampled feature map\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels, scale_factor,\r\n upsample_kernel):\r\n super(PixelShufflePack, self).__init__()\r\n self.in_channels = in_channels\r\n self.out_channels = out_channels\r\n self.scale_factor = scale_factor\r\n self.upsample_kernel = upsample_kernel\r\n self.upsample_conv = nn.Conv2d(\r\n self.in_channels,\r\n self.out_channels * scale_factor * scale_factor,\r\n self.upsample_kernel,\r\n padding=(self.upsample_kernel - 1) // 2)\r\n self.init_weights()\r\n\r\n def init_weights(self):\r\n xavier_init(self.upsample_conv, distribution='uniform')\r\n\r\n def forward(self, x):\r\n x = self.upsample_conv(x)\r\n x = F.pixel_shuffle(x, self.scale_factor)\r\n return x\r\n\r\n\r\nupsample_cfg = {\r\n # layer_abbreviation: module\r\n 'nearest': nn.Upsample,\r\n 'bilinear': nn.Upsample,\r\n 'deconv': nn.ConvTranspose2d,\r\n 'pixel_shuffle': PixelShufflePack,\r\n 'carafe': CARAFEPack\r\n}\r\n\r\n\r\ndef build_upsample_layer(cfg):\r\n \"\"\" Build upsample layer\r\n\r\n Args:\r\n cfg (dict): cfg should contain:\r\n type (str): Identify upsample layer type.\r\n upsample ratio (int): Upsample ratio\r\n layer args: args needed to instantiate an upsample layer.\r\n\r\n Returns:\r\n layer (nn.Module): Created upsample layer\r\n \"\"\"\r\n assert isinstance(cfg, dict) and 'type' in cfg\r\n cfg_ = cfg.copy()\r\n\r\n layer_type = cfg_.pop('type')\r\n if layer_type not in upsample_cfg:\r\n raise KeyError('Unrecognized upsample type {}'.format(layer_type))\r\n else:\r\n upsample = upsample_cfg[layer_type]\r\n if upsample is None:\r\n raise NotImplementedError\r\n\r\n layer = upsample(**cfg_)\r\n return layer\r\n" ]
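To make the channel arithmetic in PixelShufflePack concrete, a short shape walk-through (a sketch assuming the class above is importable; the sizes are illustrative):

import torch

up = PixelShufflePack(in_channels=64, out_channels=64, scale_factor=2,
                      upsample_kernel=3)
x = torch.randn(1, 64, 16, 16)
# upsample_conv expands channels to out_channels * scale_factor**2 = 256;
# F.pixel_shuffle then trades those channels for a 2x larger spatial grid.
y = up(x)  # shape [1, 64, 32, 32]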
[ [ "torch.nn.functional.pixel_shuffle", "torch.nn.Conv2d" ] ]
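How build_upsample_layer consumes a config dict is easier to see from a sketch (hypothetical cfg values in the usual mmdet style; 'type' selects the entry in upsample_cfg and the remaining keys are forwarded as constructor kwargs):

# -> nn.ConvTranspose2d(in_channels=256, out_channels=256, kernel_size=4, ...)
deconv = build_upsample_layer(
    dict(type='deconv', in_channels=256, out_channels=256,
         kernel_size=4, stride=2, padding=1))

# -> PixelShufflePack(in_channels=256, out_channels=256, scale_factor=2, ...)
shuffle = build_upsample_layer(
    dict(type='pixel_shuffle', in_channels=256, out_channels=256,
         scale_factor=2, upsample_kernel=3))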
ksrinivs64/lale
[ "e0ffc357c3711940078718717aebc5b06c9dc4ae", "e0ffc357c3711940078718717aebc5b06c9dc4ae" ]
[ "lale/lib/autogen/extra_trees_classifier.py", "lale/helpers.py" ]
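This record carries two files, which makes the dump's layout visible: the sha, path, source, and API lists run in parallel, one entry per file. A reading sketch (the record dict and its key names are the editor's assumption about the schema; shas abbreviated):

record = {
    "repo_name": "ksrinivs64/lale",
    "hexsha": ["e0ffc357...", "e0ffc357..."],
    "file_path": ["lale/lib/autogen/extra_trees_classifier.py",
                  "lale/helpers.py"],
    "code": ["<source of extra_trees_classifier.py>",
             "<source of helpers.py>"],
}
for sha, path, src in zip(record["hexsha"], record["file_path"], record["code"]):
    print(f"{record['repo_name']}@{sha[:8]}:{path} ({len(src)} chars)")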
[ "import sklearn\nfrom numpy import inf, nan\nfrom sklearn.ensemble import ExtraTreesClassifier as Op\n\nfrom lale.docstrings import set_docstrings\nfrom lale.operators import make_operator\n\n\nclass _ExtraTreesClassifierImpl:\n def __init__(self, **hyperparams):\n self._hyperparams = hyperparams\n self._wrapped_model = Op(**self._hyperparams)\n\n def fit(self, X, y=None):\n if y is not None:\n self._wrapped_model.fit(X, y)\n else:\n self._wrapped_model.fit(X)\n return self\n\n def predict(self, X):\n return self._wrapped_model.predict(X)\n\n def predict_proba(self, X):\n return self._wrapped_model.predict_proba(X)\n\n\n_hyperparams_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"inherited docstring for ExtraTreesClassifier An extra-trees classifier.\",\n \"allOf\": [\n {\n \"type\": \"object\",\n \"required\": [\n \"n_estimators\",\n \"criterion\",\n \"max_depth\",\n \"min_samples_split\",\n \"min_samples_leaf\",\n \"min_weight_fraction_leaf\",\n \"max_features\",\n \"max_leaf_nodes\",\n \"min_impurity_decrease\",\n \"min_impurity_split\",\n \"bootstrap\",\n \"oob_score\",\n \"n_jobs\",\n \"random_state\",\n \"verbose\",\n \"warm_start\",\n \"class_weight\",\n ],\n \"relevantToOptimizer\": [\n \"n_estimators\",\n \"criterion\",\n \"max_depth\",\n \"min_samples_split\",\n \"min_samples_leaf\",\n \"max_features\",\n \"bootstrap\",\n ],\n \"additionalProperties\": False,\n \"properties\": {\n \"n_estimators\": {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 10,\n \"maximumForOptimizer\": 100,\n \"distribution\": \"uniform\",\n \"default\": 10,\n \"description\": \"The number of trees in the forest\",\n },\n \"criterion\": {\n \"enum\": [\"entropy\", \"gini\"],\n \"default\": \"gini\",\n \"description\": \"The function to measure the quality of a split\",\n },\n \"max_depth\": {\n \"anyOf\": [\n {\n \"type\": \"integer\",\n \"minimumForOptimizer\": 3,\n \"maximumForOptimizer\": 5,\n \"distribution\": \"uniform\",\n },\n {\"enum\": [None]},\n ],\n \"default\": None,\n \"description\": \"The maximum depth of the tree\",\n },\n \"min_samples_split\": {\n \"anyOf\": [\n {\"type\": \"integer\", \"forOptimizer\": False},\n {\n \"type\": \"number\",\n \"minimumForOptimizer\": 0.01,\n \"maximumForOptimizer\": 0.5,\n \"distribution\": \"uniform\",\n },\n ],\n \"default\": 2,\n \"description\": \"The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number\",\n },\n \"min_samples_leaf\": {\n \"anyOf\": [\n {\"type\": \"integer\", \"forOptimizer\": False},\n {\n \"type\": \"number\",\n \"minimumForOptimizer\": 0.01,\n \"maximumForOptimizer\": 0.5,\n \"distribution\": \"uniform\",\n },\n ],\n \"default\": 1,\n \"description\": \"The minimum number of samples required to be at a leaf node\",\n },\n \"min_weight_fraction_leaf\": {\n \"type\": \"number\",\n \"default\": 0.0,\n \"description\": \"The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node\",\n },\n \"max_features\": {\n \"anyOf\": [\n {\"type\": \"integer\", \"forOptimizer\": False},\n {\n \"type\": \"number\",\n \"minimumForOptimizer\": 0.01,\n \"maximumForOptimizer\": 1.0,\n \"distribution\": \"uniform\",\n },\n {\"type\": \"string\", \"forOptimizer\": False},\n {\"enum\": [None]},\n ],\n \"default\": \"auto\",\n \"description\": \"The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split\",\n 
},\n \"max_leaf_nodes\": {\n \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}],\n \"default\": None,\n \"description\": \"Grow trees with ``max_leaf_nodes`` in best-first fashion\",\n },\n \"min_impurity_decrease\": {\n \"type\": \"number\",\n \"default\": 0.0,\n \"description\": \"A node will be split if this split induces a decrease of the impurity greater than or equal to this value\",\n },\n \"min_impurity_split\": {\n \"anyOf\": [{\"type\": \"number\"}, {\"enum\": [None]}],\n \"default\": None,\n \"description\": \"Threshold for early stopping in tree growth\",\n },\n \"bootstrap\": {\n \"type\": \"boolean\",\n \"default\": False,\n \"description\": \"Whether bootstrap samples are used when building trees\",\n },\n \"oob_score\": {\n \"type\": \"boolean\",\n \"default\": False,\n \"description\": \"Whether to use out-of-bag samples to estimate the generalization accuracy.\",\n },\n \"n_jobs\": {\n \"anyOf\": [{\"type\": \"integer\"}, {\"enum\": [None]}],\n \"default\": 4,\n \"description\": \"The number of jobs to run in parallel for both `fit` and `predict`\",\n },\n \"random_state\": {\n \"anyOf\": [\n {\"type\": \"integer\"},\n {\"laleType\": \"numpy.random.RandomState\"},\n {\"enum\": [None]},\n ],\n \"default\": None,\n \"description\": \"If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.\",\n },\n \"verbose\": {\n \"type\": \"integer\",\n \"default\": 0,\n \"description\": \"Controls the verbosity when fitting and predicting.\",\n },\n \"warm_start\": {\n \"type\": \"boolean\",\n \"default\": False,\n \"description\": \"When set to ``True``, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest\",\n },\n \"class_weight\": {\n \"XXX TODO XXX\": 'dict, list of dicts, \"balanced\", \"balanced_subsample\" or None, optional (default=None)',\n \"description\": \"Weights associated with classes in the form ``{class_label: weight}``\",\n \"enum\": [\"balanced\"],\n \"default\": \"balanced\",\n },\n },\n },\n {\n \"XXX TODO XXX\": \"Parameter: min_samples_leaf > only be considered if it leaves at least min_samples_leaf training samples in each of the left and right branches\"\n },\n ],\n}\n_input_fit_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Build a forest of trees from the training set (X, y).\",\n \"type\": \"object\",\n \"required\": [\"X\", \"y\"],\n \"properties\": {\n \"X\": {\n \"anyOf\": [\n {\n \"type\": \"array\",\n \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"},\n \"XXX TODO XXX\": \"array-like or sparse matrix of shape = [n_samples, n_features]\",\n },\n {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n ],\n \"description\": \"The training input samples\",\n },\n \"y\": {\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n ],\n \"description\": \"The target values (class labels in classification, real numbers in regression).\",\n },\n \"sample_weight\": {\n \"anyOf\": [{\"type\": \"array\", \"items\": {\"type\": \"number\"}}, {\"enum\": [None]}],\n \"description\": \"Sample weights\",\n },\n },\n}\n_input_predict_schema = {\n \"$schema\": 
\"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Predict class for X.\",\n \"type\": \"object\",\n \"required\": [\"X\"],\n \"properties\": {\n \"X\": {\n \"anyOf\": [\n {\n \"type\": \"array\",\n \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"},\n \"XXX TODO XXX\": \"array-like or sparse matrix of shape = [n_samples, n_features]\",\n },\n {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n ],\n \"description\": \"The input samples\",\n }\n },\n}\n_output_predict_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"The predicted classes.\",\n \"anyOf\": [\n {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n {\"type\": \"array\", \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}}},\n ],\n}\n_input_predict_proba_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Predict class probabilities for X.\",\n \"type\": \"object\",\n \"required\": [\"X\"],\n \"properties\": {\n \"X\": {\n \"anyOf\": [\n {\n \"type\": \"array\",\n \"items\": {\"laleType\": \"Any\", \"XXX TODO XXX\": \"item type\"},\n \"XXX TODO XXX\": \"array-like or sparse matrix of shape = [n_samples, n_features]\",\n },\n {\n \"type\": \"array\",\n \"items\": {\"type\": \"array\", \"items\": {\"type\": \"number\"}},\n },\n ],\n \"description\": \"The input samples\",\n }\n },\n}\n_output_predict_proba_schema = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"such arrays if n_outputs > 1\",\n \"laleType\": \"Any\",\n \"XXX TODO XXX\": \"array of shape = [n_samples, n_classes], or a list of n_outputs\",\n}\n_combined_schemas = {\n \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n \"description\": \"Combined schema for expected data and hyperparameters.\",\n \"documentation_url\": \"https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.ExtraTreesClassifier#sklearn-ensemble-extratreesclassifier\",\n \"import_from\": \"sklearn.ensemble\",\n \"type\": \"object\",\n \"tags\": {\"pre\": [], \"op\": [\"estimator\", \"classifier\"], \"post\": []},\n \"properties\": {\n \"hyperparams\": _hyperparams_schema,\n \"input_fit\": _input_fit_schema,\n \"input_predict\": _input_predict_schema,\n \"output_predict\": _output_predict_schema,\n \"input_predict_proba\": _input_predict_proba_schema,\n \"output_predict_proba\": _output_predict_proba_schema,\n },\n}\nExtraTreesClassifier = make_operator(_ExtraTreesClassifierImpl, _combined_schemas)\n\nif sklearn.__version__ >= \"0.22\":\n # old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html\n # new: https://scikit-learn.org/0.23/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html\n from lale.schemas import AnyOf, Float, Int, Null\n\n ExtraTreesClassifier = ExtraTreesClassifier.customize_schema(\n n_estimators=Int(\n desc=\"The number of trees in the forest.\",\n default=100,\n forOptimizer=True,\n minimumForOptimizer=10,\n maximumForOptimizer=100,\n ),\n ccp_alpha=Float(\n desc=\"Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. 
By default, no pruning is performed.\",\n default=0.0,\n forOptimizer=False,\n minimum=0.0,\n maximumForOptimizer=0.1,\n ),\n max_samples=AnyOf(\n types=[\n Null(desc=\"Draw X.shape[0] samples.\"),\n Int(desc=\"Draw max_samples samples.\", minimum=1),\n Float(\n desc=\"Draw max_samples * X.shape[0] samples.\",\n minimum=0.0,\n exclusiveMinimum=True,\n maximum=1.0,\n exclusiveMaximum=True,\n ),\n ],\n desc=\"If bootstrap is True, the number of samples to draw from X to train each base estimator.\",\n default=None,\n ),\n set_as_available=True,\n )\n\nif sklearn.__version__ >= \"1.0\":\n # old: https://scikit-learn.org/0.24/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html\n # new: https://scikit-learn.org/1.0/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html\n ExtraTreesClassifier = ExtraTreesClassifier.customize_schema(\n min_impurity_split=None, set_as_available=True\n )\n\nset_docstrings(ExtraTreesClassifier)\n", "# Copyright 2019 IBM Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ast\nimport copy\nimport importlib\nimport logging\nimport os\nimport re\nimport sys\nimport time\nimport traceback\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Iterable,\n List,\n Mapping,\n Optional,\n Set,\n Tuple,\n TypeVar,\n Union,\n)\n\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse\nimport sklearn.pipeline\nfrom sklearn.metrics import accuracy_score, check_scoring, log_loss\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.utils.metaestimators import _safe_split\n\nimport lale.datasets.data_schemas\n\ntry:\n import torch\n\n torch_installed = True\nexcept ImportError:\n torch_installed = False\n\nfrom importlib import util\n\nspark_loader = util.find_spec(\"pyspark\")\nspark_installed = spark_loader is not None\nif spark_installed:\n from pyspark.sql.dataframe import DataFrame as spark_df\n\nlogger = logging.getLogger(__name__)\n\nLALE_NESTED_SPACE_KEY = \"__lale_nested_space\"\n\n\ndef make_nested_hyperopt_space(sub_space):\n return {LALE_NESTED_SPACE_KEY: sub_space}\n\n\ndef assignee_name(level=1) -> Optional[str]:\n tb = traceback.extract_stack()\n file_name, line_number, function_name, text = tb[-(level + 2)]\n try:\n tree = ast.parse(text, file_name)\n except SyntaxError:\n return None\n assert tree is not None and isinstance(tree, ast.Module)\n if len(tree.body) == 1:\n stmt = tree.body[0]\n if isinstance(stmt, ast.Assign):\n lhs = stmt.targets\n if len(lhs) == 1:\n res = lhs[0]\n if isinstance(res, ast.Name):\n return res.id\n return None\n\n\ndef arg_name(pos=0, level=1) -> Optional[str]:\n tb = traceback.extract_stack()\n file_name, line_number, function_name, text = tb[-(level + 2)]\n try:\n tree = ast.parse(text, file_name)\n except SyntaxError:\n return None\n assert tree is not None and isinstance(tree, ast.Module)\n if len(tree.body) == 1:\n stmt = tree.body[0]\n if isinstance(stmt, ast.Expr):\n expr = stmt.value\n if isinstance(expr, ast.Call):\n args = expr.args\n if pos < len(args):\n res = 
args[pos]\n if isinstance(res, ast.Name):\n return res.id\n return None\n\n\ndef data_to_json(data, subsample_array: bool = True) -> Union[list, dict, int, float]:\n if type(data) is tuple:\n # convert to list\n return [data_to_json(elem, subsample_array) for elem in data]\n if type(data) is list:\n return [data_to_json(elem, subsample_array) for elem in data]\n elif type(data) is dict:\n return {key: data_to_json(data[key], subsample_array) for key in data}\n elif isinstance(data, np.ndarray):\n return ndarray_to_json(data, subsample_array)\n elif type(data) is scipy.sparse.csr_matrix:\n return ndarray_to_json(data.toarray(), subsample_array)\n elif isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):\n np_array = data.values\n return ndarray_to_json(np_array, subsample_array)\n elif torch_installed and isinstance(data, torch.Tensor):\n np_array = data.detach().numpy()\n return ndarray_to_json(np_array, subsample_array)\n elif isinstance(data, (np.int64, np.int32, np.int16)): # type: ignore\n return int(data)\n elif isinstance(data, (np.float32, np.float64)): # type: ignore\n return float(data)\n else:\n return data\n\n\ndef is_empty_dict(val) -> bool:\n return isinstance(val, dict) and len(val) == 0\n\n\ndef dict_without(orig_dict: Dict[str, Any], key: str) -> Dict[str, Any]:\n return {k: orig_dict[k] for k in orig_dict if k != key}\n\n\ndef json_lookup(ptr, jsn, default=None):\n steps = ptr.split(\"/\")\n sub_jsn = jsn\n for s in steps:\n if s not in sub_jsn:\n return default\n sub_jsn = sub_jsn[s]\n return sub_jsn\n\n\ndef ndarray_to_json(arr: np.ndarray, subsample_array: bool = True) -> Union[list, dict]:\n # sample 10 rows and no limit on columns\n num_subsamples: List[int]\n if subsample_array:\n num_subsamples = [10, np.iinfo(int).max, np.iinfo(int).max]\n else:\n num_subsamples = [\n np.iinfo(int).max,\n np.iinfo(int).max,\n np.iinfo(int).max,\n ]\n\n def subarray_to_json(indices: Tuple[int, ...]) -> Any:\n if len(indices) == len(arr.shape):\n if (\n isinstance(arr[indices], bool)\n or isinstance(arr[indices], int)\n or isinstance(arr[indices], float)\n or isinstance(arr[indices], str)\n ):\n return arr[indices]\n elif np.issubdtype(arr.dtype, np.bool_):\n return bool(arr[indices])\n elif np.issubdtype(arr.dtype, np.integer):\n return int(arr[indices])\n elif np.issubdtype(arr.dtype, np.number):\n return float(arr[indices])\n elif arr.dtype.kind in [\"U\", \"S\", \"O\"]:\n return str(arr[indices])\n else:\n raise ValueError(\n f\"Unexpected dtype {arr.dtype}, \"\n f\"kind {arr.dtype.kind}, \"\n f\"type {type(arr[indices])}.\"\n )\n else:\n assert len(indices) < len(arr.shape)\n return [\n subarray_to_json(indices + (i,))\n for i in range(\n min(num_subsamples[len(indices)], arr.shape[len(indices)])\n )\n ]\n\n return subarray_to_json(())\n\n\ndef split_with_schemas(estimator, all_X, all_y, indices, train_indices=None):\n subset_X, subset_y = _safe_split(estimator, all_X, all_y, indices, train_indices)\n if hasattr(all_X, \"json_schema\"):\n n_rows = subset_X.shape[0]\n schema = {\n \"type\": \"array\",\n \"minItems\": n_rows,\n \"maxItems\": n_rows,\n \"items\": all_X.json_schema[\"items\"],\n }\n lale.datasets.data_schemas.add_schema(subset_X, schema)\n if hasattr(all_y, \"json_schema\"):\n n_rows = subset_y.shape[0]\n schema = {\n \"type\": \"array\",\n \"minItems\": n_rows,\n \"maxItems\": n_rows,\n \"items\": all_y.json_schema[\"items\"],\n }\n lale.datasets.data_schemas.add_schema(subset_y, schema)\n return subset_X, subset_y\n\n\ndef fold_schema(X, y, cv=1, 
is_classifier=True):\n def fold_schema_aux(data, n_rows):\n orig_schema = lale.datasets.data_schemas.to_schema(data)\n aux_result = {**orig_schema, \"minItems\": n_rows, \"maxItems\": n_rows}\n return aux_result\n\n n_splits = cv if isinstance(cv, int) else cv.get_n_splits()\n try:\n n_samples = X.shape[0] if hasattr(X, \"shape\") else len(X)\n except TypeError: # raised for Spark dataframes.\n n_samples = X.count() if hasattr(X, \"count\") else 0\n\n if n_splits == 1:\n n_rows_fold = n_samples\n elif is_classifier:\n n_classes = len(set(y))\n n_rows_unstratified = (n_samples // n_splits) * (n_splits - 1)\n # in stratified case, fold sizes can differ by up to n_classes\n n_rows_fold = max(1, n_rows_unstratified - n_classes)\n else:\n n_rows_fold = (n_samples // n_splits) * (n_splits - 1)\n schema_X = fold_schema_aux(X, n_rows_fold)\n schema_y = fold_schema_aux(y, n_rows_fold)\n result = {\"properties\": {\"X\": schema_X, \"y\": schema_y}}\n return result\n\n\ndef cross_val_score_track_trials(\n estimator,\n X,\n y=None,\n scoring=accuracy_score,\n cv=5,\n args_to_scorer=None,\n args_to_cv=None,\n **fit_params,\n):\n \"\"\"\n Use the given estimator to perform fit and predict for splits defined by 'cv' and compute the given score on\n each of the splits.\n\n Parameters\n ----------\n\n estimator: A valid sklearn_wrapper estimator\n X, y: Valid data and target values that work with the estimator\n scoring: string or a scorer object created using\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html#sklearn.metrics.make_scorer.\n A string from sklearn.metrics.SCORERS.keys() can be used or a scorer created from one of\n sklearn.metrics (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics).\n A completely custom scorer object can be created from a python function following the example at\n https://scikit-learn.org/stable/modules/model_evaluation.html\n The metric has to return a scalar value,\n cv: an integer or an object that has a split function as a generator yielding (train, test) splits as arrays of indices.\n Integer value is used as number of folds in sklearn.model_selection.StratifiedKFold, default is 5.\n Note that any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators can be used here.\n args_to_scorer: A dictionary of additional keyword arguments to pass to the scorer.\n Used for cases where the scorer has a signature such as ``scorer(estimator, X, y, **kwargs)``.\n args_to_cv: A dictionary of additional keyword arguments to pass to the split method of cv.\n This is only applicable when cv is not an integer.\n Returns\n -------\n cv_results: a list of scores corresponding to each cross validation fold\n \"\"\"\n if isinstance(cv, int):\n cv = StratifiedKFold(cv)\n\n if args_to_scorer is None:\n args_to_scorer = {}\n if args_to_cv is None:\n args_to_cv = {}\n scorer = check_scoring(estimator, scoring=scoring)\n cv_results: List[float] = []\n log_loss_results = []\n time_results = []\n for train, test in cv.split(X, y, **args_to_cv):\n X_train, y_train = split_with_schemas(estimator, X, y, train)\n X_test, y_test = split_with_schemas(estimator, X, y, test, train)\n start = time.time()\n # Not calling sklearn.base.clone() here, because:\n # (1) For Lale pipelines, clone() calls the pipeline constructor\n # with edges=None, so the resulting topology is incorrect.\n # (2) For Lale individual operators, the fit() method already\n # clones the impl object, so cloning again is 
redundant.\n trained = estimator.fit(X_train, y_train, **fit_params)\n score_value = scorer(trained, X_test, y_test, **args_to_scorer)\n execution_time = time.time() - start\n # not all estimators have predict probability\n try:\n y_pred_proba = trained.predict_proba(X_test)\n logloss = log_loss(y_true=y_test, y_pred=y_pred_proba)\n log_loss_results.append(logloss)\n except BaseException:\n logger.debug(\"Warning, log loss cannot be computed\")\n cv_results.append(score_value)\n time_results.append(execution_time)\n result = (\n np.array(cv_results).mean(),\n np.array(log_loss_results).mean(),\n np.array(time_results).mean(),\n )\n return result\n\n\ndef cross_val_score(estimator, X, y=None, scoring=accuracy_score, cv=5):\n \"\"\"\n Use the given estimator to perform fit and predict for splits defined by 'cv' and compute the given score on\n each of the splits.\n\n Parameters\n ----------\n\n estimator: A valid sklearn_wrapper estimator\n X, y: Valid data and target values that work with the estimator\n scoring: a scorer object from sklearn.metrics (https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics)\n Default value is accuracy_score.\n cv: an integer or an object that has a split function as a generator yielding (train, test) splits as arrays of indices.\n Integer value is used as number of folds in sklearn.model_selection.StratifiedKFold, default is 5.\n Note that any of the iterators from https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation-iterators can be used here.\n\n Returns\n -------\n cv_results: a list of scores corresponding to each cross validation fold\n \"\"\"\n if isinstance(cv, int):\n cv = StratifiedKFold(cv)\n\n cv_results = []\n for train, test in cv.split(X, y):\n X_train, y_train = split_with_schemas(estimator, X, y, train)\n X_test, y_test = split_with_schemas(estimator, X, y, test, train)\n trained_estimator = estimator.fit(X_train, y_train)\n predicted_values = trained_estimator.predict(X_test)\n cv_results.append(scoring(y_test, predicted_values))\n\n return cv_results\n\n\ndef create_individual_op_using_reflection(class_name, operator_name, param_dict):\n instance = None\n if class_name is not None:\n class_name_parts = class_name.split(\".\")\n assert (\n len(class_name_parts)\n ) > 1, (\n \"The class name needs to be fully qualified, i.e. 
module name + class name\"\n )\n module_name = \".\".join(class_name_parts[0:-1])\n class_name = class_name_parts[-1]\n\n module = importlib.import_module(module_name)\n class_ = getattr(module, class_name)\n\n if param_dict is None:\n instance = class_()\n else:\n instance = class_(**param_dict)\n return instance\n\n\nif TYPE_CHECKING:\n import lale.operators\n\n\ndef to_graphviz(\n lale_operator: \"lale.operators.Operator\",\n ipython_display: bool = True,\n call_depth: int = 1,\n **dot_graph_attr,\n):\n import lale.json_operator\n import lale.operators\n import lale.visualize\n\n if not isinstance(lale_operator, lale.operators.Operator):\n raise TypeError(\"The input to to_graphviz needs to be a valid LALE operator.\")\n jsn = lale.json_operator.to_json(lale_operator, call_depth=call_depth + 1)\n dot = lale.visualize.json_to_graphviz(jsn, ipython_display, dot_graph_attr)\n return dot\n\n\ndef println_pos(message, out_file=sys.stdout):\n tb = traceback.extract_stack()[-2]\n match = re.search(r\"<ipython-input-([0-9]+)-\", tb[0])\n if match:\n pos = \"notebook cell [{}] line {}\".format(match[1], tb[1])\n else:\n pos = \"{}:{}\".format(tb[0], tb[1])\n strtime = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n to_log = \"{}: {} {}\".format(pos, strtime, message)\n print(to_log, file=out_file)\n if match:\n os.system(\"echo {}\".format(to_log))\n\n\ndef instantiate_from_hyperopt_search_space(obj_hyperparams, new_hyperparams):\n if isinstance(new_hyperparams, dict) and LALE_NESTED_SPACE_KEY in new_hyperparams:\n sub_params = new_hyperparams[LALE_NESTED_SPACE_KEY]\n\n sub_op = obj_hyperparams\n if isinstance(sub_op, list):\n if len(sub_op) == 1:\n sub_op = sub_op[0]\n else:\n step_index, step_params = list(sub_params)[0]\n if step_index < len(sub_op):\n sub_op = sub_op[step_index]\n sub_params = step_params\n\n return create_instance_from_hyperopt_search_space(sub_op, sub_params)\n\n elif isinstance(new_hyperparams, (list, tuple)):\n assert isinstance(obj_hyperparams, (list, tuple))\n params_len = len(new_hyperparams)\n assert params_len == len(obj_hyperparams)\n res: Optional[List[Any]] = None\n\n for i in range(params_len):\n nhi = new_hyperparams[i]\n ohi = obj_hyperparams[i]\n updated_params = instantiate_from_hyperopt_search_space(ohi, nhi)\n if updated_params is not None:\n if res is None:\n res = list(new_hyperparams)\n res[i] = updated_params\n if res is not None:\n if isinstance(obj_hyperparams, tuple):\n return tuple(res)\n else:\n return res\n # workaround for what seems to be a hyperopt bug\n # where hyperopt returns a tuple even though the\n # hyperopt search space specifies a list\n is_obj_tuple = isinstance(obj_hyperparams, tuple)\n is_new_tuple = isinstance(new_hyperparams, tuple)\n if is_obj_tuple != is_new_tuple:\n if is_obj_tuple:\n return tuple(new_hyperparams)\n else:\n return list(new_hyperparams)\n return None\n\n elif isinstance(new_hyperparams, dict):\n assert isinstance(obj_hyperparams, dict)\n\n for k, sub_params in new_hyperparams.items():\n if k in obj_hyperparams:\n sub_op = obj_hyperparams[k]\n updated_params = instantiate_from_hyperopt_search_space(\n sub_op, sub_params\n )\n if updated_params is not None:\n new_hyperparams[k] = updated_params\n return None\n else:\n return None\n\n\ndef create_instance_from_hyperopt_search_space(\n lale_object, hyperparams\n) -> \"lale.operators.Operator\":\n \"\"\"\n Hyperparams is a n-tuple of dictionaries of hyper-parameters, each\n dictionary corresponds to an operator in the pipeline\n \"\"\"\n # lale_object can either be an 
individual operator, a pipeline or an operatorchoice\n # Validate that the number of elements in the n-tuple is the same\n # as the number of steps in the current pipeline\n\n from lale.operators import (\n BasePipeline,\n OperatorChoice,\n PlannedIndividualOp,\n TrainableOperator,\n TrainablePipeline,\n )\n\n if isinstance(lale_object, PlannedIndividualOp):\n new_hyperparams: Dict[str, Any] = dict_without(hyperparams, \"name\")\n hps = lale_object.hyperparams()\n if hps:\n obj_hyperparams = dict(hps)\n else:\n obj_hyperparams = {}\n\n for k, sub_params in new_hyperparams.items():\n if k in obj_hyperparams:\n sub_op = obj_hyperparams[k]\n updated_params = instantiate_from_hyperopt_search_space(\n sub_op, sub_params\n )\n if updated_params is not None:\n new_hyperparams[k] = updated_params\n\n all_hyperparams = {**obj_hyperparams, **new_hyperparams}\n return lale_object(**all_hyperparams)\n elif isinstance(lale_object, BasePipeline):\n steps = lale_object.steps_list()\n if len(hyperparams) != len(steps):\n raise ValueError(\n \"The number of steps in the hyper-parameter space does not match the number of steps in the pipeline.\"\n )\n op_instances = []\n edges = lale_object.edges()\n # op_map:Dict[PlannedOpType, TrainableOperator] = {}\n op_map = {}\n for op_index, sub_params in enumerate(hyperparams):\n sub_op = steps[op_index]\n op_instance = create_instance_from_hyperopt_search_space(sub_op, sub_params)\n assert isinstance(op_instance, TrainableOperator)\n assert (\n isinstance(sub_op, OperatorChoice)\n or sub_op.class_name() == op_instance.class_name()\n ), f\"sub_op {sub_op.class_name()}, op_instance {op_instance.class_name()}\"\n op_instances.append(op_instance)\n op_map[sub_op] = op_instance\n\n # trainable_edges:List[Tuple[TrainableOperator, TrainableOperator]]\n try:\n trainable_edges = [(op_map[x], op_map[y]) for (x, y) in edges]\n except KeyError as e:\n raise ValueError(\n \"An edge was found with an endpoint that is not a step (\" + str(e) + \")\"\n )\n\n return TrainablePipeline(op_instances, trainable_edges, ordered=True) # type: ignore\n elif isinstance(lale_object, OperatorChoice):\n # Hyperopt search space for an OperatorChoice is generated as a dictionary with a single element\n # corresponding to the choice made, the only key is the index of the step and the value is\n # the params corresponding to that step.\n step_index: int\n choices = lale_object.steps_list()\n\n if len(choices) == 1:\n step_index = 0\n else:\n step_index_str, hyperparams = list(hyperparams.items())[0]\n step_index = int(step_index_str)\n step_object = choices[step_index]\n return create_instance_from_hyperopt_search_space(step_object, hyperparams)\n else:\n assert False, f\"Unknown operator type: {type(lale_object)}\"\n\n\ndef import_from_sklearn_pipeline(sklearn_pipeline, fitted=True, is_hyperparam=False):\n # For all pipeline steps, identify equivalent lale wrappers if present,\n # if not, call make operator on sklearn classes and create a lale pipeline.\n # For higher order operators, we allow hyperparameters to be trainable even with\n # fitted is True. 
This is achieved using the is_hyperparam flag.\n\n def find_lale_wrapper(sklearn_obj):\n module_names = [\n \"lale.lib.sklearn\",\n \"lale.lib.autoai_libs\",\n \"lale.lib.xgboost\",\n \"lale.lib.lightgbm\",\n \"lale.lib.snapml\",\n ]\n\n try:\n import autoai_ts_libs # type: ignore # noqa\n\n module_names.append(\"lale.lib.autoai_ts_libs\")\n except ImportError:\n pass\n\n lale_wrapper_found = False\n class_name = sklearn_obj.__class__.__name__\n for module_name in module_names:\n try:\n module = importlib.import_module(module_name)\n except ModuleNotFoundError:\n continue\n try:\n class_ = getattr(module, class_name)\n lale_wrapper_found = True\n break\n except AttributeError:\n continue\n else:\n return lale_wrapper_found, sklearn_obj\n return lale_wrapper_found, class_\n\n import lale.operators\n import lale.type_checking\n\n sklearn_obj = sklearn_pipeline\n\n if isinstance(sklearn_obj, lale.operators.TrainableIndividualOp) and fitted:\n if hasattr(sklearn_obj, \"_trained\"):\n return sklearn_obj._trained\n elif is_hyperparam or not hasattr(\n sklearn_obj._impl_instance(), \"fit\"\n ): # Operators such as NoOp do not have a fit, so return them as is.\n return sklearn_obj\n else:\n raise ValueError(\n f\"\"\"The input pipeline has an operator {sklearn_obj} that is not trained and fitted is set to True,\n please pass fitted=False if you want a trainable pipeline as output.\"\"\"\n )\n elif isinstance(sklearn_obj, lale.operators.Operator):\n return sklearn_obj\n\n if isinstance(sklearn_pipeline, sklearn.pipeline.Pipeline):\n nested_pipeline_steps = sklearn_pipeline.named_steps\n nested_pipeline_lale_named_steps = [\n (\n nested_pipeline_step[0],\n import_from_sklearn_pipeline(\n nested_pipeline_step[1], fitted=fitted, is_hyperparam=is_hyperparam\n ),\n )\n for nested_pipeline_step in nested_pipeline_steps.items()\n ]\n if type(sklearn_pipeline) == sklearn.pipeline.Pipeline:\n nested_pipeline_lale_objects = [\n nested_pipeline_lale_named_step[1]\n for nested_pipeline_lale_named_step in nested_pipeline_lale_named_steps\n ]\n lale_op_obj = lale.operators.make_pipeline(*nested_pipeline_lale_objects)\n else:\n lale_wrapper_found, wrapper_class = find_lale_wrapper(sklearn_pipeline)\n if lale_wrapper_found:\n # This is a custom subclass of sklearn pipeline, so use the wrapper class\n # instead of creating a lale pipeline\n # We assume it has a hyperparameter `steps`.\n if (\n not fitted\n ): # If fitted is False, we do not want to return a Trained operator.\n lale_op = wrapper_class\n else:\n lale_op = lale.operators.TrainedIndividualOp(\n wrapper_class._name,\n wrapper_class._impl,\n wrapper_class._schemas,\n None,\n _lale_trained=True,\n )\n lale_op_obj = lale_op(steps=nested_pipeline_lale_named_steps)\n else: # no conversion to lale if a wrapper is not found for a subclass of pipeline\n return sklearn_pipeline\n elif isinstance(sklearn_pipeline, sklearn.pipeline.FeatureUnion):\n transformer_list = sklearn_pipeline.transformer_list\n concat_predecessors = [\n import_from_sklearn_pipeline(\n transformer[1], fitted=fitted, is_hyperparam=is_hyperparam\n )\n for transformer in transformer_list\n ]\n lale_op_obj = lale.operators.make_union(*concat_predecessors)\n else:\n # Validate that the sklearn_obj is a valid sklearn-compatible object\n if sklearn_obj is None or not hasattr(sklearn_obj, \"get_params\"):\n raise ValueError(\n f\"The input pipeline has a step {sklearn_obj} that is not scikit-learn compatible.\"\n )\n orig_hyperparams = sklearn_obj.get_params(deep=False)\n higher_order = False\n for 
hp_name, hp_val in orig_hyperparams.items():\n higher_order = higher_order or hasattr(hp_val, \"get_params\")\n if higher_order:\n hyperparams = {}\n for hp_name, hp_val in orig_hyperparams.items():\n if hasattr(hp_val, \"get_params\"):\n nested_op = import_from_sklearn_pipeline(\n hp_val, fitted, is_hyperparam=True\n ) # allow nested_op to be trainable\n hyperparams[hp_name] = nested_op\n else:\n hyperparams[hp_name] = hp_val\n else:\n hyperparams = orig_hyperparams\n\n lale_wrapper_found, class_ = find_lale_wrapper(sklearn_obj)\n if not lale_wrapper_found:\n return class_ # Return the original object\n\n if (\n not fitted\n ): # If fitted is False, we do not want to return a Trained operator.\n lale_op = class_\n else:\n lale_op = lale.operators.TrainedIndividualOp(\n class_._name, class_._impl, class_._schemas, None, _lale_trained=True\n )\n class_ = lale_op(**hyperparams)\n lale_op_obj = class_\n if lale_wrapper_found and hasattr(class_._impl_instance(), \"_wrapped_model\"):\n wrapped_model = copy.deepcopy(sklearn_obj)\n class_._impl_instance()._wrapped_model = wrapped_model\n else: # If there is no lale wrapper, there is no _wrapped_model\n class_._impl = copy.deepcopy(sklearn_obj)\n class_._impl_class_ = class_._impl.__class__\n lale_op_obj = class_\n return lale_op_obj\n\n\nclass val_wrapper:\n \"\"\"This is used to wrap values that cause problems for hyper-optimizer backends\n lale will unwrap these when given them as the value of a hyper-parameter\"\"\"\n\n def __init__(self, base):\n self._base = base\n\n def unwrap_self(self):\n return self._base\n\n @classmethod\n def unwrap(cls, obj):\n if isinstance(obj, cls):\n return cls.unwrap(obj.unwrap_self())\n else:\n return obj\n\n\ndef append_batch(data, batch_data):\n if data is None:\n return batch_data\n elif isinstance(data, np.ndarray):\n if isinstance(batch_data, np.ndarray):\n if len(data.shape) == 1 and len(batch_data.shape) == 1:\n return np.concatenate([data, batch_data])\n else:\n return np.vstack((data, batch_data))\n elif isinstance(data, tuple):\n X, y = data\n if isinstance(batch_data, tuple):\n batch_X, batch_y = batch_data\n X = append_batch(X, batch_X)\n y = append_batch(y, batch_y)\n return X, y\n elif torch_installed and isinstance(data, torch.Tensor):\n if isinstance(batch_data, torch.Tensor):\n return torch.cat((data, batch_data))\n try:\n import h5py\n\n if isinstance(data, h5py.File):\n if isinstance(batch_data, tuple):\n batch_X, batch_y = batch_data\n except ModuleNotFoundError:\n pass\n\n # TODO:Handle dataframes\n\n\ndef create_data_loader(X, y=None, batch_size=1, num_workers=0, shuffle=True):\n \"\"\"A function that takes a dataset as input and outputs a Pytorch dataloader.\n\n Parameters\n ----------\n X : Input data.\n The formats supported are Pandas DataFrame, Numpy array,\n a sparse matrix, torch.tensor, torch.utils.data.Dataset, path to a HDF5 file,\n lale.util.batch_data_dictionary_dataset.BatchDataDict,\n a Python dictionary of the format `{\"dataset\": torch.utils.data.Dataset,\n \"collate_fn\":collate_fn for torch.utils.data.DataLoader}`\n y : Labels., optional\n Supported formats are Numpy array or Pandas series, by default None\n batch_size : int, optional\n Number of samples in each batch, by default 1\n num_workers : int, optional\n Number of workers used by the data loader, by default 0\n shuffle: boolean, optional, default True\n Whether to use SequentialSampler or RandomSampler for creating batches\n\n Returns\n -------\n torch.utils.data.DataLoader\n\n Raises\n ------\n TypeError\n 
Raises a TypeError if the input format is not supported.\n \"\"\"\n import torch\n from torch.utils.data import DataLoader, Dataset, TensorDataset\n\n from lale.util.batch_data_dictionary_dataset import BatchDataDict\n from lale.util.hdf5_to_torch_dataset import HDF5TorchDataset\n from lale.util.numpy_torch_dataset import NumpyTorchDataset, numpy_collate_fn\n from lale.util.pandas_torch_dataset import PandasTorchDataset, pandas_collate_fn\n\n collate_fn = None\n worker_init_fn = None\n\n if isinstance(X, Dataset):\n dataset = X\n elif isinstance(X, pd.DataFrame):\n dataset = PandasTorchDataset(X, y)\n collate_fn = pandas_collate_fn\n elif isinstance(X, scipy.sparse.csr.csr_matrix):\n # unfortunately, NumpyTorchDataset won't accept a subclass of np.ndarray\n X = X.toarray()\n if isinstance(y, lale.datasets.data_schemas.NDArrayWithSchema):\n y = y.view(np.ndarray)\n dataset = NumpyTorchDataset(X, y)\n collate_fn = numpy_collate_fn\n elif isinstance(X, np.ndarray):\n # unfortunately, NumpyTorchDataset won't accept a subclass of np.ndarray\n if isinstance(X, lale.datasets.data_schemas.NDArrayWithSchema):\n X = X.view(np.ndarray)\n if isinstance(y, lale.datasets.data_schemas.NDArrayWithSchema):\n y = y.view(np.ndarray)\n dataset = NumpyTorchDataset(X, y)\n collate_fn = numpy_collate_fn\n elif isinstance(X, str): # Assume that this is path to hdf5 file\n dataset = HDF5TorchDataset(X)\n elif isinstance(X, BatchDataDict):\n dataset = X\n\n def my_collate_fn(batch):\n return batch[\n 0\n ] # because BatchDataDict's get_item returns a batch, so no collate is required.\n\n return DataLoader(\n dataset, batch_size=1, collate_fn=my_collate_fn, shuffle=shuffle\n )\n elif isinstance(X, dict): # Assumed that it is data indexed by batch number\n if \"dataset\" in X:\n dataset = X[\"dataset\"]\n collate_fn = X.get(\"collate_fn\", None)\n worker_init_fn = getattr(dataset, \"worker_init_fn\", None)\n else:\n return [X]\n elif isinstance(X, torch.Tensor) and y is not None:\n if isinstance(y, np.ndarray):\n y = torch.from_numpy(y)\n dataset = TensorDataset(X, y)\n elif isinstance(X, torch.Tensor):\n dataset = TensorDataset(X)\n else:\n raise TypeError(\n \"Can not create a data loader for a dataset with type {}\".format(type(X))\n )\n return DataLoader(\n dataset,\n batch_size=batch_size,\n collate_fn=collate_fn,\n num_workers=num_workers,\n worker_init_fn=worker_init_fn,\n shuffle=shuffle,\n )\n\n\ndef write_batch_output_to_file(\n file_obj,\n file_path,\n total_len,\n batch_idx,\n batch_X,\n batch_y,\n batch_out_X,\n batch_out_y,\n):\n if file_obj is None and file_path is None:\n raise ValueError(\"Only one of the file object or file path can be None.\")\n if file_obj is None:\n import h5py\n\n file_obj = h5py.File(file_path, \"w\")\n # estimate the size of the dataset based on the first batch output size\n transform_ratio = int(len(batch_out_X) / len(batch_X))\n if len(batch_out_X.shape) == 1:\n h5_data_shape = (transform_ratio * total_len,)\n elif len(batch_out_X.shape) == 2:\n h5_data_shape = (transform_ratio * total_len, batch_out_X.shape[1])\n elif len(batch_out_X.shape) == 3:\n h5_data_shape = (\n transform_ratio * total_len,\n batch_out_X.shape[1],\n batch_out_X.shape[2],\n )\n else:\n raise ValueError(\n \"batch_out_X is expected to be a 1-d, 2-d or 3-d array. 
Any other data types are not handled.\"\n )\n dataset = file_obj.create_dataset(\n name=\"X\", shape=h5_data_shape, chunks=True, compression=\"gzip\"\n )\n if batch_out_y is None and batch_y is not None:\n batch_out_y = batch_y\n if batch_out_y is not None:\n if len(batch_out_y.shape) == 1:\n h5_labels_shape = (transform_ratio * total_len,)\n elif len(batch_out_y.shape) == 2:\n h5_labels_shape = (transform_ratio * total_len, batch_out_y.shape[1])\n else:\n raise ValueError(\n \"batch_out_y is expected to be a 1-d or 2-d array. Any other data types are not handled.\"\n )\n dataset = file_obj.create_dataset(\n name=\"y\", shape=h5_labels_shape, chunks=True, compression=\"gzip\"\n )\n dataset = file_obj[\"X\"]\n dataset[\n batch_idx * len(batch_out_X) : (batch_idx + 1) * len(batch_out_X)\n ] = batch_out_X\n if batch_out_y is not None or batch_y is not None:\n labels = file_obj[\"y\"]\n if batch_out_y is not None:\n labels[\n batch_idx * len(batch_out_y) : (batch_idx + 1) * len(batch_out_y)\n ] = batch_out_y\n else:\n labels[batch_idx * len(batch_y) : (batch_idx + 1) * len(batch_y)] = batch_y\n return file_obj\n\n\ndef add_missing_values(orig_X, missing_rate=0.1, seed=None):\n # see scikit-learn.org/stable/auto_examples/impute/plot_missing_values.html\n n_samples, n_features = orig_X.shape\n n_missing_samples = int(n_samples * missing_rate)\n if seed is None:\n rng = np.random.RandomState()\n else:\n rng = np.random.RandomState(seed)\n missing_samples = np.zeros(n_samples, dtype=bool)\n missing_samples[:n_missing_samples] = True\n rng.shuffle(missing_samples)\n missing_features = rng.randint(0, n_features, n_missing_samples)\n missing_X = orig_X.copy()\n if isinstance(missing_X, np.ndarray):\n missing_X[missing_samples, missing_features] = np.nan\n else:\n assert isinstance(missing_X, pd.DataFrame)\n i_missing_sample = 0\n for i_sample in range(n_samples):\n if missing_samples[i_sample]:\n i_feature = missing_features[i_missing_sample]\n i_missing_sample += 1\n missing_X.iloc[i_sample, i_feature] = np.nan\n return missing_X\n\n\n# helpers for manipulating (extended) sklearn style paths.\n# documentation of the path format is part of the operators module docstring\n\n\ndef partition_sklearn_params(\n d: Dict[str, Any]\n) -> Tuple[Dict[str, Any], Dict[str, Dict[str, Any]]]:\n sub_parts: Dict[str, Dict[str, Any]] = {}\n main_parts: Dict[str, Any] = {}\n\n for k, v in d.items():\n ks = k.split(\"__\", 1)\n if len(ks) == 1:\n assert k not in main_parts\n main_parts[k] = v\n else:\n assert len(ks) == 2\n bucket: Dict[str, Any] = {}\n group: str = ks[0]\n param: str = ks[1]\n if group in sub_parts:\n bucket = sub_parts[group]\n else:\n sub_parts[group] = bucket\n assert param not in bucket\n bucket[param] = v\n return (main_parts, sub_parts)\n\n\ndef partition_sklearn_choice_params(d: Dict[str, Any]) -> Tuple[int, Dict[str, Any]]:\n discriminant_value: int = -1\n choice_parts: Dict[str, Any] = {}\n\n for k, v in d.items():\n if k == discriminant_name:\n assert discriminant_value == -1\n discriminant_value = int(v)\n else:\n k_rest = unnest_choice(k)\n choice_parts[k_rest] = v\n assert discriminant_value != -1\n return (discriminant_value, choice_parts)\n\n\nDUMMY_SEARCH_SPACE_GRID_PARAM_NAME: str = \"$\"\ndiscriminant_name: str = \"?\"\nchoice_prefix: str = \"?\"\nstructure_type_name: str = \"#\"\nstructure_type_list: str = \"list\"\nstructure_type_tuple: str = \"tuple\"\nstructure_type_dict: str = \"dict\"\n\n\ndef get_name_and_index(name: str) -> Tuple[str, int]:\n \"\"\"given a name of the form 
\"name@i\", returns (name, i)\n if given a name of the form \"name\", returns (name, 0)\n \"\"\"\n splits = name.split(\"@\", 1)\n if len(splits) == 1:\n return splits[0], 0\n else:\n return splits[0], int(splits[1])\n\n\ndef make_degen_indexed_name(name, index):\n return f\"{name}@{index}\"\n\n\ndef make_indexed_name(name, index):\n if index == 0:\n return name\n else:\n return f\"{name}@{index}\"\n\n\ndef make_array_index_name(index, is_tuple: bool = False):\n sep = \"##\" if is_tuple else \"#\"\n return f\"{sep}{str(index)}\"\n\n\ndef is_numeric_structure(structure_type: str):\n\n if structure_type == \"list\" or structure_type == \"tuple\":\n return True\n elif structure_type == \"dict\":\n return False\n else:\n assert False, f\"Unknown structure type {structure_type} found\"\n\n\nV = TypeVar(\"V\")\n\n\ndef nest_HPparam(name: str, key: str):\n if key == DUMMY_SEARCH_SPACE_GRID_PARAM_NAME:\n # we can get rid of the dummy now, since we have a name for it\n return name\n return name + \"__\" + key\n\n\ndef nest_HPparams(name: str, grid: Mapping[str, V]) -> Dict[str, V]:\n return {(nest_HPparam(name, k)): v for k, v in grid.items()}\n\n\ndef nest_all_HPparams(\n name: str, grids: Iterable[Mapping[str, V]]\n) -> List[Dict[str, V]]:\n \"\"\"Given the name of an operator in a pipeline, this transforms every key(parameter name) in the grids\n to use the operator name as a prefix (separated by __). This is the convention in scikit-learn pipelines.\n \"\"\"\n return [nest_HPparams(name, grid) for grid in grids]\n\n\ndef nest_choice_HPparam(key: str):\n return choice_prefix + key\n\n\ndef nest_choice_HPparams(grid: Mapping[str, V]) -> Dict[str, V]:\n return {(nest_choice_HPparam(k)): v for k, v in grid.items()}\n\n\ndef nest_choice_all_HPparams(grids: Iterable[Mapping[str, V]]) -> List[Dict[str, V]]:\n \"\"\"this transforms every key(parameter name) in the grids\n to be nested under a choice, using a ? as a prefix (separated by __). 
This is the convention in scikit-learn pipelines.\n \"\"\"\n return [nest_choice_HPparams(grid) for grid in grids]\n\n\ndef unnest_choice(k: str) -> str:\n assert k.startswith(choice_prefix)\n return k[len(choice_prefix) :]\n\n\ndef unnest_HPparams(k: str) -> List[str]:\n return k.split(\"__\")\n\n\ndef are_hyperparameters_equal(hyperparam1, hyperparam2):\n if isinstance(\n hyperparam1, np.ndarray\n ): # hyperparam2 is from schema default, so it may not always be an array\n return np.all(hyperparam1 == hyperparam2)\n else:\n return hyperparam1 == hyperparam2\n\n\ndef _is_ast_subscript(expr):\n return isinstance(expr, ast.Subscript)\n\n\ndef _is_ast_attribute(expr):\n return isinstance(expr, ast.Attribute)\n\n\ndef _is_ast_constant(expr):\n return isinstance(expr, ast.Constant)\n\n\ndef _is_ast_subs_or_attr(expr):\n return isinstance(expr, ast.Subscript) or isinstance(expr, ast.Attribute)\n\n\ndef _is_ast_call(expr):\n return isinstance(expr, ast.Call)\n\n\ndef _is_ast_name(expr):\n return isinstance(expr, ast.Name)\n\n\ndef _ast_func_id(expr):\n if isinstance(expr, ast.Name):\n return expr.id\n else:\n raise ValueError(\"function name expected\")\n\n\ndef _is_df(df):\n return _is_pandas_df(df) or _is_spark_df(df)\n\n\ndef _is_pandas_series(df):\n return isinstance(df, pd.Series)\n\n\ndef _is_pandas_df(df):\n return isinstance(df, pd.DataFrame)\n\n\ndef _is_spark_df(df):\n if spark_installed:\n return isinstance(df, spark_df)\n else:\n return False\n\n\ndef _is_spark_with_index(df):\n if spark_installed:\n return isinstance(df, lale.datasets.data_schemas.SparkDataFrameWithIndex)\n else:\n return False\n\n\ndef _ensure_pandas(df) -> pd.DataFrame:\n if _is_spark_df(df):\n return df.toPandas()\n assert _is_pandas_df(df), type(df)\n return df\n\n\ndef _get_subscript_value(subscript_expr):\n if isinstance(subscript_expr.slice, ast.Constant): # for Python 3.9\n subscript_value = subscript_expr.slice.value\n else:\n subscript_value = subscript_expr.slice.value.s # type: ignore\n return subscript_value\n\n\nclass GenSym:\n def __init__(self, names: Set[str]):\n self._names = names\n\n def __call__(self, prefix):\n if prefix in self._names:\n suffix = 0\n while True:\n result = f\"{prefix}_{suffix}\"\n if result not in self._names:\n break\n suffix += 1\n else:\n result = prefix\n self._names |= {result}\n return result\n" ]
[ [ "sklearn.ensemble.ExtraTreesClassifier" ], [ "numpy.concatenate", "numpy.array", "torch.cat", "sklearn.model_selection.StratifiedKFold", "numpy.zeros", "numpy.random.RandomState", "sklearn.metrics.check_scoring", "sklearn.utils.metaestimators._safe_split", "torch.from_numpy", "torch.utils.data.DataLoader", "sklearn.metrics.log_loss", "numpy.all", "numpy.issubdtype", "numpy.iinfo", "torch.utils.data.TensorDataset", "numpy.vstack" ] ]
jinseuk56/gms_dm_python
[ "6d47715830e7cc8e5c008e2ecf154f045942358b" ]
[ "codes/virtual_annular_detector.py" ]
[ "# Jinseok Ryu\n# Electron Microscopy and Spectroscopy Lab.\n# Seoul National University\n# last update : 20210604\n# virtual STEM imaging for 4D-STEM data\n\n\n# ********************************************************************************\nprint(\"Execute Python script in GMS 3\")\n\nimport DigitalMicrograph as DM\nfrom scipy import optimize\nimport numpy as np\nimport sys\nsys.argv.extend(['-a', ' '])\nimport matplotlib.pyplot as plt\n\nprint(\"Libraries have been imported completely\")\n# ********************************************************************************\n\n# refer to https://scipy-cookbook.readthedocs.io/items/FittingData.html\n\nif ( False == DM.IsScriptOnMainThread() ):\n\tprint( ' MatplotLib scripts require to be run on the main thread.' )\n\texit()\n\n\ndef gaussian(height, center_x, center_y, width_x, width_y):\n \"\"\"Returns a gaussian function with the given parameters\"\"\"\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)\n\ndef moments(data):\n \"\"\"Returns (height, x, y, width_x, width_y)\n the gaussian parameters of a 2D distribution by calculating its\n moments\"\"\"\n total = data.sum()\n X, Y = np.indices(data.shape) # row, col\n x = (X*data).sum()/total # row\n y = (Y*data).sum()/total # col\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum()) # row\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum()) # col\n height = data.max()\n return height, x, y, width_x, width_y\n\ndef fitgaussian(data):\n \"\"\"Returns (height, x, y, width_x, width_y)\n the gaussian parameters of a 2D distribution found by a fit\"\"\"\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p\n\n\ndef gaussian_center(image, cbox_edge=0):\n y, x = np.indices(image.shape)\n if not cbox_edge:\n center = np.array([(y.max()-y.min())/2.0, (x.max()-x.min())/2.0])\n \n else:\n cbox_outy = int(image.shape[0]/2 - cbox_edge/2)\n cbox_outx = int(image.shape[1]/2 - cbox_edge/2)\n center_box = image[cbox_outy:-cbox_outy, cbox_outx:-cbox_outx]\n fit_params = fitgaussian(center_box)\n (_, center_y, center_x, _, _) = fit_params\n center = [center_y+cbox_outy, center_x+cbox_outx]\n \n return center\n\n\ndef fourd_roll_axis(stack):\n stack = np.rollaxis(np.rollaxis(stack, 2, 0), 3, 1)\n return stack\n\n\nfd = DM.GetFrontImage()\nprint(fd)\n\norigin0, scale0, unit0 = fd.GetDimensionCalibration(0, 0)\nprint(origin0, scale0, unit0)\norigin1, scale1, unit1 = fd.GetDimensionCalibration(1, 0)\nprint(origin1, scale1, unit1)\norigin2, scale2, unit2 = fd.GetDimensionCalibration(2, 0)\nprint(origin2, scale2, unit2)\norigin3, scale3, unit3 = fd.GetDimensionCalibration(3, 0)\nprint(origin3, scale3, unit3)\n\nprint(\"loading 4D-STEM data\")\nstack_4d_cropped = fourd_roll_axis(fd.GetNumArray())\nstack_4d_cropped = np.nan_to_num(stack_4d_cropped)\nprint(stack_4d_cropped.shape)\nprint(np.max(stack_4d_cropped))\nprint(np.min(stack_4d_cropped))\nprint(np.mean(stack_4d_cropped))\n\nf_shape = stack_4d_cropped.shape\n\nprint(\"maximum-normalizing\")\nstack_4d_cropped = stack_4d_cropped / np.max(stack_4d_cropped)\nprint(np.max(stack_4d_cropped))\nprint(np.min(stack_4d_cropped))\nprint(np.mean(stack_4d_cropped))\n\npacbed = np.mean(stack_4d_cropped, axis=(0,1))\n\ncheck_center = input(\"Do you want to input the 
center position manually? (Y / N): \")\nif check_center == \"Y\":\n\tx_ct = float(input(\"write the x index of the center: \"))\n\ty_ct = float(input(\"write the y index of the center: \"))\n\tct = [y_ct, x_ct]\n\t\nelif check_center==\"N\":\n\t#find center position\n\tq_text = \"\"\"Select one option for finding the center position.\n\t1: Gaussian fitting - PACBED\n\t2: Center of mass - PACBED\"\"\"\n\n\tq_check = int(input(q_text))\n\n\tif q_check == 1:\n\t\tcb = int(input(\"size of the fitting box (data index): \"))\n\t\tct = gaussian_center(pacbed, cbox_edge=cb)\n\t\tprint(\"center position\")\n\t\tprint(ct)\n\n\telif q_check == 2:\n\t\tY, X = np.indices(pacbed.shape)\n\t\tcom_y = np.sum(pacbed * Y) / np.sum(pacbed)\n\t\tcom_x = np.sum(pacbed * X) / np.sum(pacbed)\n\t\tct = [com_y, com_x]\n\t\t\n\t\tprint(\"center position\")\n\t\tprint(ct)\n\t\t\n\telse:\n\t\tprint(\"*\"*50)\n\t\tprint(\"wrong input !\")\n\t\tprint(\"*\"*50)\n\t\texit()\n\nelse:\n\tprint(\"*\"*50)\n\tprint(\"wrong input !\")\n\tprint(\"*\"*50)\n\texit()\n\nfig2, ax2 = plt.subplots(1, 1, figsize=(5, 5))\nax2.imshow(pacbed, cmap=\"gray\")\nax2.scatter(ct[1], ct[0], c=\"red\")\nax2.axis(\"off\")\n\ndef max_rad(shape, center=None):\n    y, x = np.indices(shape)\n    if not center:\n        # default center in (row, col) order, consistent with its use below and with radial_indices\n        center = np.array([(y.max()-y.min())/2.0, (x.max()-x.min())/2.0])\n    \n    r = np.hypot(y - center[0], x - center[1])\n    \n    return np.max(r)\n \ndef radial_indices(shape, radial_range, scale, center=None):\n    y, x = np.indices(shape)\n    if not center:\n        center = np.array([(y.max()-y.min())/2.0, (x.max()-x.min())/2.0])\n    \n    r = np.hypot(y - center[0], x - center[1]) * scale\n    ri = np.ones(r.shape)\n    \n    if len(np.unique(radial_range)) > 1:\n        ri[np.where(r <= radial_range[0])] = 0\n        ri[np.where(r > radial_range[1])] = 0\n    \n    else:\n        r = np.round(r)\n        ri[np.where(r != round(radial_range[0]))] = 0\n    \n    return ri\n\n\nmrad_per_pixel = 1\nradii = np.arange(max_rad(f_shape[2:], center=ct)) * mrad_per_pixel\nprint(\"maximum angle = %.2f\"%(radii[-1]))\n\ncheck_det = input(\"Do you want a STEM image for a specific annular region ? 
(Y or N) \")\n\nif check_det == \"Y\":\n\tdet_inner_ind = int(input(\"index of the inner angle (positive integer): \"))\n\tdet_outer_ind = int(input(\"index of the outer angle (positive integer): \"))\n\n\t# build the binary annular mask between the chosen angles and integrate\n\t# each diffraction pattern over it to obtain the dark-field image\n\tri = radial_indices(f_shape[2:], [radii[det_inner_ind], radii[det_outer_ind]], mrad_per_pixel, center=ct)\n\timg_temp = np.sum(np.multiply(stack_4d_cropped, ri), axis=(2, 3))\n\n\tdet_img = DM.CreateImage(ri.copy())\n\tdet_img.SetName(\"Detector\")\n\tdet_img.SetDimensionCalibration(0, origin2, scale2, unit2, 0)\n\tdet_img.SetDimensionCalibration(1, origin3, scale3, unit3, 0)\n\tdet_img.ShowImage()\n\n\toutput_img = DM.CreateImage(img_temp.copy())\n\toutput_img.SetName(\"Annular Dark-field STEM image\")\n\toutput_img.SetDimensionCalibration(0, origin0, scale0, unit0, 0)\n\toutput_img.SetDimensionCalibration(1, origin1, scale1, unit1, 0)\n\toutput_img.ShowImage()\n\t\n\nelif check_det == \"N\":\n\tdetector = []\n\tstem_img = []\n\tfor i in range(len(radii)):\n\t\tri = radial_indices(f_shape[2:], [radii[i]], mrad_per_pixel, center=ct)\n\t\tdetector.append(ri)\n\t\tstem_img.append(np.sum(np.multiply(stack_4d_cropped, ri), axis=(2, 3)))\n\t\t\n\tdetector = np.asarray(detector).reshape(1, -1, f_shape[2], f_shape[3])\n\tprint(detector.shape)\n\tdetector = fourd_roll_axis(detector)\n\tprint(detector.shape)\n\t\n\tstem_img = np.asarray(stem_img).reshape(1, -1, f_shape[0], f_shape[1])\n\tprint(stem_img.shape)\n\tstem_img = fourd_roll_axis(stem_img)\n\tprint(stem_img.shape)\n\t\n\tdet_img = DM.CreateImage(detector.copy())\n\tdet_img.SetName(\"Virtual Detector\")\n\tdet_img.SetDimensionCalibration(2, origin2, scale2, unit2, 0)\n\tdet_img.SetDimensionCalibration(3, origin3, scale3, unit3, 0)\n\tdet_img.ShowImage()\n\t\n\tstem = DM.CreateImage(stem_img.copy())\n\tstem.SetName(\"STEM image\")\n\tstem.SetDimensionCalibration(2, origin0, scale0, unit0, 0)\n\tstem.SetDimensionCalibration(3, origin1, scale1, unit1, 0)\n\tstem.ShowImage()\n\t\n\nelse:\n\tprint(\"*\"*50)\n\tprint(\"Wrong input !\")\n\tprint(\"*\"*50)\n\texit()\n\npacbed_dm = DM.CreateImage(pacbed.copy())\npacbed_dm.SetName(\"PACBED\")\npacbed_dm.SetDimensionCalibration(0, origin2, scale2, unit2, 0)\npacbed_dm.SetDimensionCalibration(1, origin3, scale3, unit3, 0)\npacbed_dm.ShowImage()\n\nplt.show()" ]
[ [ "numpy.rollaxis", "numpy.min", "numpy.mean", "numpy.exp", "numpy.multiply", "numpy.where", "numpy.max", "numpy.nan_to_num", "matplotlib.pyplot.subplots", "numpy.arange", "numpy.hypot", "numpy.round", "matplotlib.pyplot.show", "numpy.asarray", "numpy.sum", "numpy.ones", "scipy.optimize.leastsq", "numpy.indices", "numpy.unique" ] ]
WxBDM/nwsapy
[ "728090300c1b4cd97b0e2f50f600c6168b04e0b1" ]
[ "nwsapy/endpoints/server_ping.py" ]
[ "import pandas as pd\n\nfrom nwsapy.core.inheritance.base_endpoint import BaseEndpoint\n\nclass ServerPing(BaseEndpoint):\n \"\"\"Sends a ping to the server.\n \"\"\"\n def __init__(self):\n super(ServerPing, self).__init__()\n \n def to_dict(self):\n \"\"\"Returns the glossary in a dictionary format.\n\n :return: Dictionary containing the values of the glossary.\n :rtype: dict\n \"\"\"\n return self.values\n\n def to_df(self):\n \"\"\"Returns the values of the glossary in a pandas dataframe structure.\n\n :return: Dataframe of the values of the glossary.\n :rtype: pandas.DataFrame\n \"\"\"\n data = {'Term' : list(self.values.keys()),\n 'Definition' : list(self.values.values())}\n return pd.DataFrame.from_dict(data)" ]
[ [ "pandas.DataFrame.from_dict" ] ]
samarthbhargav/nlp1-project
[ "4cec7ffb0ae800b8fc6ba57e4177b3f38ae28d0c" ]
[ "experiment/model.py" ]
[ "import numpy as np\nimport argparse\nimport codecs\nimport math\nimport os\nimport sys\nfrom itertools import count, takewhile, zip_longest\nfrom collections import defaultdict\n\nimport utils\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nsys.path.insert(0, os.path.abspath(\"../\"))\n\nfrom opennmt import opts\nfrom opennmt.onmt import IO\nfrom opennmt import onmt\nfrom data import *\n\nnlp_en_notok = spacy.load(\"en\", disable=[\"tokenizer\"])\nnlp_nl_notok = spacy.load(\"nl\", disable=[\"tokenizer\"])\n\n\n\ndef report_score(name, score_total, words_total):\n print(\"%s AVG SCORE: %.4f, %s PPL: %.4f\" % (\n name, score_total / words_total,\n name, math.exp(-score_total / words_total)))\n\n\ndef get_src_words(src_indices, index2str):\n words = []\n raw_words = (index2str[i] for i in src_indices)\n words = takewhile(lambda w: w != onmt.IO.PAD_WORD, raw_words)\n return \" \".join(words)\n\n\ndef construct_args(model_path):\n args = [\"-model\", model_path, \"-src\", \"europalProcessedEN.txt\"]\n sys.argv = sys.argv[:] + args\n\n\ndef plot_attention(path, source_words, target_words, attention):\n # Set up figure with colorbar\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n\n attention = attention[0]\n\n\n attention = attention.numpy()\n cax = ax.matshow(attention[:, :len(source_words) + 1], cmap='RdPu')\n fig.colorbar(cax, ticks=[0, 1] )\n\n en_orig_tokens = \" \".join(source_words)\n en_tokens = nlp_en(spacy_fix(en_orig_tokens))\n en_token_pos = []\n for tok in en_tokens:\n en_token_pos.append(tok.pos_ + \" \" + tok.text)\n\n nl_orig_tokens = \" \".join(target_words)\n nl_tokens = nlp_nl(spacy_fix(nl_orig_tokens))\n nl_token_pos = []\n for tok in nl_tokens:\n nl_token_pos.append(tok.pos_ + \" \" + tok.text)\n\n\n ax.set_xticklabels([''] + en_token_pos + ['<EOS>'], rotation=90)\n ax.set_yticklabels([''] + nl_token_pos )\n\n # Show label at every tick\n ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n ax.yaxis.set_major_locator(ticker.MultipleLocator(1))\n\n plt.savefig(path, dpi=600, bbox_inches='tight')\n\ndef spacy_fix(sentence):\n return sentence.replace(\"<unk>\", \"UNK\").replace(\"&apos;\", \"'\")\n\n\nclass Scorer:\n def __init__(self, k=None):\n if k is None:\n k = [3]\n \n # k must be odd\n assert all(_ % 2 != 0 for _ in k)\n \n self.k = k\n \n # total number of sentences\n self.total = 0\n \n # number of perfect verb alignments\n self.perfect_verb_match_pos = 0 \n self.perfect_verb_match_dep = 0\n \n # number of top k verb matches\n self.top_k_verb_match_dep = defaultdict(int)\n self.top_k_verb_match_pos = defaultdict(int)\n\n # confusion matrix for perfect verb alignments\n # structure is POS - count\n self.conf_matrix_dep = defaultdict(int) \n self.conf_matrix_pos = defaultdict(int)\n\n def _find_verb_nl_pos(self, doc):\n \"\"\" \n Finds and returns the position of the verb in the sentence\n \"\"\"\n for index, token in reversed(list(enumerate(doc))):\n if token.pos_ == \"VERB\":\n return index\n \n def _find_verb_nl_dep(self, doc):\n for index, token in reversed(list(enumerate(doc))):\n if token.dep_ == \"ROOT\":\n return index\n\n def _find_verb_en(self, doc):\n \"\"\"\n Finds and returns the position of the first verb in the sentence\n \"\"\"\n for index, token in list(enumerate(doc)):\n if token.dep_ == \"ROOT\":\n return index\n\n def spacy_fix(self, sentence):\n return sentence.replace(\"<unk>\", \"UNK\").replace(\"&apos;\", \"'\")\n\n def accumulate_scores(self, source_sentence, target_sentence, true_sentence,\n attention, 
jaccard_threshold=0.3):\n attention = attention[0]\n \n # fix so that spacy doesn't tokenize <unk> into [< , UNK, >]\n source_sentence = self.spacy_fix(source_sentence)\n target_sentence = self.spacy_fix(target_sentence)\n true_sentence = self.spacy_fix(true_sentence)\n\n en_doc = nlp_en(source_sentence)\n en_doc = [tok for tok in en_doc]\n nl_doc = nlp_nl(target_sentence)\n nl_doc = [tok for tok in nl_doc]\n true_nl_doc = nlp_nl(true_sentence)\n true_nl_doc = [tok for tok in true_nl_doc]\n\n print(\"\\tSentence: {}\\n\\tTranslation:{}\\n\\tActual Translation:{}\".format([(tok.text, tok.pos_,\n tok.dep_) for tok in en_doc],[(tok.text, tok.pos_, tok.dep_) for tok in nl_doc], \n [(tok.text, tok.pos_, tok.dep_) for tok in true_nl_doc]))\n\n jaccard_index = utils.jaccard_index([tok.text for tok in nl_doc],\n [tok.text for tok in true_nl_doc])\n\n print(\"\\tJaccard Index: {}\".format(jaccard_index))\n\n if jaccard_index < jaccard_threshold:\n print(\"\\tJaccard Index not high enough. Not considering this sentence for scoring\")\n return \n\n verb_index_en = self._find_verb_en(en_doc)\n \n print(\"\\t*** POS ***\")\n # find the ending verb, using POS first\n verb_index_nl = self._find_verb_nl_pos(nl_doc)\n \n\n print(\"\\tSource Verb: {}\\n\\tTarget Verb:{}\".format(en_doc[verb_index_en],\n nl_doc[verb_index_nl]))\n\n verb_attention = attention[verb_index_nl, :].numpy()\n \n pred_max_attention = verb_attention.argmax()\n if len(en_doc) < pred_max_attention:\n print(\"\\tIncorrect Dimensions :(\")\n return\n\n print(\"\\tNL Actual Index: {}, English Index: {}, Predicted Index:{}\\n\\tAttention: {}\".format(verb_index_nl, \n verb_index_en, pred_max_attention, verb_attention))\n\n # check for perfect match\n if pred_max_attention == verb_index_en:\n print(\"\\tPerfect Match!\")\n self.perfect_verb_match_pos += 1\n else:\n print(\"\\tNot a perfect match. Instead: {}\".format(en_doc[pred_max_attention]))\n \n # update confusion matrix\n self.conf_matrix_pos[en_doc[pred_max_attention].pos_] += 1\n\n for k in self.k:\n print(\"\\tFor window: {}\".format(k))\n # check if it's a k/2 sized window\n start = max(0, pred_max_attention - (k-1)//2)\n end = min(len(en_doc), pred_max_attention + (k-1)//2 + 1)\n allowed_range = np.arange(start, end)\n print(\"\\t\\tAllowed Range: {}\".format(allowed_range))\n if verb_index_en in allowed_range:\n print(\"\\t\\tIt's in the allowed range\")\n self.top_k_verb_match_pos[k] += 1\n else:\n print(\"\\t\\tIt's not in the allowed range :(\")\n \n verb_index_nl = self._find_verb_nl_dep(nl_doc)\n\n print(\"\\t*** DEP ***\")\n # find the ending verb, using POS first\n verb_index_nl = self._find_verb_nl_dep(nl_doc)\n \n\n print(\"\\tSource Verb: {}\\n\\tTarget Verb:{}\".format(en_doc[verb_index_en],\n nl_doc[verb_index_nl]))\n\n verb_attention = attention[:, verb_index_nl].numpy()\n \n pred_max_attention = verb_attention.argmax()\n print(\"\\tEnglish Index: {}, NL Actual Index: {}, Predicted Index:{}\\n\\tAttention: {}\".format(verb_index_en, \n verb_index_nl, pred_max_attention, verb_attention))\n\n # check for perfect match\n if pred_max_attention == verb_index_en:\n print(\"\\tPerfect Match!\")\n self.perfect_verb_match_dep += 1\n else:\n print(\"\\tNot a perfect match. 
Instead: {}\".format(en_doc[pred_max_attention]))\n \n # update confusion matrix\n self.conf_matrix_dep[en_doc[pred_max_attention].pos_] += 1\n\n for k in self.k:\n print(\"\\tFor window: {}\".format(k))\n # check if it's a k/2 sized window\n start = max(0, pred_max_attention - (k-1)//2)\n end = min(len(en_doc), pred_max_attention + (k-1)//2 + 1)\n allowed_range = np.arange(start, end)\n print(\"\\t\\tAllowed Range: {}\".format(allowed_range))\n\n if verb_index_en in allowed_range:\n print(\"\\t\\tIt's in the allowed range\")\n self.top_k_verb_match_dep[k] += 1\n else:\n print(\"\\t\\tIt's not in the allowed range :(\")\n print(\"\\t**********************\")\n \n self.total += 1\n\n\ndef read_true_nl():\n true_nl_sentences = []\n with codecs.open(\"europalProcessedNL.txt\", \"r\", \"utf-8\") as reader:\n for line in reader:\n true_nl_sentences.append(line)\n return true_nl_sentences\n\nif __name__ == '__main__':\n ### PARAMs for Model\n model = \"../models/ted_sgd_acc_55.43_ppl_12.39_e11.pt\"\n ##\n construct_args(model)\n\n parser = argparse.ArgumentParser(\n description='translate.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.translate_opts(parser)\n opt = parser.parse_args()\n\n dummy_parser = argparse.ArgumentParser(description='train.py')\n opts.model_opts(dummy_parser)\n opt.cuda = opt.gpu > -1\n\n dummy_opt = dummy_parser.parse_known_args([])[0]\n\n translator = onmt.Translator(opt, dummy_opt.__dict__)\n\n data = IO.ONMTDataset(\n opt.src, opt.tgt, translator.fields,\n use_filter_pred=False)\n\n test_data = IO.OrderedIterator(\n dataset=data, device=opt.gpu,\n batch_size=opt.batch_size, train=False, sort=False,\n shuffle=False)\n pred_score_total, pred_words_total = 0, 0\n gold_score_total, gold_words_total = 0, 0\n out_file = codecs.open(opt.output, 'w', 'utf-8')\n\n counter = count(1)\n sentence = 0\n\n scorer = Scorer([3, 5, 7])\n\n true_nl_sentences = read_true_nl()\n\n for batch in test_data:\n pred_batch, gold_batch, pred_scores, gold_scores, attn, src \\\n = translator.translate(batch, data)\n pred_score_total += sum(score[0] for score in pred_scores)\n pred_words_total += sum(len(x[0]) for x in pred_batch)\n if opt.tgt:\n gold_score_total += sum(gold_scores)\n gold_words_total += sum(len(x) for x in batch.tgt[1:])\n\n # z_batch: an iterator over the predictions, their scores,\n # the gold sentence, its score, and the source sentence for each\n # sentence in the batch. 
It has to be zip_longest instead of\n # plain-old zip because the gold_batch has length 0 if the target\n # is not included.\n z_batch = zip_longest(\n pred_batch, gold_batch,\n pred_scores, gold_scores,\n (sent.squeeze(1) for sent in src.split(1, dim=1)))\n\n for index, (pred_sents, gold_sent, pred_score, gold_score, src_sent) in enumerate(z_batch):\n n_best_preds = [\" \".join(pred) for pred in pred_sents[:opt.n_best]]\n out_file.write('\\n'.join(n_best_preds))\n out_file.write('\\n')\n out_file.flush()\n\n sent_number = next(counter)\n words = get_src_words(\n src_sent, translator.fields[\"src\"].vocab.itos)\n\n os.write(1, bytes('\\nSENT %d: %s\\n' %\n (sent_number, words), 'UTF-8'))\n\n best_pred = n_best_preds[0]\n best_score = pred_score[0]\n os.write(1, bytes('PRED %d: %s\\n' %\n (sent_number, best_pred), 'UTF-8'))\n print(\"PRED SCORE: %.4f\" % best_score)\n\n plot_attention(\"attentions/{}.png\".format(sentence + 1),\n words.split(), best_pred.split(), attn[index])\n try:\n scorer.accumulate_scores(words, best_pred,\n true_nl_sentences[sentence], attn[index])\n except:\n ...\n print(\"\\n\\n\\n\")\n\n\n sentence += 1\n\n if sentence > 500:\n break\n\n if sentence > 500:\n break\n\n report_score('PRED', pred_score_total, pred_words_total)\n \n import json\n print(json.dumps(scorer.__dict__, indent=2))\n\n with open(\"results.json\", \"w\") as writer:\n writer.write(json.dumps(scorer.__dict__, indent=2))\n" ]
[ [ "matplotlib.pyplot.savefig", "numpy.arange", "matplotlib.ticker.MultipleLocator", "matplotlib.pyplot.figure" ] ]
shubham0704/MeshCNN
[ "0085e06ab6b06402344130af4e25f0038918bb73" ]
[ "util/edge_history.py" ]
[ "import mpl_toolkits.mplot3d as a3\nimport matplotlib.colors as colors\nimport pylab as pl\nimport numpy as np\nimport pdb\nV = np.array\nr2h = lambda x: colors.rgb2hex(tuple(map(lambda y: y / 255., x)))\nsurface_color = r2h((255, 230, 205))\nedge_color = r2h((90, 90, 90))\nedge_colors = (r2h((15, 167, 175)), r2h((230, 81, 81)), r2h((142, 105, 252)), r2h((248, 235, 57)),\n r2h((51, 159, 255)), r2h((225, 117, 231)), r2h((97, 243, 185)), r2h((161, 183, 196)))\n\n\nselected_edge_colors = (r2h((0, 255, 0)), r2h((255, 0, 0)), r2h((0, 0, 255)))\n\ndef init_plot():\n fig = pl.figure()\n fig.set_size_inches(8, 6)\n ax = fig.add_subplot(111, projection='3d')\n # hide axis, thank to\n # https://stackoverflow.com/questions/29041326/3d-plot-with-matplotlib-hide-axes-but-keep-axis-labels/\n ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))\n # Get rid of the spines\n ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))\n # Get rid of the ticks\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_zticks([])\n return (ax, [np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf], fig)\n\n\ndef update_lim(mesh, plot):\n vs = mesh[0]\n for i in range(3):\n plot[1][2 * i] = min(plot[1][2 * i], vs[:, i].min())\n plot[1][2 * i + 1] = max(plot[1][2 * i], vs[:, i].max())\n return plot\n\n\ndef update_plot(mesh, plot):\n if plot is None:\n plot = init_plot()\n return update_lim(mesh, plot)\n\n\ndef surfaces(mesh, plot):\n vs, faces, edges = mesh\n vtx = vs[faces]\n edgecolor = edge_color if not len(edges) else 'none'\n tri = a3.art3d.Poly3DCollection(vtx, facecolors=surface_color +'55', edgecolors=edgecolor,\n linewidths=.5, linestyles='dashdot')\n plot[0].add_collection3d(tri)\n return plot\n\n\ndef segments(mesh, plot):\n vs, _, edges = mesh\n for edge_c, edge_group in enumerate(edges):\n # print(edge_group.shape)\n for edge_idx in edge_group:\n edge = vs[edge_idx]\n line = a3.art3d.Line3DCollection([edge], linewidths=.5, linestyles='dashdot')\n line.set_color(selected_edge_colors[edge_c % len(selected_edge_colors)])\n plot[0].add_collection3d(line)\n return plot\n\n\ndef plot_mesh(mesh, *whats, show=True, plot=None):\n for what in [update_plot] + list(whats):\n plot = what(mesh, plot)\n if show:\n li = max(plot[1][1], plot[1][3], plot[1][5])\n plot[0].auto_scale_xyz([0, li], [0, li], [0, li])\n pl.tight_layout()\n pl.show()\n # pdb.set_trace()\n plot[2].savefig('./temp.png')\n return plot\n\n\ndef parse_obje(obj_file, highlighted_edges_file, scale_by):\n vs = []\n faces = []\n edges = []\n\n def add_to_edges():\n if edge_c >= len(edges):\n for _ in range(len(edges), edge_c + 1):\n edges.append([])\n edges[edge_c].append(edge_v)\n\n def fix_vertices():\n nonlocal vs, scale_by\n vs = V(vs)\n z = vs[:, 2].copy()\n vs[:, 2] = vs[:, 1]\n vs[:, 1] = z\n max_range = 0\n for i in range(3):\n min_value = np.min(vs[:, i])\n max_value = np.max(vs[:, i])\n max_range = max(max_range, max_value - min_value)\n vs[:, i] -= min_value\n if not scale_by:\n scale_by = max_range\n vs /= scale_by\n\n with open(highlighted_edges_file) as f:\n selected_set = set()\n for line in f:\n line = tuple((float(c) for c in line.strip().split(' ')))\n selected_set.add(line)\n\n # print(selected_set)\n\n with open(obj_file) as f:\n for line in f:\n line = line.strip()\n splitted_line = line.split()\n if not splitted_line:\n continue\n elif splitted_line[0] == 
'v':\n vs.append([float(v) for v in splitted_line[1:]])\n elif splitted_line[0] == 'f':\n faces.append([int(c) - 1 for c in splitted_line[1:]])\n elif splitted_line[0] == 'e':\n print([int(c) - 1 for c in splitted_line[1:-1]])\n # if len(splitted_line) < 4 and selected_set is not None:\n # if \n # splitted_line.append()\n if len(splitted_line) >= 4:\n edge_v = [int(c) - 1 for c in splitted_line[1:-1]]\n print(edge_v)\n edge_c = int(splitted_line[-1])\n print(edge_c)\n add_to_edges()\n \n\n vs = V(vs)\n fix_vertices()\n faces = V(faces, dtype=int)\n edges = [V(c, dtype=int) for c in edges]\n return (vs, faces, edges), scale_by\n\ndef parse_mesh(mesh, highlighted_edges_file, scale_by):\n # should return vs, faces, edges and scale_by factor\n # vs: np.ndarray (float)\n # faces: np.ndarray (int)\n # edges ?\n # all edges belonging to highlighted edges file belong to one group\n # all others belong to another group\n new_edges = [[], []]# 2 groups - selected, not-selected\n new_edges = [[], [], []] # selected, not selected, non-manifold\n \n with open(highlighted_edges_file) as f:\n selected_set = set()\n for line in f:\n line = tuple((float(c) for c in line.strip().split(' ')))\n selected_set.add(line)\n\n def fix_vertices():\n nonlocal vs, scale_by\n vs = V(vs)\n z = vs[:, 2].copy()\n vs[:, 2] = vs[:, 1]\n vs[:, 1] = z\n max_range = 0\n for i in range(3):\n min_value = np.min(vs[:, i])\n max_value = np.max(vs[:, i])\n max_range = max(max_range, max_value - min_value)\n vs[:, i] -= min_value\n if not scale_by:\n scale_by = max_range\n vs /= scale_by\n\n \n vs, faces, edges = convert_mesh(mesh)\n non_selected_edge_color = (0,255,0)\n selected_edge_color = (255, 0, 0)\n for edge in edges:\n nodeA, nodeB = edge\n if tuple(mesh.vs[nodeA]) in selected_set or tuple(mesh.vs[nodeB]) in selected_set:\n new_edges[0].append(edge)\n else:\n new_edges[1].append(edge)\n # pdb.set_trace()\n fix_vertices()\n # face is 4 vertices not 4 edge ids! 
\n return (np.array(vs), np.array(faces), np.array(new_edges)), scale_by\n # pdb.set_trace() \n \n\n\ndef view_meshes(*files, offset=.2):\n plot = None\n max_x = 0\n scale = 0\n for file, highlighted_edges_file in files:\n # mesh, scale = parse_obje(file, highlighted_edges_file, scale)\n mesh, scale = parse_mesh(file, highlighted_edges_file, scale)\n max_x_current = mesh[0][:, 0].max()\n mesh[0][:, 0] += max_x + offset\n # pdb.set_trace()\n\n plot = plot_mesh(mesh, surfaces, segments, plot=plot, show=file == files[-1])\n # azims = [-60, -30, 0, 30, 60]\n # dists = [5, 10, 15]\n # elevs = [-30, 0, 10, 30]\n azims = [-60, 0, 30]\n dists = [0]\n elevs = [0, 10, 30]\n \n import itertools\n for i, (azim, dist, elev) in enumerate(itertools.product(azims, dists, elevs)):\n plot[0].azim = azim\n plot[0].dist = dist\n plot[0].elev = elev\n plot[2].savefig(f'./temp1/temp_{i}.png', dpi=300)\n # 27 -> (0, 5, 30), 38 -> (30, 5, 10), 39-> (30, 5, 30), 42 -> (30, 10, 10)\n print(i, (azim, dist, elev))\n pdb.set_trace()\n max_x += max_x_current + offset\n\n\ndef convert_mesh(mesh):\n\n faces = []\n final_vs = []\n final_faces = []\n final_edges = []\n pdb.set_trace()\n vs = mesh.vs[mesh.v_mask]\n gemm = np.array(mesh.gemm_edges)\n new_indices = np.zeros(mesh.v_mask.shape[0], dtype=np.int32)\n new_indices[mesh.v_mask] = np.arange(0, np.ma.where(mesh.v_mask)[0].shape[0])\n\n for edge_index in range(len(gemm)):\n cycles = mesh.get_cycle(gemm, edge_index)\n for cycle in cycles:\n faces.append(mesh.cycle_to_face(cycle, new_indices))\n\n for v in vs:\n final_vs.append([v[0], v[1], v[2]])\n # vcol = ' %f %f %f' % (vcolor[vi, 0], vcolor[vi, 1], vcolor[vi, 2]) if vcolor is not None else ''\n # f.write(\"v %f %f %f%s\\n\" % (v[0], v[1], v[2], vcol))\n for face_id in range(len(faces) - 1):\n # f.write(\"f %d %d %d\\n\" % (faces[face_id][0] + 1, faces[face_id][1] + 1, faces[face_id][2] + 1))\n final_faces.append([faces[face_id][0], faces[face_id][1], faces[face_id][2]])\n # f.write(\"f %d %d %d\" % (faces[-1][0] + 1, faces[-1][1] + 1, faces[-1][2] + 1))\n final_faces.append([faces[-1][0], faces[-1][1], faces[-1][2]])\n for edge in mesh.edges:\n # f.write(\"\\ne %d %d\" % (new_indices[edge[0]] + 1, new_indices[edge[1]] + 1))\n final_edges.append([new_indices[edge[0]], new_indices[edge[1]]])\n return final_vs, final_faces, final_edges" ]
[ [ "numpy.max", "numpy.ma.where", "numpy.array", "numpy.zeros", "numpy.min" ] ]
BIDS-Apps/rsHRF
[ "e1751e77629f9e960f156b1bd6a9842f7c34f719", "a07715b764df69fffbc7f1a43718e958662ade9b" ]
[ "rsHRF/CLI.py", "rsHRF/rsHRF_GUI/gui_windows/plotterWindow.py" ]
[ "import sys\nimport numpy as np\nimport os.path as op\nimport json\nfrom argparse import ArgumentParser\nfrom bids.layout import BIDSLayout\nfrom pathlib import Path\nfrom rsHRF import spm_dep, fourD_rsHRF, utils\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nwith open(op.join(op.dirname(op.realpath(__file__)), \"VERSION\"), \"r\") as fh:\n __version__ = fh.read().strip('\\n')\n\ndef get_parser():\n parser = ArgumentParser(description='retrieves the onsets of pseudo-events triggering a '\n 'haemodynamic response from resting state fMRI BOLD '\n 'voxel-wise signal')\n\n group_input = parser.add_mutually_exclusive_group(required=True)\n\n group_input.add_argument('--ts', action='store', type=op.abspath,\n help='the absolute path to a single data file')\n\n group_input.add_argument('--input_file', action='store', type=op.abspath,\n help='the absolute path to a single data file')\n\n group_input.add_argument('--bids_dir', nargs='?', action='store', type=op.abspath,\n help='the root folder of a BIDS valid dataset '\n '(sub-XXXXX folders should be found at the '\n 'top level in this folder).')\n\n group_input.add_argument('--GUI', action='store_true',\n help='to execute the toolbox in GUI mode')\n\n parser.add_argument('--output_dir', action='store', type=op.abspath,\n help='the output path for the outcomes of processing')\n\n parser.add_argument('--n_jobs', action='store', type=int, default=-1,\n help='the number of parallel processing elements')\n\n parser.add_argument('-V', '--version', action='version', version='rsHRF version {}'.format(__version__))\n\n parser.add_argument('--analysis_level', help='Level of the analysis that will be performed. '\n 'Multiple participant level analyses can be run independently '\n '(in parallel) using the same output_dir.', choices=['participant'], nargs='?')\n\n parser.add_argument('--participant_label',\n help='The label(s) of the participant(s) that should be analyzed. The label '\n 'corresponds to sub-<participant_label> from the BIDS spec '\n '(so it does not include \"sub-\"). If this parameter is not '\n 'provided all subjects should be analyzed. Multiple '\n 'participants can be specified with a space separated list.',\n nargs=\"+\")\n \n parser.add_argument('--bids_filter_file', action='store', type=op.abspath,\n help='a JSON file describing custom BIDS input filters using PyBIDS. 
'\n 'For further details, please check out http://bids-apps.neuroimaging.io/rsHRF/')\n \n group_mask = parser.add_mutually_exclusive_group(required=False)\n\n group_mask.add_argument('--atlas', action='store', type=op.abspath,\n help='the absolute path to a single atlas file')\n\n group_mask.add_argument('--brainmask', action='store_true',\n help='to enable the use of mask files present in the BIDS '\n 'directory itself')\n\n group_para = parser.add_argument_group('Parameters')\n\n group_para.add_argument('--estimation', action='store',\n choices=['canon2dd', 'sFIR', 'FIR', 'fourier', 'hanning', 'gamma'],\n help='Choose the estimation procedure from '\n 'canon2dd (canonical shape with 2 derivatives), '\n 'sFIR (smoothed Finite Impulse Response), '\n 'FIR (Finite Impulse Response), '\n 'fourier (Fourier Basis Set), '\n 'hanning (Fourier Basis w Hanning), '\n 'gamma (Gamma Basis Set)')\n\n group_para.add_argument('--passband', action='store', type=float, nargs=2, metavar=('LOW_FREQ','HIGH_FREQ'),\n default=[0.01, 0.08],\n help='set intervals for bandpass filter, default is 0.01 - 0.08')\n\n group_para.add_argument('--passband_deconvolve', action='store', type=float, nargs=2, metavar=('LOW_FREQ', 'HIGH_FREQ'),\n default=[0.0, sys.float_info.max],\n help='set intervals for bandpass filter (used while deconvolving BOLD), default is no-filtering')\n\n group_para.add_argument('-TR', action='store', type=float, default=-1,\n help='set TR parameter')\n\n group_para.add_argument('-T', action='store', type=int, default=3,\n help='set T parameter')\n\n group_para.add_argument('-T0', action='store', type=int, default=1,\n help='set T0 parameter')\n\n group_para.add_argument('-TD_DD', action='store', type=int, default=2,\n help='set TD_DD parameter')\n\n group_para.add_argument('-AR_lag', action='store', type=int, default=1,\n help='set AR_lag parameter')\n\n group_para.add_argument('--thr', action='store', type=float, default=1,\n help='set thr parameter')\n\n group_para.add_argument('--temporal_mask', action='store', type=op.abspath,\n help='the path for the (temporal) mask file.\\n The mask file should be a \".dat\" file, consisting of a binary string of the same length as the signal')\n\n group_para.add_argument('--order', action='store', type=int, default=3,\n help='set the number of basis vectors')\n\n group_para.add_argument('--len', action='store', type=int, default=24,\n help='set len parameter')\n\n group_para.add_argument('--min_onset_search', action='store', type=int, default=4,\n help='set min_onset_search parameter')\n\n group_para.add_argument('--max_onset_search', action='store', type=int, default=8,\n help='set max_onset_search parameter')\n\n group_para.add_argument('--localK', action='store', type=int,\n help='set localK')\n\n group_para.add_argument('--wiener', action='store_true',\n help='to perform iterative wiener deconvolution')\n\n return parser\n\n\ndef run_rsHRF():\n parser = get_parser()\n args = parser.parse_args()\n arg_groups = {}\n for group in parser._action_groups:\n group_dict = {a.dest: getattr(args, a.dest, None) for a in group._group_actions }\n arg_groups[group.title] = group_dict\n para = arg_groups['Parameters']\n nargs = len(sys.argv)\n temporal_mask = []\n\n if (not args.GUI) and (args.output_dir is None):\n parser.error('--output_dir is required when executing in command-line interface')\n\n if (not args.GUI) and (args.estimation is None):\n parser.error('--estimation rule is required when executing in command-line interface')\n\n if (args.GUI):\n if (nargs == 
2):\n            try:\n                from .rsHRF_GUI import run\n                run.run()\n            except ModuleNotFoundError:\n                parser.error('--GUI should not be used inside a Docker container')\n        else:\n            parser.error('no other arguments should be supplied with --GUI')\n\n    if (args.input_file is not None or args.ts is not None) and args.analysis_level:\n        parser.error('analysis_level cannot be used with --input_file or --ts, do not supply it')\n\n    if (args.input_file is not None or args.ts is not None) and args.participant_label:\n        parser.error('participant_labels are not to be used with --input_file or --ts, do not supply it')\n\n    if args.input_file is not None and args.brainmask:\n        parser.error('--brainmask cannot be used with --input_file, use --atlas instead')\n\n    if args.ts is not None and (args.brainmask or args.atlas):\n        parser.error('--atlas or --brainmask cannot be used with --ts, do not supply it')\n\n    if args.bids_dir is not None and not (args.brainmask or args.atlas):\n        parser.error('--atlas or --brainmask needs to be supplied with --bids_dir')\n\n    if args.bids_dir is not None and not args.analysis_level:\n        parser.error('analysis_level needs to be supplied with bids_dir, choices=[participant]')\n\n    if args.input_file is not None and (not args.input_file.endswith(('.nii', '.nii.gz', '.gii', '.gii.gz'))):\n        parser.error('--input_file should end with .gii, .gii.gz, .nii or .nii.gz')\n\n    if args.atlas is not None and (not args.atlas.endswith(('.nii', '.nii.gz','.gii', '.gii.gz'))):\n        parser.error('--atlas should end with .gii, .gii.gz, .nii or .nii.gz')\n\n    if args.ts is not None and (not args.ts.endswith(('.txt'))):\n        parser.error('--ts file should end with .txt')\n\n    if args.temporal_mask is not None and (not args.temporal_mask.endswith(('.dat'))):\n        parser.error('--temporal_mask file should end with \".dat\"')\n\n    if args.temporal_mask is not None:\n        f = open(args.temporal_mask,'r')\n        for line in f:\n            for each in line:\n                if each in ['0','1']:\n                    temporal_mask.append(int(each))\n\n    if args.estimation == 'sFIR' or args.estimation == 'FIR':\n        para['T'] = 1\n\n    if args.ts is not None:\n        file_type = op.splitext(args.ts)\n        if para['TR'] <= 0:\n            parser.error('Please supply a valid TR using -TR argument')\n        else:\n            TR = para['TR']\n        para['dt'] = para['TR'] / para['T']\n        para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),\n                                np.fix(para['max_onset_search'] / para['dt']) + 1,\n                                dtype='int')\n        fourD_rsHRF.demo_rsHRF(args.ts, None, args.output_dir, para, args.n_jobs, file_type, mode='time-series', temporal_mask=temporal_mask, wiener=args.wiener)\n\n    if args.input_file is not None:\n        if args.atlas is not None:\n            if (args.input_file.endswith(('.nii', '.nii.gz')) and args.atlas.endswith(('.gii', '.gii.gz'))) or (args.input_file.endswith(('.gii', '.gii.gz')) and args.atlas.endswith(('.nii', '.nii.gz'))):\n                parser.error('--atlas and input_file should be of the same type [NIfTI or GIfTI]')\n\n        # carry analysis with input_file and atlas\n        file_type = op.splitext(args.input_file)\n        if file_type[-1] == \".gz\":\n            file_type = op.splitext(file_type[-2])[-1] + file_type[-1]\n        else:\n            file_type = file_type[-1]\n        if \".nii\" in file_type:\n            TR = (spm_dep.spm.spm_vol(args.input_file).header.get_zooms())[-1]\n        else:\n            if para['TR'] == -1:\n                parser.error('Please supply a valid TR using -TR argument')\n            else:\n                TR = para['TR']\n        if TR <= 0:\n            if para['TR'] <= 0:\n                parser.error('Please supply a valid TR using -TR argument')\n        else:\n            if para['TR'] == -1:\n                para['TR'] = TR\n            elif para['TR'] <= 0:\n                print('Invalid TR supplied, using 
implicit TR: {0}'.format(TR))\n para['TR'] = TR\n para['dt'] = para['TR'] / para['T']\n para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),\n np.fix(para['max_onset_search'] / para['dt']) + 1,\n dtype='int')\n fourD_rsHRF.demo_rsHRF(args.input_file, args.atlas, args.output_dir, para, args.n_jobs, file_type, mode='input', temporal_mask=temporal_mask, wiener=args.wiener)\n\n if args.bids_dir is not None:\n utils.bids.write_derivative_description(args.bids_dir, args.output_dir)\n bids_dir = Path(args.bids_dir)\n fname = bids_dir / 'dataset_description.json'\n \n if fname.exists():\n desc = json.loads(Path(fname).read_text())\n if 'DataType' in desc :\n if desc['DataType'] != 'derivative':\n parser.error('Input data is not a derivative dataset'\n ' (DataType in dataset_description.json is not equal to \"derivative\")')\n \n else :\n parser.error('DataType is not defined in the dataset_description.json file. Please make sure DataType is defined. '\n 'Information on the dataset_description.json file can be found online '\n '(https://bids-specification.readthedocs.io/en/stable/03-modality-agnostic-files.html'\n '#derived-dataset-and-pipeline-description)')\n else :\n parser.error('Could not find dataset_description.json file. Please make sure the BIDS data '\n 'structure is present and correct. Datasets can be validated online '\n 'using the BIDS Validator (http://incf.github.io/bids-validator/).')\n \n \n if args.bids_dir is not None and args.atlas is not None:\n # carry analysis with bids_dir and 1 atlas\n layout = BIDSLayout(args.bids_dir, validate=False, config =['bids', 'derivatives'])\n \n if args.participant_label:\n input_subjects = args.participant_label\n subjects_to_analyze = layout.get_subjects(subject=input_subjects)\n else:\n subjects_to_analyze = layout.get_subjects()\n\n if not subjects_to_analyze:\n parser.error('Could not find participants. Please make sure the BIDS data '\n 'structure is present and correct. 
Datasets can be validated online '\n 'using the BIDS Validator (http://incf.github.io/bids-validator/).')\n\n if not args.atlas.endswith(('.nii', '.nii.gz')):\n parser.error('--atlas should end with .nii or .nii.gz')\n \n if args.bids_filter_file is not None:\n filter_list = json.loads(Path(args.bids_filter_file).read_text()) \n \n default_input = {'extension': 'nii.gz', \n 'datatype' : 'func', \n 'desc': 'preproc', \n 'task' : 'rest',\n 'suffix': 'bold'}\n default_input['subject']=subjects_to_analyze \n default_input.update(filter_list['bold'])\n \n all_inputs = layout.get(return_type='filename',**default_input)\n \n else :\n all_inputs = layout.get(return_type='filename',datatype='func', subject=subjects_to_analyze, task='rest',desc='preproc',suffix='bold', extension=['nii', 'nii.gz'])\n \n if not all_inputs != []:\n parser.error('There are no files of type *bold.nii / *bold.nii.gz '\n 'Please make sure to have at least one file of the above type '\n 'in the BIDS specification')\n else:\n num_errors = 0\n for file_count in range(len(all_inputs)):\n try:\n TR = layout.get_metadata(all_inputs[file_count])['RepetitionTime']\n except KeyError as e:\n TR = spm_dep.spm.spm_vol(all_inputs[file_count]).header.get_zooms()[-1]\n para['TR'] = TR\n para['dt'] = para['TR'] / para['T']\n para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),\n np.fix(para['max_onset_search'] / para['dt']) + 1,\n dtype='int')\n num_errors += 1\n try:\n fourD_rsHRF.demo_rsHRF(all_inputs[file_count], args.atlas, args.output_dir, para, args.n_jobs, file_type, mode='bids w/ atlas', temporal_mask=temporal_mask, wiener=args.wiener)\n num_errors -=1\n except ValueError as err:\n print(err.args[0])\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n success = len(all_inputs) - num_errors\n if success == 0:\n raise RuntimeError('Dimensions were inconsistent for all input-mask pairs; \\n'\n 'No inputs were processed!')\n\n if args.bids_dir is not None and args.brainmask:\n # carry analysis with bids_dir and brainmask\n layout = BIDSLayout(args.bids_dir, validate=False, config =['bids', 'derivatives'])\n\n if args.participant_label:\n input_subjects = args.participant_label\n subjects_to_analyze = layout.get_subjects(subject=input_subjects)\n else:\n subjects_to_analyze = layout.get_subjects()\n\n if not subjects_to_analyze:\n parser.error('Could not find participants. Please make sure the BIDS data '\n 'structure is present and correct. 
Datasets can be validated online '\n 'using the BIDS Validator (http://incf.github.io/bids-validator/).')\n\n if args.bids_filter_file is not None:\n filter_list = json.loads(Path(args.bids_filter_file).read_text()) \n \n default_input = {'extension': 'nii.gz', \n 'datatype' : 'func', \n 'desc': 'preproc', \n 'task' : 'rest',\n 'suffix': 'bold'}\n default_input['subject']=subjects_to_analyze \n default_input.update(filter_list['bold'])\n \n all_inputs = layout.get(return_type='filename',**default_input)\n \n default_mask={'extension': 'nii.gz',\n 'datatype': 'func',\n 'desc': 'brain',\n 'task':'rest',\n 'suffix':'mask'}\n default_mask['subject']=subjects_to_analyze\n default_mask.update(filter_list['mask'])\n \n all_masks = layout.get(return_type='filename',**default_mask)\n \n \n else: \n all_inputs = layout.get(return_type='filename',datatype='func', subject=subjects_to_analyze, task='rest',desc='preproc',suffix='bold', extension=['nii', 'nii.gz'])\n all_masks = layout.get(return_type='filename', datatype='func', subject=subjects_to_analyze, task='rest',desc='brain',suffix='mask', extension=['nii', 'nii.gz'])\n \n if not all_inputs != []:\n parser.error('There are no files of type *bold.nii / *bold.nii.gz '\n 'Please make sure to have at least one file of the above type '\n 'in the BIDS specification')\n if not all_masks != []:\n parser.error('There are no files of type *mask.nii / *mask.nii.gz '\n 'Please make sure to have at least one file of the above type '\n 'in the BIDS specification')\n if len(all_inputs) != len(all_masks):\n parser.error('The number of *bold.nii / .nii.gz and the number of '\n '*mask.nii / .nii.gz are different. Please make sure that '\n 'there is one mask for each input_file present')\n\n all_inputs.sort()\n all_masks.sort()\n\n all_prefix_match = False\n prefix_match_count = 0\n for i in range(len(all_inputs)):\n input_prefix = all_inputs[i].split('/')[-1].split('_desc')[0]\n mask_prefix = all_masks[i].split('/')[-1].split('_desc')[0]\n if input_prefix == mask_prefix:\n prefix_match_count += 1\n else:\n all_prefix_match = False\n break\n if prefix_match_count == len(all_inputs):\n all_prefix_match = True\n\n if not all_prefix_match:\n parser.error('The mask and input files should have the same prefix for correspondence. 
'\n 'Please consider renaming your files')\n else:\n num_errors = 0\n for file_count in range(len(all_inputs)):\n file_type = all_inputs[file_count].split('bold')[1]\n if file_type == \".nii\" or file_type == \".nii.gz\":\n try:\n TR = layout.get_metadata(all_inputs[file_count])['RepetitionTime']\n except KeyError as e:\n TR = spm_dep.spm.spm_vol(all_inputs[file_count]).header.get_zooms()[-1]\n para['TR'] = TR\n else:\n spm_dep.spm.spm_vol(all_inputs[file_count])\n TR = spm_dep.spm.spm_vol(all_inputs[file_count]).get_arrays_from_intent(\"NIFTI_INTENT_TIME_SERIES\")[0].meta.get_metadata()[\"TimeStep\"]\n para['TR'] = float(TR) * 0.001\n\n\n para['dt'] = para['TR'] / para['T']\n para['lag'] = np.arange(np.fix(para['min_onset_search'] / para['dt']),\n np.fix(para['max_onset_search'] / para['dt']) + 1,\n dtype='int')\n num_errors += 1\n try:\n fourD_rsHRF.demo_rsHRF(all_inputs[file_count], all_masks[file_count], args.output_dir, para, args.n_jobs, mode='bids', temporal_mask=temporal_mask, wiener=args.wiener)\n num_errors -=1\n except ValueError as err:\n print(err.args[0])\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n success = len(all_inputs) - num_errors\n if success == 0:\n raise RuntimeError('Dimensions were inconsistent for all input-mask pairs; \\n'\n 'No inputs were processed!')\n\n\n\ndef main():\n warnings.filterwarnings(\"ignore\")\n run_rsHRF()\n\n\nif __name__ == '__main__':\n raise RuntimeError(\"CLI.py should not be run directly;\\n\"\n \"Please `pip install` rsHRF and use the `rsHRF` command\")", "import mpld3\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport numpy as np\nfrom matplotlib.figure import Figure\nfrom tkinter import ttk, Toplevel, Canvas, TOP, BOTH, BOTTOM\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\n\nclass PlotterWindow(): \n def __init__(self):\n window = Toplevel()\n window.title(\"Screen\")\n # get screen width and height\n screen_width = window.winfo_screenwidth()\n screen_height = window.winfo_screenheight()\n window.geometry(\"600x400+%d+%d\" % (screen_width-600, screen_height/2))\n figure = Figure(figsize=(5,5), dpi=100)\n self.numberOfPlots = 3\n self.ts = [[] for i in range(0, self.numberOfPlots)]\n self.plot = [figure.add_subplot(111) for i in range(0, self.numberOfPlots)]\n self.canvas = FigureCanvasTkAgg(figure, window)\n\n def get_numberOfPlots(self):\n return self.numberOfPlots\n\n def makePlot(self, ts, val, num):\n for each in self.plot:\n each.clear()\n if val == 0:\n self.ts[num] = 0\n elif val == 1:\n self.ts[num] = ts\n for i in range(self.numberOfPlots):\n self.plot[i].plot(self.ts[i])\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)\n\n" ]
[ [ "numpy.fix" ], [ "matplotlib.use", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "matplotlib.figure.Figure" ] ]
sjyk/ar4ds
[ "668fc5d06152d4d5c3dbeeaaedb8bc3ddb393d88" ]
[ "examples/optimization.py" ]
[ "from _examples import *\nimport pandas as pd\n\nfrom ar4ds.api import *\nfrom ar4ds.core import *\n\n\"\"\"\nIntroduction to the query optimizer\n\"\"\"\n\ndata = pd.DataFrame(raw_data, columns=['title','branch', 'salary'])\ncode = '''implies(conj(eq(s.branch,'NY'), eq(t.branch,'SF')), gt(s.salary, t.salary))'''\n\n#1. ar4ds uses a query optimizer to optimize the evaluation of the rules\n#you can see how a rule is being evaluated with\ndc = compile(code)\nprint(\"Rule evaluated with a nested loop join: \\n\")\ndc.explainPlan(data)\n#executes the evaluation with a nested loop join\n\nprint(\"\\n\\n\")\n\nfrom ar4ds.opt.cascades import CascadesQueryOptimizer\n#2. you can add more sophisticated optimizers:\ndc = compile(code, CascadesQueryOptimizer)\nprint(\"Cascades finds push down optimizations: \\n\")\ndc.explainPlan(data)\nprint(\"\\n\\n\")\n\n#3. more complicated expression\ncode = '''implies(conj(conj(eq(s.branch,'NY'), eq(t.branch,'SF')), eq(s.title, t.title)) , gt(s.salary, t.salary))'''\ndc = compile(code, CascadesQueryOptimizer)\nprint(\"Cascades finds join type optimizations: \\n\")\ndc.explainPlan(data)\n\n\n\n\n\n\n" ]
[ [ "pandas.DataFrame" ] ]
neuronalX/Funky_Reservoir
[ "ce11aafb02ad36d37232a8a813e70923e0da1cc8" ]
[ "reservoirpy/nodes/tests/test_io.py" ]
[ "# Author: Nathan Trouvain at 14/03/2022 <nathan.trouvain@inria.fr>\n# Licence: MIT License\n# Copyright: Xavier Hinaut (2018) <xavier.hinaut@inria.fr>\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_equal\n\nfrom reservoirpy.nodes import Input, Output\n\n\ndef test_input():\n inp = Input()\n x = np.ones((1, 10))\n out = inp(x)\n assert_equal(out, x)\n x = np.ones((10, 10))\n out = inp.run(x)\n assert_equal(out, x)\n\n with pytest.raises(ValueError):\n inp = Input(input_dim=9)\n inp.run(x)\n\n\ndef test_output():\n output = Output()\n x = np.ones((1, 10))\n out = output(x)\n assert_equal(out, x)\n x = np.ones((10, 10))\n out = output.run(x)\n assert_equal(out, x)\n" ]
[ [ "numpy.ones", "numpy.testing.assert_equal" ] ]
m0tchy/hub
[ "54494b4c8aa83dc20e3b1e94017b9a029cb92822" ]
[ "tensorflow_hub/tools/make_nearest_neighbour_index/make_nearest_neighbour_index.py" ]
[ "# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Entry point to run the hub2ann tool.\"\"\"\n\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\n\nfrom tensorflow_hub.tools.make_nearest_neighbour_index import embedding_generator as generator\nfrom tensorflow_hub.tools.make_nearest_neighbour_index import index_builder as builder\nfrom tensorflow_hub.tools.make_nearest_neighbour_index import similarity_finder as finder\n\n# Embedding generator flags\nflags.DEFINE_string(\n \"data_file_pattern\", None,\n \"Path to data file(s) to generate embeddings for.\")\nflags.DEFINE_string(\n \"module_url\", None, \"TF-Hub module to use. \"\n \"For more options, search https://tfhub.dev.\")\nflags.DEFINE_integer(\n \"projected_dim\", None,\n \"The desired target dimension to project the embedding to. \"\n \"If specified, random projection will be uses.\")\nflags.DEFINE_string(\n \"embed_output_dir\", None,\n \"The directory to store the generated embedding files to. \"\n \"This can be a local or a GCS location.\")\n\n# index builder parameters\nflags.DEFINE_integer(\n \"num_trees\", 100,\n \"The number of trees to build the ANN index. Default is 100. \"\n \"For more details, refer to https://github.com/spotify/annoy.\")\nflags.DEFINE_string(\n \"index_output_dir\", None,\n \"The directory to store the created index and mapping files. \"\n \"This can be a local or GCS location.\")\n\n# similarity matching parameters\nflags.DEFINE_integer(\n \"num_matches\", 10,\n \"The number of similar matches to retrieve from the ANN index. 
\"\n \"Default is 10.\")\n\nFLAGS = flags.FLAGS\n\n\ndef validate_args(args):\n \"\"\"Validates the command line arguments specified by the user.\"\"\"\n\n if len(args) < 2 or args[1] not in [\"generate\", \"build\", \"e2e\", \"query\"]:\n raise ValueError(\"You need to specify one of four operations: \"\n \"generate | build | e2e | query\")\n\n def _validate_generate_args():\n \"\"\"Validates generate operation args.\"\"\"\n if not FLAGS.data_file_pattern:\n raise ValueError(\n \"You must provide --data_file_pattern to generate embeddings for.\")\n if not FLAGS.module_url:\n raise ValueError(\n \"You must provide --module_url to use for embeddings generation.\")\n if not FLAGS.embed_output_dir:\n raise ValueError(\n \"You must provide --embed_output_dir to store the embedding files.\")\n if FLAGS.projected_dim and FLAGS.projected_dim < 1:\n raise ValueError(\"--projected_dim must be a positive integer value.\")\n\n def _validate_build_args(e2e=False):\n \"\"\"Validates build operation args.\"\"\"\n if not FLAGS.embed_output_dir and not e2e:\n raise ValueError(\n \"You must provide --embed_output_dir of the embeddings\"\n \"to build the ANN index for.\")\n if not FLAGS.index_output_dir:\n raise ValueError(\n \"You must provide --index_output_dir to store the index files.\")\n if not FLAGS.num_trees or FLAGS.num_trees < 1:\n raise ValueError(\n \"You must provide --num_trees as a positive integer value.\")\n\n def _validate_query_args():\n if not FLAGS.module_url:\n raise ValueError(\"You must provide --module_url to use for query.\")\n if not FLAGS.index_output_dir:\n raise ValueError(\"You must provide --index_output_dir to use for query.\")\n\n operation = args[1]\n if operation == \"generate\":\n _validate_generate_args()\n elif operation == \"build\":\n _validate_build_args()\n elif operation == \"e2e\":\n _validate_generate_args()\n _validate_build_args(True)\n else:\n _validate_query_args()\n\n return operation\n\n\ndef _ensure_tf2():\n \"\"\"Ensure running with TensorFlow 2 behavior.\n\n This function is safe to call even before flags have been parsed.\n\n Raises:\n ImportError: If tensorflow is too old for proper TF2 behavior.\n \"\"\"\n print(\"Running with tensorflow %s (git version %s)\",\n tf.__version__, tf.__git_version__)\n if tf.__version__.startswith(\"1.\"):\n if tf.__git_version__ == \"unknown\": # For internal testing use.\n try:\n tf.compat.v1.enable_v2_behavior()\n return\n except AttributeError:\n pass # Fail below for missing enabler function.\n raise ImportError(\"Sorry, this program needs TensorFlow 2.\")\n\n\ndef main(args):\n \"\"\"Entry point main function.\"\"\"\n\n operation = validate_args(args)\n print(\"Selected operation: {}\".format(operation))\n\n if operation == \"generate\":\n print(\"Generating embeddings...\")\n generator.run(FLAGS)\n print(\"Embedding generation completed.\")\n\n elif operation == \"build\":\n print(\"Building ANN index...\")\n builder.run(FLAGS)\n print(\"Building ANN index completed.\")\n\n elif operation == \"e2e\":\n print(\"Generating embeddings and building ANN index...\")\n generator.run(FLAGS)\n print(\"Embedding generation completed.\")\n if FLAGS.projected_dim:\n FLAGS.dimensions = FLAGS.projected_dim\n\n builder.run(FLAGS)\n print(\"Building ANN index completed.\")\n\n else:\n print(\"Querying the ANN index...\")\n similarity_finder = finder.load(FLAGS)\n num_matches = FLAGS.num_matches\n while True:\n print(\"Enter your query: \", end=\"\")\n query = str(input())\n similar_items = 
similarity_finder.find_similar_items(query, num_matches)\n print(\"Results:\")\n print(\"=========\")\n for item in similar_items:\n print(item)\n\n\nif __name__ == \"__main__\":\n _ensure_tf2()\n app.run(main)\n" ]
[ [ "tensorflow.compat.v1.enable_v2_behavior", "tensorflow.__version__.startswith" ] ]
HKJL10201/fast-feature-fool
[ "bdd44212461628e94e62e75e528fefcf6467336c" ]
[ "nets/googlenet.py" ]
[ "from urllib.request import urlretrieve\nimport tensorflow as tf\nfrom misc.layers import *\nimport numpy as np\nimport os\n\ndef model(image, weights, biases):\n #check image dimensions\n assert image.get_shape().as_list()[1:] == [224, 224, 3]\n layers = {}\n with tf.name_scope(\"conv1\"):\n layers['conv1_7x7_s2'] = conv_layer(image, weights['conv1_7x7_s2'], biases['conv1_7x7_s2'], s=2)\n layers['pool1_3x3_s2'] = max_pool(layers['conv1_7x7_s2'], k=3, s=2)\n layers['pool1_norm1'] = tf.nn.lrn(layers['pool1_3x3_s2'], 2, 1.0, 2e-05, 0.75)\n\n with tf.name_scope(\"conv2\"):\n layers['conv2_3x3_reduce'] = conv_layer(layers['pool1_norm1'], weights['conv2_3x3_reduce'], biases['conv2_3x3_reduce'])\n layers['conv2_3x3'] = conv_layer(layers['conv2_3x3_reduce'], weights['conv2_3x3'], biases['conv2_3x3'])\n layers['conv2_norm2'] = tf.nn.lrn(layers['conv2_3x3'], 2, 1.0, 2e-05, 0.75)\n layers['pool2_3x3_s2'] = max_pool(layers['conv2_norm2'], k=3, s=2)\n\n with tf.name_scope('inception_3'):\n layers['inception_3a_output'] = inception_block(layers['pool2_3x3_s2'], '3a', weights, biases)\n layers['inception_3b_output'] = inception_block(layers['inception_3a_output']['concat'], '3b', weights, biases)\n layers['pool3_3x3_s2'] = max_pool(layers['inception_3b_output']['concat'], k=3, s=2)\n\n with tf.name_scope('inception_4'):\n layers['inception_4a_output'] = inception_block(layers['pool3_3x3_s2'], '4a', weights, biases)\n layers['inception_4b_output'] = inception_block(layers['inception_4a_output']['concat'], '4b', weights, biases)\n layers['inception_4c_output'] = inception_block(layers['inception_4b_output']['concat'], '4c', weights, biases)\n layers['inception_4d_output'] = inception_block(layers['inception_4c_output']['concat'], '4d', weights, biases)\n layers['inception_4e_output'] = inception_block(layers['inception_4d_output']['concat'], '4e', weights, biases)\n layers['pool4_3x3_s2'] = max_pool(layers['inception_4e_output']['concat'], k=3, s=2)\n\n with tf.name_scope('inception_5'):\n layers['inception_5a_output'] = inception_block(layers['pool4_3x3_s2'], '5a', weights, biases)\n layers['inception_5b_output'] = inception_block(layers['inception_5a_output']['concat'], '5b', weights, biases)\n layers['pool5_7x7_s1'] = tf.nn.avg_pool(layers['inception_5b_output']['concat'], [1,7,7,1], [1,1,1,1], padding='VALID')\n layers['pool5_7x7_s1'] = tf.reshape(layers['pool5_7x7_s1'], [-1,1024])\n\n with tf.name_scope('fc'):\n layers['loss3_classifier'] = fully_connected(layers['pool5_7x7_s1'], weights['loss3_classifier'], biases['loss3_classifier'])\n layers['prob'] = tf.nn.softmax(layers['loss3_classifier'])\n\n return layers\n\ndef googlenet(input):\n #weigths and biases for tensorflow\n weights_path = os.path.join('weights', 'googlenet.npy')\n if not os.path.isfile(weights_path):\n print('Downloading GoogLeNet weights...')\n urlretrieve (\"https://www.dropbox.com/s/kzlgksuginkatb5/googlenet.npy?raw=1\", os.path.join('weights', 'googlenet.npy'))\n net = np.load('weights/googlenet.npy',allow_pickle=True,encoding=\"latin1\").item()\n weights = {}\n biases = {}\n for name in net.keys():\n weights[name] = tf.Variable(tf.constant(net[name]['weights']), dtype='float32' ,name=name+'_weights', trainable=False)\n biases[name] = tf.Variable(tf.constant(net[name]['biases']), dtype='float32' ,name=name+'_biases', trainable=False)\n\n return model(input, weights, biases)\n" ]
[ [ "tensorflow.nn.lrn", "numpy.load", "tensorflow.reshape", "tensorflow.constant", "tensorflow.name_scope", "tensorflow.nn.softmax", "tensorflow.nn.avg_pool" ] ]
brandontrabucco/allenact
[ "f75ed98f7d5bcc87b460f0c13e24dafc18edc895" ]
[ "allenact_plugins/habitat_plugin/habitat_tasks.py" ]
[ "from abc import ABC\nfrom typing import Tuple, List, Dict, Any, Optional, Union, Sequence, cast\n\nimport gym\nimport numpy as np\nfrom habitat.sims.habitat_simulator.actions import HabitatSimActions\nfrom habitat.sims.habitat_simulator.habitat_simulator import HabitatSim\nfrom habitat.tasks.nav.shortest_path_follower import ShortestPathFollower\n\nfrom allenact.base_abstractions.misc import RLStepResult\nfrom allenact.base_abstractions.sensor import Sensor\nfrom allenact.base_abstractions.task import Task\nfrom allenact.utils.system import get_logger\nfrom allenact_plugins.habitat_plugin.habitat_constants import (\n MOVE_AHEAD,\n ROTATE_LEFT,\n ROTATE_RIGHT,\n END,\n LOOK_UP,\n LOOK_DOWN,\n)\nfrom allenact_plugins.habitat_plugin.habitat_environment import HabitatEnvironment\n\n\nclass HabitatTask(Task[HabitatEnvironment], ABC):\n def __init__(\n self,\n env: HabitatEnvironment,\n sensors: List[Sensor],\n task_info: Dict[str, Any],\n max_steps: int,\n **kwargs\n ) -> None:\n super().__init__(\n env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs\n )\n\n self._last_action: Optional[str] = None\n self._last_action_ind: Optional[int] = None\n self._last_action_success: Optional[bool] = None\n self._actions_taken: List[str] = []\n self._positions = []\n pos = self.get_observations()[\"agent_position_and_rotation\"]\n self._positions.append(\n {\"x\": pos[0], \"y\": pos[1], \"z\": pos[2], \"rotation\": pos[3]}\n )\n ep = self.env.get_current_episode()\n # Extract the scene name from the scene path and append the episode id to generate\n # a globally unique episode_id\n self._episode_id = ep.scene_id[-15:-4] + \"_\" + ep.episode_id\n\n @property\n def last_action(self):\n return self._last_action\n\n @last_action.setter\n def last_action(self, value: str):\n self._last_action = value\n\n @property\n def last_action_success(self):\n return self._last_action_success\n\n @last_action_success.setter\n def last_action_success(self, value: Optional[bool]):\n self._last_action_success = value\n\n def render(self, mode: str = \"rgb\", *args, **kwargs) -> np.ndarray:\n if mode == \"rgb\":\n return self.env.current_frame[\"rgb\"]\n elif mode == \"depth\":\n return self.env.current_frame[\"depth\"]\n else:\n raise NotImplementedError()\n\n\nclass PointNavTask(Task[HabitatEnvironment]):\n _actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, END)\n\n def __init__(\n self,\n env: HabitatEnvironment,\n sensors: List[Sensor],\n task_info: Dict[str, Any],\n max_steps: int,\n failed_end_reward: float = 0.0,\n **kwargs\n ) -> None:\n super().__init__(\n env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs\n )\n self._took_end_action: bool = False\n self._success: Optional[bool] = False\n self._subsampled_locations_from_which_obj_visible = None\n\n # Get the geodesic distance to target from the environemnt and make sure it is\n # a valid value\n self.last_geodesic_distance = self.current_geodesic_dist_to_target()\n self.start_distance = self.last_geodesic_distance\n assert self.last_geodesic_distance is not None\n\n # noinspection PyProtectedMember\n self._shortest_path_follower = ShortestPathFollower(\n cast(HabitatSim, env.env.sim), env.env._config.TASK.SUCCESS_DISTANCE, False\n )\n self._shortest_path_follower.mode = \"geodesic_path\"\n\n self._rewards: List[float] = []\n self._metrics = None\n self.failed_end_reward = failed_end_reward\n\n def current_geodesic_dist_to_target(self) -> Optional[float]:\n metrics = self.env.env.get_metrics()\n if 
metrics[\"distance_to_goal\"] is None:\n habitat_env = self.env.env\n habitat_env.task.measurements.update_measures(\n episode=habitat_env.current_episode, action=None, task=habitat_env.task\n )\n metrics = self.env.env.get_metrics()\n\n return metrics[\"distance_to_goal\"]\n\n @property\n def action_space(self):\n return gym.spaces.Discrete(len(self._actions))\n\n def reached_terminal_state(self) -> bool:\n return self.env.env.episode_over\n\n @classmethod\n def class_action_names(cls, **kwargs) -> Tuple[str, ...]:\n return cls._actions\n\n def close(self) -> None:\n self.env.stop()\n\n def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:\n assert isinstance(action, int)\n action = cast(int, action)\n\n action_str = self.class_action_names()[action]\n\n self.env.step({\"action\": action_str})\n\n if action_str == END:\n self._took_end_action = True\n self._success = self._is_goal_in_range()\n self.last_action_success = self._success\n else:\n self.last_action_success = self.env.last_action_success\n\n step_result = RLStepResult(\n observation=self.get_observations(),\n reward=self.judge(),\n done=self.is_done(),\n info={\"last_action_success\": self.last_action_success},\n )\n return step_result\n\n def render(self, mode: str = \"rgb\", *args, **kwargs) -> np.ndarray:\n assert mode in [\"rgb\", \"depth\"], \"only rgb and depth rendering is implemented\"\n return self.env.current_frame[\"rgb\"]\n\n def _is_goal_in_range(self) -> bool:\n return (\n self.current_geodesic_dist_to_target() <= self.task_info[\"distance_to_goal\"]\n )\n\n def judge(self) -> float:\n reward = -0.01\n\n new_geodesic_distance = self.current_geodesic_dist_to_target()\n if self.last_geodesic_distance is None:\n self.last_geodesic_distance = new_geodesic_distance\n\n if self.last_geodesic_distance is not None:\n if (\n new_geodesic_distance is None\n or new_geodesic_distance in [float(\"-inf\"), float(\"inf\")]\n or np.isnan(new_geodesic_distance)\n ):\n new_geodesic_distance = self.last_geodesic_distance\n delta_distance_reward = self.last_geodesic_distance - new_geodesic_distance\n reward += delta_distance_reward\n self.last_geodesic_distance = new_geodesic_distance\n\n if self.is_done():\n reward += 10.0 if self._success else self.failed_end_reward\n else:\n get_logger().warning(\"Could not get geodesic distance from habitat env.\")\n\n self._rewards.append(float(reward))\n\n return float(reward)\n\n def metrics(self) -> Dict[str, Any]:\n if not self.is_done():\n return {}\n else:\n _metrics = self.env.env.get_metrics()\n metrics = {\n \"success\": 1 * self._success,\n \"ep_length\": self.num_steps_taken(),\n \"reward\": np.sum(self._rewards),\n \"spl\": _metrics[\"spl\"] if _metrics[\"spl\"] is not None else 0.0,\n \"dist_to_target\": self.current_geodesic_dist_to_target(),\n }\n self._rewards = []\n return metrics\n\n def query_expert(self, **kwargs) -> Tuple[int, bool]:\n if self._is_goal_in_range():\n return self.class_action_names().index(END), True\n\n target = self.task_info[\"target\"]\n habitat_action = self._shortest_path_follower.get_next_action(target)\n if habitat_action == HabitatSimActions.MOVE_FORWARD:\n return self.class_action_names().index(MOVE_AHEAD), True\n elif habitat_action == HabitatSimActions.TURN_LEFT:\n return self.class_action_names().index(ROTATE_LEFT), True\n elif habitat_action == HabitatSimActions.TURN_RIGHT:\n return self.class_action_names().index(ROTATE_RIGHT), True\n else:\n return 0, False\n\n\nclass ObjectNavTask(HabitatTask):\n _actions = (MOVE_AHEAD, 
ROTATE_LEFT, ROTATE_RIGHT, END, LOOK_UP, LOOK_DOWN)\n\n    def __init__(\n        self,\n        env: HabitatEnvironment,\n        sensors: List[Sensor],\n        task_info: Dict[str, Any],\n        max_steps: int,\n        **kwargs\n    ) -> None:\n        super().__init__(\n            env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs\n        )\n        self._took_end_action: bool = False\n        self._success: Optional[bool] = False\n        self._subsampled_locations_from_which_obj_visible = None\n\n        # Get the geodesic distance to target from the environment and make sure it is\n        # a valid value\n        self.last_geodesic_distance = self.current_geodesic_dist_to_target()\n        assert not (\n            self.last_geodesic_distance is None\n            or self.last_geodesic_distance in [float(\"-inf\"), float(\"inf\")]\n            or np.isnan(self.last_geodesic_distance)\n        ), \"Bad geodesic distance\"\n        self._min_distance_to_goal = self.last_geodesic_distance\n        self._num_invalid_actions = 0\n\n        # noinspection PyProtectedMember\n        self._shortest_path_follower = ShortestPathFollower(\n            env.env.sim, env.env._config.TASK.SUCCESS_DISTANCE, False\n        )\n        self._shortest_path_follower.mode = \"geodesic_path\"\n\n        self._rewards: List[float] = []\n        self._metrics = None\n        self.task_info[\"episode_id\"] = self._episode_id\n        self.task_info[\"target_position\"] = {\n            \"x\": self.task_info[\"target\"][0],\n            \"y\": self.task_info[\"target\"][1],\n            \"z\": self.task_info[\"target\"][2],\n        }\n\n        self._coverage_map = np.zeros((150, 150))\n\n    @property\n    def action_space(self):\n        return gym.spaces.Discrete(len(self._actions))\n\n    def reached_terminal_state(self) -> bool:\n        return self.env.env.episode_over\n\n    @classmethod\n    def class_action_names(cls, **kwargs) -> Tuple[str, ...]:\n        return cls._actions\n\n    def action_names(self, **kwargs) -> Tuple[str, ...]:\n        return self._actions\n\n    def close(self) -> None:\n        self.env.stop()\n\n    def current_geodesic_dist_to_target(self) -> Optional[float]:\n        metrics = self.env.env.get_metrics()\n        if metrics[\"distance_to_goal\"] is None:\n            habitat_env = self.env.env\n            habitat_env.task.measurements.update_measures(\n                episode=habitat_env.current_episode, action=None, task=habitat_env.task\n            )\n            metrics = self.env.env.get_metrics()\n\n        return metrics[\"distance_to_goal\"]\n\n    def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult:\n        assert isinstance(action, int)\n        action = cast(int, action)\n\n        old_pos = self.get_observations()[\"agent_position_and_rotation\"]\n\n        action_str = self.action_names()[action]\n        self._actions_taken.append(action_str)\n\n        self.env.step({\"action\": action_str})\n\n        # if action_str != END:\n        #     self.env.step({\"action\": action_str})\n\n        # if self.env.env.get_metrics()['distance_to_goal'] <= 0.2:\n        #     self._took_end_action = True\n        #     self._success = self.env.env.get_metrics()['distance_to_goal'] <= 0.2\n        #     self.last_action_success = self._success\n        # else:\n        #     self.last_action_success = self.env.last_action_success\n\n        if action_str == END:\n            self._took_end_action = True\n            self._success = self._is_goal_in_range()\n            self.last_action_success = self._success\n        else:\n            self.last_action_success = self.env.last_action_success\n\n        step_result = RLStepResult(\n            observation=self.get_observations(),\n            reward=self.judge(),\n            done=self.is_done(),\n            info={\"last_action_success\": self.last_action_success},\n        )\n        new_pos = self.get_observations()[\"agent_position_and_rotation\"]\n        if np.all(old_pos == new_pos):\n            self._num_invalid_actions += 1\n\n        pos = self.get_observations()[\"agent_position_and_rotation\"]\n        self._positions.append(\n            {\"x\": pos[0], \"y\": 
pos[1], \"z\": pos[2], \"rotation\": pos[3]}\n )\n\n return step_result\n\n def render(self, mode: str = \"rgb\", *args, **kwargs) -> np.ndarray:\n assert mode in [\"rgb\", \"depth\"], \"only rgb and depth rendering is implemented\"\n return self.env.current_frame[\"rgb\"]\n\n def _is_goal_in_range(self) -> bool:\n # The habitat simulator will return an SPL value of 0.0 whenever the goal is not in range\n return bool(self.env.env.get_metrics()[\"spl\"])\n\n def judge(self) -> float:\n # Set default reward\n reward = -0.01\n\n # Get geodesic distance reward\n new_geodesic_distance = self.current_geodesic_dist_to_target()\n self._min_distance_to_goal = min(\n new_geodesic_distance, self._min_distance_to_goal\n )\n if (\n new_geodesic_distance is None\n or new_geodesic_distance in [float(\"-inf\"), float(\"inf\")]\n or np.isnan(new_geodesic_distance)\n ):\n new_geodesic_distance = self.last_geodesic_distance\n delta_distance_reward = self.last_geodesic_distance - new_geodesic_distance\n reward += delta_distance_reward\n\n if self._took_end_action:\n reward += 10.0 if self._success else 0.0\n\n # Get success reward\n self._rewards.append(float(reward))\n self.last_geodesic_distance = new_geodesic_distance\n\n # # Get coverage reward\n # pos = self.get_observations()[\"agent_position_and_rotation\"]\n # # align current position with center of map\n # x = int(pos[0] + 75)\n # y = int(pos[2] + 75)\n # if self._coverage_map[x, y] == 0:\n # self._coverage_map[x, y] = 1\n # reward += 0.1\n # else:\n # reward -= 0.0\n\n return float(reward)\n\n def metrics(self) -> Dict[str, Any]:\n self.task_info[\"taken_actions\"] = self._actions_taken\n self.task_info[\"action_names\"] = self.action_names()\n self.task_info[\"followed_path\"] = self._positions\n if not self.is_done():\n return {}\n else:\n _metrics = self.env.env.get_metrics()\n metrics = {\n \"success\": self._success,\n \"ep_length\": self.num_steps_taken(),\n \"total_reward\": np.sum(self._rewards),\n \"spl\": _metrics[\"spl\"] if _metrics[\"spl\"] is not None else 0.0,\n \"min_distance_to_target\": self._min_distance_to_goal,\n \"num_invalid_actions\": self._num_invalid_actions,\n \"task_info\": self.task_info,\n }\n self._rewards = []\n return metrics\n\n def query_expert(self, **kwargs) -> Tuple[int, bool]:\n if self._is_goal_in_range():\n return self.class_action_names().index(END), True\n\n target = self.task_info[\"target\"]\n action = self._shortest_path_follower.get_next_action(target)\n return action, action is not None\n" ]
[ [ "numpy.all", "numpy.sum", "numpy.isnan", "numpy.zeros" ] ]
ApfelPresse/CGSearchRace-NEAT
[ "95e832e31f5531649d7f44df2e799d2bc9d251a6" ]
[ "simulation.py" ]
[ "import math\nimport time\n\nimport numpy as np\nfrom CGSearchRace.Constants import Constants\nfrom CGSearchRace.Referee import Referee\nfrom CGSearchRace.Tracks import tracks\n\nfrom visualize_run import plot_current_frame, convert_to_gif\n\n\ndef min_max_scaler(value: float, min_value: float, max_value: float, range_from: float = -6,\n range_to: float = 6):\n width = range_to - range_from\n return (value - min_value) / (max_value - min_value) * width + range_from\n\n\ndef distance_line_and_point(x, y, check_x, check_y, angle):\n length = 10000\n endy = y + length * math.sin(angle)\n endx = x + length * math.cos(angle)\n p1 = np.array([x, y])\n p2 = np.array([endx, endy])\n p3 = np.array([check_x, check_y])\n\n d = np.linalg.norm(np.cross(p2 - p1, p1 - p3)) / np.linalg.norm(p2 - p1)\n if d < Constants.CheckpointRadius:\n d = 0\n return d\n\n\ndef simulate(net, create_gif=False):\n Constants.MAX_TIME = 300\n Constants.Laps = 2\n\n total_score = 0\n tr = [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, -1, -2, -3, -4, -5, 31]\n # tr = [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, -1, -2, -3, -4, -5, 31]\n track_subset = [tracks[i] for i in tr]\n for i, track in enumerate(track_subset):\n ref = Referee(track)\n total_score += run_track(net, ref, create_gif)\n return -total_score\n\n\ndef distance(p1, p2):\n return math.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))\n\n\ndef run_track(net, ref, create_gif):\n offset = 6000\n max_x = Constants.Width + offset\n max_y = Constants.Height + offset\n max_thrust = Constants.CAR_MAX_THRUST\n max_distance = math.sqrt(max_x ** 2 + max_y ** 2)\n images = []\n\n # last_checkpoint = None\n # drive_thru_error = 0\n # activate = True\n #\n # drive_thru_error_list = []\n\n # last_offset_x = 0\n # last_offset_y = 0\n # last_thrust = 0\n\n check_distances = []\n for i in range(Constants.MAX_TIME):\n cp = ref.game.checkpoints\n cp_id1 = ref.game.get_next_checkpoint_id()\n cp_id2 = ref.game.get_next_checkpoint_id(2)\n\n # if not last_checkpoint or last_checkpoint != cp_id1:\n # last_checkpoint = cp_id1\n # drive_thru_error_list.append(drive_thru_error)\n # drive_thru_error = 0\n # activate = False\n\n dist_check1 = distance([cp[cp_id1].x, cp[cp_id1].y], [ref.game.car.x, ref.game.car.y])\n\n # if dist_check1 < 3000:\n # activate = True\n\n # if activate:\n # drive_thru_error += (dist_check1 // 10000)\n # drive_thru_error = drive_thru_error * 2\n\n input_net = create_net_input({\n # \"last_offset_x\": last_offset_x,\n # \"last_offset_y\": last_offset_y,\n # \"last_thrust\": last_thrust,\n \"max_distance\": max_distance,\n \"max_x\": max_x,\n \"max_y\": max_y,\n \"offset\": offset,\n \"angle\": ref.game.car.angle,\n \"vx\": ref.game.car.vx,\n \"vy\": ref.game.car.vy,\n \"car_x\": ref.game.car.x,\n \"car_y\": ref.game.car.y,\n \"check1_x\": cp[cp_id1].x,\n \"check1_y\": cp[cp_id1].y,\n \"check2_x\": cp[cp_id2].x,\n \"check2_y\": cp[cp_id2].y,\n \"dist_check1\": dist_check1,\n \"dist_check2\": distance([cp[cp_id2].x, cp[cp_id2].y], [ref.game.car.x, ref.game.car.y]),\n \"angle_distance_check1\": distance_line_and_point(ref.game.car.x, ref.game.car.y, cp[cp_id1].x, cp[cp_id1].y,\n ref.game.car.angle),\n })\n error_dist = int(distance([cp[cp_id1].x, cp[cp_id1].y], [ref.game.car.x, ref.game.car.y]))\n if error_dist < 2:\n error_dist = 2\n check_distances.append(error_dist)\n\n input_net = np.round(input_net, decimals=4).tolist()\n predict = net.activate(input_net)\n\n offset_x = (int(predict[0] * 3000) * 2) - 3000\n offset_y = (int(predict[1] * 3000) * 2) - 
3000\n input_thrust = int(predict[2] * max_thrust)\n\n ref.game.input = f\"{cp[cp_id1].x + offset_x} {cp[cp_id1].y + offset_y} {input_thrust}\"\n ref.game_turn()\n\n if create_gif and i % 2 == 0:\n images.append(plot_current_frame(cp, cp_id1, ref.game.car))\n\n if ref.game.isDone:\n if create_gif:\n convert_to_gif(f\"track_{time.time()}\", images)\n\n # drive_te = int(np.sum(drive_thru_error_list))\n distances = int(np.sum(check_distances) // 3000)\n bonus = 100 if (i + 1) != Constants.MAX_TIME else 0\n return (i + 1) ** 3 - ref.game.currentCheckpoint ** 2 - bonus + distances\n\n\ndef create_net_input(params):\n input_net = [\n min_max_scaler(params[\"check1_x\"], -params[\"offset\"], params[\"max_x\"]),\n min_max_scaler(params[\"check1_y\"], -params[\"offset\"], params[\"max_y\"]),\n min_max_scaler(params[\"car_x\"], -params[\"offset\"], params[\"max_x\"]),\n min_max_scaler(params[\"car_y\"], -params[\"offset\"], params[\"max_y\"]),\n min_max_scaler(params[\"dist_check1\"], 0, params[\"max_distance\"]),\n min_max_scaler(params[\"dist_check2\"], 0, params[\"max_distance\"]),\n min_max_scaler(params[\"angle_distance_check1\"], 0, params[\"max_distance\"]),\n min_max_scaler(params[\"vx\"], -1000, 2000),\n min_max_scaler(params[\"vy\"], -1000, 2000)\n ]\n return input_net\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.round", "numpy.sum", "numpy.cross" ] ]
Kitsunetic/3detr
[ "2ae2e23613a5777a94ec5a6b376ce3e5a3f0ef7c" ]
[ "utils/random_cuboid.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\nimport numpy as np\n\n\ndef check_aspect(crop_range, aspect_min):\n xy_aspect = np.min(crop_range[:2]) / np.max(crop_range[:2])\n xz_aspect = np.min(crop_range[[0, 2]]) / np.max(crop_range[[0, 2]])\n yz_aspect = np.min(crop_range[1:]) / np.max(crop_range[1:])\n return (xy_aspect >= aspect_min) or (xz_aspect >= aspect_min) or (yz_aspect >= aspect_min)\n\n\nclass RandomCuboid(object):\n \"\"\"\n RandomCuboid augmentation from DepthContrast [https://arxiv.org/abs/2101.02691]\n We slightly modify this operation to account for object detection.\n This augmentation randomly crops a cuboid from the input and\n ensures that the cropped cuboid contains at least one bounding box\n \"\"\"\n\n def __init__(\n self,\n min_points,\n aspect=0.8,\n min_crop=0.5,\n max_crop=1.0,\n box_filter_policy=\"center\",\n ):\n self.aspect = aspect\n self.min_crop = min_crop\n self.max_crop = max_crop\n self.min_points = min_points\n self.box_filter_policy = box_filter_policy\n\n def __call__(self, point_cloud, target_boxes, per_point_labels=None):\n range_xyz = np.max(point_cloud[:, 0:3], axis=0) - np.min(point_cloud[:, 0:3], axis=0)\n\n for _ in range(100):\n crop_range = self.min_crop + np.random.rand(3) * (self.max_crop - self.min_crop)\n if not check_aspect(crop_range, self.aspect):\n continue\n\n sample_center = point_cloud[np.random.choice(len(point_cloud)), 0:3]\n\n new_range = range_xyz * crop_range / 2.0\n\n max_xyz = sample_center + new_range\n min_xyz = sample_center - new_range\n\n upper_idx = np.sum((point_cloud[:, 0:3] <= max_xyz).astype(np.int32), 1) == 3\n lower_idx = np.sum((point_cloud[:, 0:3] >= min_xyz).astype(np.int32), 1) == 3\n\n new_pointidx = (upper_idx) & (lower_idx)\n\n if np.sum(new_pointidx) < self.min_points:\n continue\n\n new_point_cloud = point_cloud[new_pointidx, :]\n\n # filtering policy is the only modification from DepthContrast\n if self.box_filter_policy == \"center\":\n # remove boxes whose center does not lie within the new_point_cloud\n new_boxes = target_boxes\n if target_boxes.sum() > 0: # ground truth contains no bounding boxes. Common in SUNRGBD.\n box_centers = target_boxes[:, 0:3]\n new_pc_min_max = np.min(new_point_cloud[:, 0:3], axis=0), np.max(new_point_cloud[:, 0:3], axis=0)\n keep_boxes = np.logical_and(\n np.all(box_centers >= new_pc_min_max[0], axis=1),\n np.all(box_centers <= new_pc_min_max[1], axis=1),\n )\n if keep_boxes.sum() == 0:\n # current data augmentation removes all boxes in the pointcloud. fail!\n continue\n new_boxes = target_boxes[keep_boxes]\n if per_point_labels is not None:\n new_per_point_labels = [x[new_pointidx] for x in per_point_labels]\n else:\n new_per_point_labels = None\n # if we are here, all conditions are met. return boxes\n return new_point_cloud, new_boxes, new_per_point_labels\n\n # fallback\n return point_cloud, target_boxes, per_point_labels\n" ]
[ [ "numpy.max", "numpy.random.rand", "numpy.sum", "numpy.min", "numpy.all" ] ]
JBlaschke/dials
[ "83fbc79864411b67933bd9708bc2d5f7d8f0859b" ]
[ "command_line/shadow_plot.py" ]
[ "# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1\n\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport sys\n\nimport libtbx\nimport libtbx.phil\nfrom scitbx.array_family import flex\n\nfrom dials.util import Sorry\n\nhelp_message = \"\"\"\nGenerate a 1d or 2d goniometer detector shadow plot for a given experiment list.\n\nExamples::\n\n dials.shadow_plot models.expt\n\n dials.shadow_plot models.expt mode=2d\n\"\"\"\n\nphil_scope = libtbx.phil.parse(\n \"\"\"\noscillation_range = None\n .type = floats(size=2)\nstep_size = auto\n .type = float(value_min=0)\ny_max = None\n .type = float(value_min=0, value_max=100)\nmode = *1d 2d\n .type = choice\noutput {\n plot = scan_shadow_plot.png\n .type = path\n json = None\n .type = path\n size_inches = None\n .type = floats(value_min=0, size=2)\n}\n\"\"\"\n)\n\n\ndef run(args):\n from dials.util.options import OptionParser, flatten_experiments\n\n usage = \"dials.shadow_plot [options] models.expt\"\n\n parser = OptionParser(\n usage=usage,\n phil=phil_scope,\n read_experiments=True,\n check_format=True,\n epilog=help_message,\n )\n\n params, options = parser.parse_args(show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n\n if len(experiments) == 0:\n parser.print_help()\n sys.exit(0)\n\n assert len(experiments) == 1\n imagesets = experiments.imagesets()\n\n imageset = imagesets[0]\n goniometer = imageset.get_goniometer()\n detector = imageset.get_detector()\n scan = imageset.get_scan()\n masker = imageset.masker()\n if masker is None:\n raise Sorry(\"Goniometer model does not support shadowing.\")\n angles = goniometer.get_angles()\n names = goniometer.get_names()\n scan_axis = goniometer.get_scan_axis()\n phi = angles[0]\n\n if params.step_size is libtbx.Auto:\n if params.mode == \"1d\":\n step = scan.get_oscillation()[1]\n else:\n step = 10\n else:\n step = params.step_size\n\n if params.mode == \"1d\":\n if params.oscillation_range is not None:\n start, end = params.oscillation_range\n else:\n start, end = scan.get_oscillation_range()\n\n scan_points = flex.double(libtbx.utils.frange(start, end, step=step))\n n_px_shadowed = flex.double(scan_points.size(), 0)\n n_px_tot = flex.double(scan_points.size(), 0)\n\n assert len(angles) == 3\n for i, scan_angle in enumerate(scan_points):\n shadow = masker.project_extrema(detector, scan_angle)\n for p_id in range(len(detector)):\n px_x, px_y = detector[p_id].get_image_size()\n n_px_tot[i] += px_x * px_y\n if shadow[p_id].size() < 4:\n continue\n n_px_shadowed[i] += polygon_area(shadow[p_id])\n\n else:\n kappa_values = flex.double(libtbx.utils.frange(0, 360, step=step))\n omega_values = flex.double(libtbx.utils.frange(0, 360, step=step))\n grid = flex.grid(kappa_values.size(), omega_values.size())\n n_px_shadowed = flex.double(grid, 0)\n n_px_tot = flex.double(grid, 0)\n\n assert len(angles) == 3\n for i, kappa in enumerate(kappa_values):\n for j, omega in enumerate(omega_values):\n masker.set_goniometer_angles((phi, kappa, omega))\n masker.extrema_at_scan_angle(omega)\n shadow = masker.project_extrema(detector, omega)\n for p_id in range(len(detector)):\n px_x, px_y = detector[p_id].get_image_size()\n n_px_tot[i, j] += px_x * px_y\n if shadow[p_id].size() < 4:\n continue\n n_px_shadowed[i, j] += polygon_area(shadow[p_id])\n\n fraction_shadowed = n_px_shadowed / n_px_tot\n\n if params.output.json is not None:\n if params.mode == \"2d\":\n raise Sorry(\"json output not supported for mode=2d\")\n\n print(\"Writing json 
output to %s\" % params.output.json)\n d = {\n \"scan_points\": list(scan_points),\n \"fraction_shadowed\": list(fraction_shadowed),\n }\n with open(params.output.json, \"w\") as f:\n json.dump(d, f)\n\n if params.output.plot is not None:\n import matplotlib\n\n matplotlib.use(\"Agg\")\n from matplotlib import pyplot as plt\n\n plt.style.use(\"ggplot\")\n\n if params.mode == \"1d\":\n plt.plot(\n scan_points.as_numpy_array(), fraction_shadowed.as_numpy_array() * 100\n )\n plt.xlabel(\"%s angle (degrees)\" % names[scan_axis])\n plt.ylabel(\"Shadowed area (%)\")\n if params.y_max is not None:\n plt.ylim(0, params.y_max)\n else:\n plt.ylim(0, plt.ylim()[1])\n else:\n fig = plt.imshow(\n fraction_shadowed.as_numpy_array() * 100, interpolation=\"bicubic\"\n )\n plt.xlabel(\"%s angle (degrees)\" % names[2])\n plt.ylabel(\"%s angle (degrees)\" % names[1])\n plt.xlim(0, 360 / step - 0.5)\n plt.ylim(0, 360 / step - 0.5)\n\n ticks = (0, 50, 100, 150, 200, 250, 300, 350)\n fig.axes.xaxis.set_major_locator(\n matplotlib.ticker.FixedLocator([k / step for k in ticks])\n )\n fig.axes.yaxis.set_major_locator(\n matplotlib.ticker.FixedLocator([k / step for k in ticks])\n )\n fig.axes.set_xticklabels([\"%.0f\" % k for k in ticks])\n fig.axes.set_yticklabels([\"%.0f\" % k for k in ticks])\n cbar = plt.colorbar()\n cbar.set_label(\"Shadowed area (%)\")\n\n if params.output.size_inches is not None:\n fig = plt.gcf()\n fig.set_size_inches(params.output.size_inches)\n plt.tight_layout()\n print(\"Saving plot to %s\" % params.output.plot)\n plt.savefig(params.output.plot)\n\n\ndef polygon_area(points):\n # http://mathworld.wolfram.com/PolygonArea.html\n x0, y0 = points.parts()\n x1 = x0[1:]\n x1.append(x0[0])\n y1 = y0[1:]\n y1.append(y0[0])\n\n return 0.5 * abs(flex.sum(x0 * y1 - x1 * y0))\n\n\nif __name__ == \"__main__\":\n run(sys.argv[1:])\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.style.use", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.gcf", "matplotlib.ticker.FixedLocator" ] ]
Ronalmoo/pytorch_tutorials
[ "962c256e41d0bdf1003d4f6f03445c9f151dcffc" ]
[ "deep_learning_with_pytorch_a_60_minute_blitz/neural_networks.py" ]
[ "# Define the network\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n # 1 input image channel, 6 output channels, 3x3 square convolution\n # kernel\n self.conv1 = nn.Conv2d(1, 6, 3)\n self.conv2 = nn.Conv2d(6, 16, 3)\n # an affine operation: y = Wx + b\n self.fc1 = nn.Linear(16 * 6 * 6, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n # Max pooling over a (2, 2) window\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n # If the size is a square you can only specify a single number\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, self.num_flat_features(x))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n\nnet = Net()\nprint(net)\n\n\nparams = list(net.parameters())\nprint(len(params))\nprint(params[0].size()) # conv1's weight\n\n# Let try a random 32X32 input.\n\ninput = torch.randn(1, 1, 32, 32)\nout = net(input)\nprint(out)\n\nnet.zero_grad()\nout.backward(torch.randn(1, 10))\n\n\n# Loss Function\n\noutput = net(input)\nprint(output)\ntarget = torch.randn(10)\nprint(target)\ntarget = target.view(1, -1)\nprint(target)\ncriterion = nn.MSELoss()\n\nloss = criterion(output, target)\nprint(loss)\n\nprint(loss.grad_fn) # MSELoss\nprint(loss.grad_fn.next_functions[0][0]) # Linear\nprint(loss.grad_fn.next_functions[0][0].next_functions[0][0]) # ReLU\n\n\n# Backprop\nnet.zero_grad() # zeros the gradient buffers of all parameters\n\nprint('conv1.bias.grad before backward')\nprint(net.conv1.bias.grad)\n\nloss.backward()\n\nprint('conv1.bias.grad after backward')\nprint(net.conv1.bias.grad)\n\n\n# Update the weights\n# The simplest update rule used in practice is the Stochastic Gradient Descent(SGD)\n\n# weight = weight - learning_rate * gradient\n\n# in python code:\nlearning_rate = 0.01\nfor f in net.parameters():\n f.data.sub_(f.grad.data * learning_rate)\n\n# Using torch.optim\n# import torch.optim as optim\n\n# create your optimizer\noptimizer = optim.SGD(net.parameters(), lr=0.01)\n\n# in your training loop:\noptimizer.zero_grad() # zero the gradient buffers\noutput = net(input)\nloss = criterion(output, target)\nloss.backward()\noptimizer.step() # Does the update\n\n" ]
[ [ "torch.nn.MSELoss", "torch.nn.Linear", "torch.nn.Conv2d", "torch.randn" ] ]
bradyrx/climpred
[ "66d22ecf7351f205f9f510f4eac25e4431fac11f" ]
[ "climpred/tests/test_metrics.py" ]
[ "import numpy as np\nimport pytest\nimport xarray as xr\nimport xskillscore as xs\nfrom xarray.testing import assert_allclose\n\nfrom climpred.bootstrap import bootstrap_perfect_model\nfrom climpred.comparisons import PM_COMPARISONS\nfrom climpred.metrics import __ALL_METRICS__ as all_metrics, Metric, __pearson_r\nfrom climpred.prediction import compute_hindcast, compute_perfect_model\n\n\ndef my_mse_function(forecast, verif, dim=None, **metric_kwargs):\n # function\n return ((forecast - verif) ** 2).mean(dim)\n\n\nmy_mse = Metric(\n name=\"mse\",\n function=my_mse_function,\n positive=True,\n probabilistic=False,\n unit_power=2,\n long_name=\"MSE\",\n aliases=[\"mSe\", \"<<<SE\"],\n)\n\nITERATIONS = 2\n\n\n@pytest.mark.parametrize(\"comparison\", PM_COMPARISONS)\ndef test_custom_metric_passed_to_compute(\n PM_da_initialized_1d, PM_da_control_1d, comparison\n):\n \"\"\"Test custom metric in compute_perfect_model.\"\"\"\n actual = compute_perfect_model(\n PM_da_initialized_1d,\n PM_da_control_1d,\n comparison=comparison,\n metric=my_mse,\n dim=\"init\",\n )\n\n expected = compute_perfect_model(\n PM_da_initialized_1d,\n PM_da_control_1d,\n comparison=comparison,\n metric=\"mse\",\n dim=\"init\",\n )\n\n assert_allclose(actual, expected)\n\n\n@pytest.mark.slow\ndef test_custom_metric_passed_to_bootstrap_compute(\n PM_da_initialized_1d, PM_da_control_1d\n):\n \"\"\"Test custom metric in bootstrap_perfect_model.\"\"\"\n comparison = \"e2c\"\n dim = \"init\"\n np.random.seed(42)\n actual = bootstrap_perfect_model(\n PM_da_initialized_1d,\n PM_da_control_1d,\n comparison=comparison,\n metric=my_mse,\n iterations=ITERATIONS,\n dim=dim,\n )\n\n expected = bootstrap_perfect_model(\n PM_da_initialized_1d,\n PM_da_control_1d,\n comparison=comparison,\n metric=\"mse\",\n iterations=ITERATIONS,\n dim=dim,\n )\n\n assert_allclose(actual, expected, rtol=0.1, atol=1)\n\n\n@pytest.mark.parametrize(\"metric\", (\"rmse\", \"mse\"))\ndef test_pm_metric_skipna(PM_da_initialized_3d, PM_da_control_3d, metric):\n \"\"\"Test skipna in compute_perfect_model.\"\"\"\n PM_da_initialized_3d = PM_da_initialized_3d.copy()\n # manipulating data\n PM_da_initialized_3d.values[1:3, 1:4, 1:4, 4:6, 4:6] = np.nan\n\n base = compute_perfect_model(\n PM_da_initialized_3d,\n PM_da_control_3d,\n metric=metric,\n skipna=False,\n dim=\"init\",\n comparison=\"m2e\",\n ).mean(\"member\")\n skipping = compute_perfect_model(\n PM_da_initialized_3d,\n PM_da_control_3d,\n metric=metric,\n skipna=True,\n dim=\"init\",\n comparison=\"m2e\",\n ).mean(\"member\")\n assert ((base - skipping) != 0.0).any()\n assert base.isel(lead=2, x=5, y=5).isnull()\n assert not skipping.isel(lead=2, x=5, y=5).isnull()\n\n\n@pytest.mark.skip(reason=\"comparisons dont work here\")\n@pytest.mark.parametrize(\"metric\", (\"rmse\", \"mse\"))\n@pytest.mark.parametrize(\"comparison\", [\"m2e\", \"m2m\"])\ndef test_pm_metric_weights_m2x(\n PM_da_initialized_3d, PM_da_control_3d, comparison, metric\n):\n \"\"\"Test init weights in compute_perfect_model.\"\"\"\n # distribute weights on initializations\n dim = \"init\"\n base = compute_perfect_model(\n PM_da_initialized_3d,\n PM_da_control_3d,\n dim=dim,\n metric=metric,\n comparison=comparison,\n )\n weights = xr.DataArray(np.arange(1, 1 + PM_da_initialized_3d[dim].size), dims=dim)\n weights = xr.DataArray(\n np.arange(\n 1, 1 + PM_da_initialized_3d[dim].size * PM_da_initialized_3d[\"member\"].size,\n ),\n dims=\"init\",\n )\n\n weighted = compute_perfect_model(\n PM_da_initialized_3d,\n PM_da_control_3d,\n 
dim=dim,\n comparison=comparison,\n metric=metric,\n weights=weights,\n )\n print((base / weighted).mean([\"x\", \"y\"]))\n # test for difference\n assert (xs.smape(base, weighted, [\"x\", \"y\"]) > 0.01).any()\n\n\n@pytest.mark.parametrize(\"metric\", (\"rmse\", \"mse\"))\ndef test_hindcast_metric_skipna(hind_da_initialized_3d, reconstruction_da_3d, metric):\n \"\"\"Test skipna argument in hindcast_metric.\"\"\"\n # manipulating data with nans\n hind_da_initialized_3d[0, 2, 0, 2] = np.nan\n base = compute_hindcast(\n hind_da_initialized_3d,\n reconstruction_da_3d,\n metric=metric,\n skipna=False,\n dim=\"init\",\n alignment=\"same_inits\",\n )\n skipping = compute_hindcast(\n hind_da_initialized_3d,\n reconstruction_da_3d,\n metric=metric,\n skipna=True,\n dim=\"init\",\n alignment=\"same_inits\",\n )\n div = base / skipping\n assert (div != 1).any()\n\n\n@pytest.mark.skip(reason=\"comparisons dont work here\")\n@pytest.mark.parametrize(\"metric\", (\"rmse\", \"mse\"))\n@pytest.mark.parametrize(\"comparison\", [\"e2o\", \"m2o\"])\ndef test_hindcast_metric_weights_x2r(\n hind_da_initialized_3d, reconstruction_da_3d, comparison, metric\n):\n \"\"\"Test init weights in compute_hindcast.\"\"\"\n dim = \"init\"\n base = compute_hindcast(\n hind_da_initialized_3d,\n reconstruction_da_3d,\n dim=dim,\n metric=metric,\n comparison=comparison,\n )\n weights = xr.DataArray(np.arange(1, 1 + hind_da_initialized_3d[dim].size), dims=dim)\n weights = xr.DataArray(\n np.arange(\n 1,\n 1\n + hind_da_initialized_3d[dim].size * hind_da_initialized_3d[\"member\"].size,\n ),\n dims=\"init\",\n )\n\n weighted = compute_hindcast(\n hind_da_initialized_3d,\n reconstruction_da_3d,\n dim=dim,\n comparison=comparison,\n metric=metric,\n weights=weights,\n )\n print((base / weighted).mean([\"nlon\", \"nlat\"]))\n # test for difference\n assert (xs.smape(base, weighted, [\"nlat\", \"nlon\"]) > 0.01).any()\n\n\ndef test_Metric_display():\n summary = __pearson_r.__repr__()\n assert \"Kind: deterministic\" in summary.split(\"\\n\")[4]\n\n\ndef test_no_repeating_metric_aliases():\n \"\"\"Tests that there are no repeating aliases for metrics, which would overwrite\n the earlier defined metric.\"\"\"\n METRICS = []\n for m in all_metrics:\n if m.aliases is not None:\n for a in m.aliases:\n METRICS.append(a)\n duplicates = set([x for x in METRICS if METRICS.count(x) > 1])\n print(f\"Duplicate metrics: {duplicates}\")\n assert len(duplicates) == 0\n" ]
[ [ "numpy.random.seed", "numpy.arange" ] ]
Miguel-Hombrados/GPK-pytorch
[ "ef2addc5b40fa94a9ff1d8b650ee02d9044790c7" ]
[ "predGPMT.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 28 18:50:45 2021\n\n@author: mahom\n\"\"\"\nimport torch\nimport gpytorch\nfrom to_torch import to_torch\nfrom opt_example import opt_example\ndef predGPMT(test_x,likelihood,model):\n \n test_x = to_torch(test_x)\n Ntest = torch.Tensor.size(test_x,0)\n # Make predictions\n #opt_example(model)\n model.eval()\n likelihood.eval()\n task_num = 24\n \n with torch.no_grad(): #, gpytorch.settings.fast_pred_var():\n predictions = likelihood(model(test_x))\n mean = predictions.mean\n cov = predictions.covariance_matrix\n var = cov.diag().reshape(Ntest,-1)\n\n return mean,var" ]
[ [ "torch.no_grad", "torch.Tensor.size" ] ]
mcsarge/ClassicDIY-MatrixDisplay
[ "2edc6ddbe98ef72b81d2ed25f3d2086c6f57564a" ]
[ "matrixdisplay/core.py" ]
[ "#!/usr/bin/env python\nfrom rgbmatrix import graphics\nfrom samplebase import SampleBase\nimport math\nimport time\nimport numpy as np\n\n\ndef scale_col(val, lo, hi):\n if val < lo:\n return 0\n if val > hi:\n return 255\n return 255 * (val - lo) / (hi - lo)\n\n\ndef rotate(x, y, sin, cos):\n return x * cos - y * sin, x * sin + y * cos\n\ndef drawGraph(data, theOrigin, theHeight, theWidth, o_canvas, f_color, z_color, v_color, self):\n currentData = data\n graphOrigin = theOrigin #remember 0,0 is top left corner\n graphHeight = theHeight\n graphWidth = theWidth\n graphBottom = graphOrigin[1]+graphHeight\n #valueColor = graphics.Color(255, 255, 255)\n #valueColor = v_color\n\n\n edgeSetback = (max(currentData[0:graphWidth]) - min(currentData[0:graphWidth])) * 0.05\n maxValue = max(currentData[0:graphWidth]) + edgeSetback\n minValue = min(currentData[0:graphWidth]) - edgeSetback\n valueRange = maxValue - minValue\n\n\n graphics.DrawLine(o_canvas,\n graphOrigin[0], graphOrigin[1],\n graphOrigin[0], graphOrigin[1]+graphHeight, f_color)\n graphics.DrawLine(o_canvas,\n graphOrigin[0], graphOrigin[1]+graphHeight,\n graphOrigin[0]+graphWidth, graphOrigin[1]+graphHeight, f_color)\n\n print(\"range:\", valueRange, \" edge:\", edgeSetback, \" max:\", maxValue, \" min:\", minValue)\n\n #Draw zero Line\n if (minValue < 0):\n percentOfRange = (0-minValue)/valueRange\n y = int(round(graphBottom - (percentOfRange * graphHeight)))\n print(\"Zero line:\", y)\n graphics.DrawLine(o_canvas, graphOrigin[0], y, graphOrigin[0]+graphWidth, y, z_color)\n\n #First Point\n percentOfRange = (currentData[0]-minValue)/valueRange\n y = int(round(graphBottom - (percentOfRange * graphHeight)))\n o_canvas.SetPixel(graphOrigin[0]+1, y, v_color.red, v_color.green, v_color.blue)\n lasty = y\n\n for x in range(1, graphWidth):\n percentOfRange = (currentData[x]-minValue)/valueRange\n y = int(round(graphBottom - (percentOfRange * graphHeight)))\n print(currentData[x],percentOfRange*100, y)\n graphics.DrawLine(o_canvas, x+graphOrigin[0], lasty, x+graphOrigin[0]+1, y, v_color)\n lasty = y\n\n o_canvas = self.matrix.SwapOnVSync(o_canvas)\n\n \n \n \n\n\n\nclass RunText(SampleBase):\n def __init__(self, *args, **kwargs):\n super(RunText, self).__init__(*args, **kwargs)\n self.parser.add_argument(\"-t\", \"--text\", help=\"The text to scroll on the RGB LED panel\", default=\" 88%\")\n\n \n \n \n def run(self):\n offscreen_canvas = self.matrix.CreateFrameCanvas()\n socfont = graphics.Font()\n ampsfont = graphics.Font()\n wattsfont = graphics.Font()\n socfont.LoadFont(\"../fonts/7x13B.bdf\")\n# smfont.LoadFont(\"../fonts/tom-thumb.bdf\")\n ampsfont.LoadFont(\"../fonts/clR6x12.bdf\")\n wattsfont.LoadFont(\"../fonts/5x8.bdf\")\n socColor = graphics.Color(255, 255, 255) #white?\n ampColor = graphics.Color(255, 255, 0) #white?\n wattsColor = graphics.Color(0, 175, 175) #dimyellow?\n frameColor = graphics.Color(125, 0, 125) #\n\n #General Colors\n red = graphics.Color(255, 0, 0)\n dimred = graphics.Color(125, 0, 0)\n green = graphics.Color(0, 255, 0)\n blue = graphics.Color(0, 0, 255)\n white = graphics.Color(255, 255, 255)\n dimYellow = graphics.Color(0, 175, 175) #dimyellow?\n pos = offscreen_canvas.width\n my_text = self.args.text\n soc_text = [\"S\",\"O\",\"C\"]\n amp_text = \"+2.3A\"\n watts_text = \"+900W\"\n\n\n currentData =np.array(\n [0, 0, 0, 0, -9.3, -10.9, -5.1, 0.0, 0.0, 0.0,\n 12.6, 16.1, 16.9, 18.9, 22.5, 24.5, 25.6, 25.9, 27.0, 29.0,\n 30.0, 26.3, 46.3, 54.5, 49.5, 43.0, 38.5, 35.0, 34.0,\t33.0,\n 33.0, 34.7])\n\n\n 
offscreen_canvas.Clear()\n len = graphics.DrawText(offscreen_canvas, socfont, 0, 0, socColor, my_text)\n left_start = offscreen_canvas.width-len-1\n offscreen_canvas.Clear()\n len = graphics.DrawText(offscreen_canvas, socfont, left_start, 9, socColor, my_text)\n len = graphics.DrawText(offscreen_canvas, ampsfont, left_start, 20, green, amp_text)\n \n len = graphics.DrawText(offscreen_canvas, ampsfont, left_start, 32\n , wattsColor, watts_text)\n# len = graphics.DrawText(offscreen_canvas, wattsfont, left_start, 25, wattsColor, watts_text)\n\n graphOrigin = [0, 0] #remember 0,0 is top left corner\n graphHeight = 32 - graphOrigin[1] - 1\n graphWidth = 32\n\n drawGraph(currentData, graphOrigin, graphHeight, graphWidth, offscreen_canvas, frameColor, dimred, blue, self)\n\n\n while True:\n# offscreen_canvas.Clear()\n# len = graphics.DrawText(offscreen_canvas, font, pos, 10, textColor, my_text)\n pos -= 1\n if (pos + len < 0):\n pos = offscreen_canvas.width\n\n time.sleep(0.05)\n# offscreen_canvas = self.matrix.SwapOnVSync(offscreen_canvas)\n\n\nclass RotatingBlockGenerator(SampleBase):\n def __init__(self, *args, **kwargs):\n super(RotatingBlockGenerator, self).__init__(*args, **kwargs)\n\n def run(self):\n cent_x = self.matrix.width / 2\n cent_y = self.matrix.height / 2\n\n rotate_square = min(self.matrix.width, self.matrix.height) * 1.41\n min_rotate = cent_x - rotate_square / 2\n max_rotate = cent_x + rotate_square / 2\n\n display_square = min(self.matrix.width, self.matrix.height) * 0.7\n min_display = cent_x - display_square / 2\n max_display = cent_x + display_square / 2\n\n deg_to_rad = 2 * 3.14159265 / 360\n rotation = 0\n\n # Pre calculate colors\n col_table = []\n for x in range(int(min_rotate), int(max_rotate)):\n col_table.insert(x, scale_col(x, min_display, max_display))\n\n offset_canvas = self.matrix.CreateFrameCanvas()\n\n while True:\n rotation += 1\n rotation %= 360\n\n # calculate sin and cos once for each frame\n angle = rotation * deg_to_rad\n sin = math.sin(angle)\n cos = math.cos(angle)\n\n for x in range(int(min_rotate), int(max_rotate)):\n for y in range(int(min_rotate), int(max_rotate)):\n # Our rotate center is always offset by cent_x\n rot_x, rot_y = rotate(x - cent_x, y - cent_x, sin, cos)\n\n if x >= min_display and x < max_display and y >= min_display and y < max_display:\n x_col = col_table[x]\n y_col = col_table[y]\n offset_canvas.SetPixel(rot_x + cent_x, rot_y + cent_y, x_col, 255 - y_col, y_col)\n else:\n offset_canvas.SetPixel(rot_x + cent_x, rot_y + cent_y, 0, 0, 0)\n\n offset_canvas = self.matrix.SwapOnVSync(offset_canvas)\n\n\n# Main function\nif __name__ == \"__main__\":\n# program = RotatingBlockGenerator()\n program = RunText()\n if (not program.process()):\n program.print_help()\n" ]
[ [ "numpy.array" ] ]
aslansd/pyGLMHMM
[ "5930e1322435431c5835b2b3f241b2ca0d2fb887" ]
[ "src/pyGLMHMM/LBFGS.py" ]
[ "import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom copy import deepcopy\nfrom functools import reduce\nfrom numba import jit\nfrom torch.optim import Optimizer\n\n#%% Helper Functions for L-BFGS\ndef is_legal(v):\n \"\"\"\n Checks that tensor is not NaN or Inf.\n\n Inputs:\n v (tensor): tensor to be checked\n\n \"\"\"\n legal = not torch.isnan(v).any() and not torch.isinf(v).any()\n\n return legal\n\ndef polyinterp(points, x_min_bound=None, x_max_bound=None, plot=False):\n \"\"\"\n Gives the minimizer and minimum of the interpolating polynomial over given points\n based on function and derivative information. Defaults to bisection if no critical\n points are valid.\n\n Based on polyinterp.m Matlab function in minFunc by Mark Schmidt with some slight\n modifications.\n\n Implemented by: Hao-Jun Michael Shi and Dheevatsa Mudigere\n Last edited 12/6/18.\n\n Inputs:\n points (nparray): two-dimensional array with each point of form [x f g]\n x_min_bound (float): minimum value that brackets minimum (default: minimum of points)\n x_max_bound (float): maximum value that brackets minimum (default: maximum of points)\n plot (bool): plot interpolating polynomial\n\n Outputs:\n x_sol (float): minimizer of interpolating polynomial\n F_min (float): minimum of interpolating polynomial\n\n Note:\n . Set f or g to np.nan if they are unknown\n\n \"\"\"\n no_points = points.shape[0]\n order = np.sum(1 - np.isnan(points[:,1:3]).astype('int')) - 1\n\n x_min = np.min(points[:, 0])\n x_max = np.max(points[:, 0])\n\n # compute bounds of interpolation area\n if(x_min_bound is None):\n x_min_bound = x_min\n if(x_max_bound is None):\n x_max_bound = x_max\n\n # explicit formula for quadratic interpolation\n if no_points == 2 and order == 2 and plot is False:\n # Solution to quadratic interpolation is given by:\n # a = -(f1 - f2 - g1(x1 - x2))/(x1 - x2)^2\n # x_min = x1 - g1/(2a)\n # if x1 = 0, then is given by:\n # x_min = - (g1*x2^2)/(2(f2 - f1 - g1*x2))\n\n if(points[0, 0] == 0):\n x_sol = -points[0, 2]*points[1, 0]**2/(2*(points[1, 1] - points[0, 1] - points[0, 2]*points[1, 0]))\n else:\n a = -(points[0, 1] - points[1, 1] - points[0, 2]*(points[0, 0] - points[1, 0]))/(points[0, 0] - points[1, 0])**2\n x_sol = points[0, 0] - points[0, 2]/(2*a)\n\n x_sol = np.minimum(np.maximum(x_min_bound, x_sol), x_max_bound)\n\n # explicit formula for cubic interpolation\n elif no_points == 2 and order == 3 and plot is False:\n # Solution to cubic interpolation is given by:\n # d1 = g1 + g2 - 3((f1 - f2)/(x1 - x2))\n # d2 = sqrt(d1^2 - g1*g2)\n # x_min = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2))\n d1 = points[0, 2] + points[1, 2] - 3*((points[0, 1] - points[1, 1])/(points[0, 0] - points[1, 0])) \n if d1**2 >= points[0, 2]*points[1, 2]:\n d2 = np.sqrt(d1**2 - points[0, 2]*points[1, 2])\n x_sol = points[1, 0] - (points[1, 0] - points[0, 0])*((points[1, 2] + d2 - d1)/(points[1, 2] - points[0, 2] + 2*d2))\n x_sol = np.minimum(np.maximum(x_min_bound, x_sol), x_max_bound)\n else:\n x_sol = (x_max_bound + x_min_bound)/2\n\n # solve linear system\n else:\n # define linear constraints\n A = np.zeros((0, order+1))\n b = np.zeros((0, 1))\n\n # add linear constraints on function values\n for i in range(no_points):\n if not np.isnan(points[i, 1]):\n constraint = np.zeros((1, order+1))\n for j in range(order, -1, -1):\n constraint[0, order - j] = points[i, 0]**j\n A = np.append(A, constraint, 0)\n b = np.append(b, points[i, 1])\n\n # add linear constraints on gradient values\n for i in range(no_points):\n if 
not np.isnan(points[i, 2]):\n constraint = np.zeros((1, order+1))\n for j in range(order):\n constraint[0, j] = (order-j)*points[i,0]**(order-j-1)\n A = np.append(A, constraint, 0)\n b = np.append(b, points[i, 2])\n\n # check if system is solvable\n if(A.shape[0] != A.shape[1] or np.linalg.matrix_rank(A) != A.shape[0]):\n x_sol = (x_min_bound + x_max_bound)/2\n f_min = np.Inf\n else:\n # solve linear system for interpolating polynomial\n coeff = np.linalg.solve(A, b)\n\n # compute critical points\n dcoeff = np.zeros(order)\n for i in range(len(coeff) - 1):\n dcoeff[i] = coeff[i]*(order-i)\n\n crit_pts = np.array([x_min_bound, x_max_bound])\n crit_pts = np.append(crit_pts, points[:, 0])\n\n if not np.isinf(dcoeff).any():\n roots = np.roots(dcoeff)\n crit_pts = np.append(crit_pts, roots)\n\n # test critical points\n f_min = np.Inf\n x_sol = (x_min_bound + x_max_bound)/2 # defaults to bisection\n for crit_pt in crit_pts:\n if np.isreal(crit_pt) and crit_pt >= x_min_bound and crit_pt <= x_max_bound:\n F_cp = np.polyval(coeff, crit_pt)\n if np.isreal(F_cp) and F_cp < f_min:\n x_sol = np.real(crit_pt)\n f_min = np.real(F_cp)\n\n if(plot):\n plt.figure()\n x = np.arange(x_min_bound, x_max_bound, (x_max_bound - x_min_bound)/10000)\n f = np.polyval(coeff, x)\n plt.plot(x, f)\n plt.plot(x_sol, f_min, 'x')\n\n return x_sol\n\n#%% L-BFGS Optimizer\nclass LBFGS(Optimizer):\n \"\"\"\n Implements the L-BFGS algorithm. Compatible with multi-batch and full-overlap\n L-BFGS implementations and (stochastic) Powell damping. Partly based on the \n original L-BFGS implementation in PyTorch, Mark Schmidt's minFunc MATLAB code, \n and Michael Overton's weak Wolfe line search MATLAB code.\n\n Implemented by: Hao-Jun Michael Shi and Dheevatsa Mudigere\n Last edited 12/6/18.\n\n Warnings:\n . Does not support per-parameter options and parameter groups.\n . All parameters have to be on a single device.\n\n Inputs:\n lr (float): steplength or learning rate (default: 1)\n history_size (int): update history size (default: 10)\n line_search (str): designates line search to use (default: 'Wolfe')\n Options:\n 'None': uses steplength designated in algorithm\n 'Armijo': uses Armijo backtracking line search\n 'Wolfe': uses Armijo-Wolfe bracketing line search\n dtype: data type (default: torch.float)\n debug (bool): debugging mode\n\n References:\n [1] Berahas, Albert S., Jorge Nocedal, and Martin Takác. \"A Multi-Batch L-BFGS \n Method for Machine Learning.\" Advances in Neural Information Processing \n Systems. 2016.\n [2] Bollapragada, Raghu, et al. \"A Progressive Batching L-BFGS Method for Machine \n Learning.\" International Conference on Machine Learning. 2018.\n [3] Lewis, Adrian S., and Michael L. Overton. \"Nonsmooth Optimization via Quasi-Newton\n Methods.\" Mathematical Programming 141.1-2 (2013): 135-163.\n [4] Liu, Dong C., and Jorge Nocedal. \"On the Limited Memory BFGS Method for \n Large Scale Optimization.\" Mathematical Programming 45.1-3 (1989): 503-528.\n [5] Nocedal, Jorge. \"Updating Quasi-Newton Matrices With Limited Storage.\" \n Mathematics of Computation 35.151 (1980): 773-782.\n [6] Nocedal, Jorge, and Stephen J. Wright. \"Numerical Optimization.\" Springer New York,\n 2006.\n [7] Schmidt, Mark. \"minFunc: Unconstrained Differentiable Multivariate Optimization \n in Matlab.\" Software available at http://www.cs.ubc.ca/~schmidtm/Software/minFunc.html \n (2005).\n [8] Schraudolph, Nicol N., Jin Yu, and Simon Günter. 
\"A Stochastic Quasi-Newton \n Method for Online Convex Optimization.\" Artificial Intelligence and Statistics. \n 2007.\n [9] Wang, Xiao, et al. \"Stochastic Quasi-Newton Methods for Nonconvex Stochastic \n Optimization.\" SIAM Journal on Optimization 27.2 (2017): 927-956.\n\n \"\"\"\n\n def __init__(self, params, lr=1, history_size=10, line_search='Wolfe', \n dtype=torch.float, debug=False):\n\n # ensure inputs are valid\n if not 0.0 <= lr:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if not 0 <= history_size:\n raise ValueError(\"Invalid history size: {}\".format(history_size))\n if line_search not in ['Armijo', 'Wolfe', 'None']:\n raise ValueError(\"Invalid line search: {}\".format(line_search))\n\n defaults = dict(lr=lr, history_size=history_size, line_search=line_search, \n dtype=dtype, debug=debug)\n super(LBFGS, self).__init__(params, defaults)\n\n if len(self.param_groups) != 1:\n raise ValueError(\"L-BFGS doesn't support per-parameter options \"\n \"(parameter groups)\")\n\n self._params = self.param_groups[0]['params']\n self._numel_cache = None\n\n state = self.state['global_state']\n state.setdefault('n_iter', 0)\n state.setdefault('curv_skips', 0)\n state.setdefault('fail_skips', 0)\n state.setdefault('H_diag',1)\n state.setdefault('fail', True)\n\n state['old_dirs'] = []\n state['old_stps'] = []\n\n def _numel(self):\n if self._numel_cache is None:\n self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0)\n return self._numel_cache\n\n def _gather_flat_grad(self):\n views = []\n for p in self._params:\n if p.grad is None:\n view = p.data.new(p.data.numel()).zero_()\n elif p.grad.data.is_sparse:\n view = p.grad.data.to_dense().view(-1)\n else:\n view = p.grad.data.view(-1)\n views.append(view)\n return torch.cat(views, 0)\n\n def _add_update(self, step_size, update):\n offset = 0\n for p in self._params:\n numel = p.numel()\n # view as to avoid deprecated pointwise semantics\n p.data.add_(step_size, update[offset:offset + numel].view_as(p.data))\n offset += numel\n assert offset == self._numel()\n\n def _copy_params(self):\n current_params = []\n for param in self._params:\n current_params.append(deepcopy(param.data))\n return current_params\n\n def _load_params(self, current_params):\n i = 0\n for param in self._params:\n param.data[:] = current_params[i]\n i += 1\n\n def line_search(self, line_search):\n \"\"\"\n Switches line search option.\n \n Inputs:\n line_search (str): designates line search to use\n Options:\n 'None': uses steplength designated in algorithm\n 'Armijo': uses Armijo backtracking line search\n 'Wolfe': uses Armijo-Wolfe bracketing line search\n \n \"\"\"\n \n group = self.param_groups[0]\n group['line_search'] = line_search\n \n return\n\n def two_loop_recursion(self, vec):\n \"\"\"\n Performs two-loop recursion on given vector to obtain Hv.\n\n Inputs:\n vec (tensor): 1-D tensor to apply two-loop recursion to\n\n Output:\n r (tensor): matrix-vector product Hv\n\n \"\"\"\n\n group = self.param_groups[0]\n history_size = group['history_size']\n\n state = self.state['global_state']\n old_dirs = state.get('old_dirs') # change in gradients\n old_stps = state.get('old_stps') # change in iterates\n H_diag = state.get('H_diag')\n\n # compute the product of the inverse Hessian approximation and the gradient\n num_old = len(old_dirs)\n\n if 'rho' not in state:\n state['rho'] = [None] * history_size\n state['alpha'] = [None] * history_size\n rho = state['rho']\n alpha = state['alpha']\n\n for i in range(num_old):\n rho[i] 
= 1. / old_stps[i].dot(old_dirs[i])\n\n q = vec\n for i in range(num_old - 1, -1, -1):\n alpha[i] = old_dirs[i].dot(q) * rho[i]\n q.add_(-alpha[i], old_stps[i])\n\n # multiply by initial Hessian \n # r/d is the final direction\n r = torch.mul(q, H_diag)\n for i in range(num_old):\n beta = old_stps[i].dot(r) * rho[i]\n r.add_(alpha[i] - beta, old_dirs[i])\n\n return r\n\n def curvature_update(self, flat_grad, eps=1e-2, damping=False):\n \"\"\"\n Performs curvature update.\n\n Inputs:\n flat_grad (tensor): 1-D tensor of flattened gradient for computing \n gradient difference with previously stored gradient\n eps (float): constant for curvature pair rejection or damping (default: 1e-2)\n damping (bool): flag for using Powell damping (default: False)\n \"\"\"\n\n assert len(self.param_groups) == 1\n\n # load parameters\n if(eps <= 0):\n raise(ValueError('Invalid eps; must be positive.'))\n\n group = self.param_groups[0]\n history_size = group['history_size']\n debug = group['debug']\n\n # variables cached in state (for tracing)\n state = self.state['global_state']\n fail = state.get('fail')\n \n # check if line search failed\n if not fail:\n \n d = state.get('d')\n t = state.get('t')\n old_dirs = state.get('old_dirs')\n old_stps = state.get('old_stps')\n H_diag = state.get('H_diag')\n prev_flat_grad = state.get('prev_flat_grad')\n Bs = state.get('Bs')\n \n # compute y's\n y = flat_grad.sub(prev_flat_grad)\n s = d.mul(t)\n sBs = s.dot(Bs)\n ys = y.dot(s) # y*s\n\n # update L-BFGS matrix\n if ys > eps*sBs or damping == True:\n \n # perform Powell damping\n if damping == True and ys < eps*sBs:\n if debug:\n print('Applying Powell damping...')\n theta = ((1-eps)*sBs)/(sBs - ys)\n y = theta*y + (1-theta)*Bs\n \n # updating memory\n if len(old_dirs) == history_size:\n # shift history by one (limited-memory)\n old_dirs.pop(0)\n old_stps.pop(0)\n \n # store new direction/step\n old_dirs.append(s)\n old_stps.append(y)\n \n # update scale of initial Hessian approximation\n H_diag = ys / y.dot(y) # (y*y)\n \n state['old_dirs'] = old_dirs\n state['old_stps'] = old_stps\n state['H_diag'] = H_diag\n\n else:\n # save skip\n state['curv_skips'] += 1\n if debug:\n print('Curvature pair skipped due to failed criterion')\n\n else:\n # save skip\n state['fail_skips'] += 1\n if debug:\n print('Line search failed; curvature pair update skipped')\n\n return\n\n def _step(self, p_k, g_Ok, g_Sk=None, options={}):\n \"\"\"\n Performs a single optimization step.\n\n Inputs:\n p_k (tensor): 1-D tensor specifying search direction\n g_Ok (tensor): 1-D tensor of flattened gradient over overlap O_k used\n for gradient differencing in curvature pair update\n g_Sk (tensor): 1-D tensor of flattened gradient over full sample S_k\n used for curvature pair damping or rejection criterion,\n if None, will use g_Ok (default: None)\n options (dict): contains options for performing line search\n\n Options for Armijo backtracking line search:\n 'closure' (callable): reevaluates model and returns function value\n 'current_loss' (tensor): objective value at current iterate (default: F(x_k))\n 'gtd' (tensor): inner product g_Ok'd in line search (default: g_Ok'd)\n 'eta' (tensor): factor for decreasing steplength > 0 (default: 2)\n 'c1' (tensor): sufficient decrease constant in (0, 1) (default: 1e-4)\n 'max_ls' (int): maximum number of line search steps permitted (default: 10)\n 'interpolate' (bool): flag for using interpolation (default: True)\n 'inplace' (bool): flag for inplace operations (default: True)\n 'ls_debug' (bool): debugging 
mode for line search\n\n Options for Wolfe line search:\n 'closure' (callable): reevaluates model and returns function value\n 'current_loss' (tensor): objective value at current iterate (default: F(x_k))\n 'gtd' (tensor): inner product g_Ok'd in line search (default: g_Ok'd)\n 'eta' (float): factor for extrapolation (default: 2)\n 'c1' (float): sufficient decrease constant in (0, 1) (default: 1e-4)\n 'c2' (float): curvature condition constant in (0, 1) (default: 0.9)\n 'max_ls' (int): maximum number of line search steps permitted (default: 10)\n 'interpolate' (bool): flag for using interpolation (default: True)\n 'inplace' (bool): flag for inplace operations (default: True)\n 'ls_debug' (bool): debugging mode for line search\n\n Outputs (depends on line search):\n . No line search:\n t (float): steplength\n . Armijo backtracking line search:\n F_new (tensor): loss function at new iterate\n t (tensor): final steplength\n ls_step (int): number of backtracks\n closure_eval (int): number of closure evaluations\n desc_dir (bool): descent direction flag\n True: p_k is descent direction with respect to the line search\n function\n False: p_k is not a descent direction with respect to the line\n search function\n fail (bool): failure flag\n True: line search reached maximum number of iterations, failed\n False: line search succeeded\n . Wolfe line search:\n F_new (tensor): loss function at new iterate\n g_new (tensor): gradient at new iterate\n t (float): final steplength\n ls_step (int): number of backtracks\n closure_eval (int): number of closure evaluations\n grad_eval (int): number of gradient evaluations\n desc_dir (bool): descent direction flag\n True: p_k is descent direction with respect to the line search\n function\n False: p_k is not a descent direction with respect to the line\n search function\n fail (bool): failure flag\n True: line search reached maximum number of iterations, failed\n False: line search succeeded\n\n Notes:\n . 
If encountering line search failure in the deterministic setting, one\n should try increasing the maximum number of line search steps max_ls.\n\n \"\"\"\n\n assert len(self.param_groups) == 1\n\n # load parameter options\n group = self.param_groups[0]\n lr = group['lr']\n line_search = group['line_search']\n dtype = group['dtype']\n debug = group['debug']\n\n # variables cached in state (for tracing)\n state = self.state['global_state']\n d = state.get('d')\n t = state.get('t')\n prev_flat_grad = state.get('prev_flat_grad')\n Bs = state.get('Bs')\n\n # keep track of nb of iterations\n state['n_iter'] += 1\n\n # set search direction\n d = p_k\n\n # modify previous gradient\n if prev_flat_grad is None:\n prev_flat_grad = g_Ok.clone()\n else:\n prev_flat_grad.copy_(g_Ok)\n\n # set initial step size\n t = lr\n\n # closure evaluation counter\n closure_eval = 0\n\n if g_Sk is None:\n g_Sk = g_Ok.clone()\n\n # perform Armijo backtracking line search\n if(line_search == 'Armijo'):\n\n # load options\n if(options):\n if('closure' not in options.keys()):\n raise(ValueError('closure option not specified.'))\n else:\n closure = options['closure']\n\n if('gtd' not in options.keys()):\n gtd = g_Ok.dot(d)\n else:\n gtd = options['gtd']\n\n if('current_loss' not in options.keys()):\n F_k = closure()\n closure_eval += 1\n else:\n F_k = options['current_loss']\n\n if('eta' not in options.keys()):\n eta = 2\n elif(options['eta'] <= 0):\n raise(ValueError('Invalid eta; must be positive.'))\n else:\n eta = options['eta']\n\n if('c1' not in options.keys()):\n c1 = 1e-4\n elif(options['c1'] >= 1 or options['c1'] <= 0):\n raise(ValueError('Invalid c1; must be strictly between 0 and 1.'))\n else:\n c1 = options['c1']\n\n if('max_ls' not in options.keys()):\n max_ls = 10\n elif(options['max_ls'] <= 0):\n raise(ValueError('Invalid max_ls; must be positive.'))\n else:\n max_ls = options['max_ls']\n\n if('interpolate' not in options.keys()):\n interpolate = True\n else:\n interpolate = options['interpolate']\n\n if('inplace' not in options.keys()):\n inplace = True\n else:\n inplace = options['inplace']\n \n if('ls_debug' not in options.keys()):\n ls_debug = False\n else:\n ls_debug = options['ls_debug']\n\n else:\n raise(ValueError('Options are not specified; need closure evaluating function.'))\n \n # begin print for debug mode\n if ls_debug:\n print('==================================== Begin Armijo line search ===================================')\n print('F(x): %.8e g*d: %.8e' %(F_k, gtd))\n \n # check if search direction is descent direction\n if gtd >= 0:\n desc_dir = False\n if debug:\n print('Not a descent direction!')\n else:\n desc_dir = True\n \n ### Matlab Armijo Backtrack line search \n \n f = F_k\n g = self._gather_flat_grad() \n F_new, g_new, t, closure_eval, fail = self._line_search_armijo_backtrack(t, d, f, f, g, gtd, c1, closure) \n ls_step = np.nan\n \n# ### PyTorch Armijo Backtrack line search\n#\n# # initialize values\n# if(interpolate):\n# if(torch.cuda.is_available()):\n# F_prev = torch.tensor(np.nan, dtype=dtype).cuda()\n# else:\n# F_prev = torch.tensor(np.nan, dtype=dtype)\n#\n# ls_step = 0\n# t_prev = 0 # old steplength\n# fail = False # failure flag \n#\n# # store values if not in-place\n# if not inplace:\n# current_params = self._copy_params()\n#\n# # update and evaluate at new point\n# self._add_update(t, d)\n# F_new = closure()\n# closure_eval += 1\n#\n# # print info if debugging\n# if(ls_debug):\n# print('LS Step: %d t: %.8e F(x+td): %.8e F-c1*t*g*d: %.8e F(x): %.8e'\n# %(ls_step, t, 
F_new, F_k + c1*t*gtd, F_k))\n#\n# # check Armijo condition\n# while F_new > F_k + c1*t*gtd or not is_legal(F_new):\n#\n# # check if maximum number of iterations reached\n# if(ls_step >= max_ls):\n# if inplace:\n# self._add_update(-t, d)\n# else:\n# self._load_params(current_params)\n#\n# t = 0\n# F_new = closure()\n# closure_eval += 1\n# fail = True\n# break\n#\n# else:\n# # store current steplength\n# t_new = t\n#\n# # compute new steplength\n#\n# # if first step or not interpolating, then multiply by factor\n# if(ls_step == 0 or not interpolate or not is_legal(F_new)):\n# t = t/eta\n#\n# # if second step, use function value at new point along with \n# # gradient and function at current iterate\n# elif(ls_step == 1 or not is_legal(F_prev)):\n# t = polyinterp(np.array([[0, F_k.item(), gtd.item()], [t_new, F_new.item(), np.nan]]))\n#\n# # otherwise, use function values at new point, previous point,\n# # and gradient and function at current iterate\n# else:\n# t = polyinterp(np.array([[0, F_k.item(), gtd.item()], [t_new, F_new.item(), np.nan], \n# [t_prev, F_prev.item(), np.nan]]))\n#\n# # if values are too extreme, adjust t\n# if(interpolate):\n# if(t < 1e-3*t_new):\n# t = 1e-3*t_new\n# elif(t > 0.6*t_new):\n# t = 0.6*t_new\n#\n# # store old point\n# F_prev = F_new\n# t_prev = t_new\n#\n# # update iterate and reevaluate\n# if inplace:\n# self._add_update(t-t_new, d)\n# else:\n# self._load_params(current_params)\n# self._add_update(t, d)\n#\n# F_new = closure()\n# closure_eval += 1\n# ls_step += 1 # iterate\n# \n# # print info if debugging\n# if(ls_debug):\n# print('LS Step: %d t: %.8e F(x+td): %.8e F-c1*t*g*d: %.8e F(x): %.8e'\n# %(ls_step, t, F_new, F_k + c1*t*gtd, F_k))\n\n # store Bs\n if Bs is None:\n Bs = (g_Sk.mul(-t)).clone()\n else:\n Bs.copy_(g_Sk.mul(-t))\n \n # print final steplength\n if ls_debug:\n print('Final Steplength:', t)\n print('===================================== End Armijo line search ====================================')\n\n state['d'] = d\n state['prev_flat_grad'] = prev_flat_grad\n state['t'] = t\n state['Bs'] = Bs\n state['fail'] = fail\n\n return F_new, t, ls_step, closure_eval, desc_dir, fail\n\n # perform weak Wolfe line search\n elif(line_search == 'Wolfe'):\n\n # load options\n if(options):\n if('closure' not in options.keys()):\n raise(ValueError('closure option not specified.'))\n else:\n closure = options['closure']\n\n if('current_loss' not in options.keys()):\n F_k = closure()\n closure_eval += 1\n else:\n F_k = options['current_loss']\n\n if('gtd' not in options.keys()):\n gtd = g_Ok.dot(d)\n else:\n gtd = options['gtd']\n\n if('eta' not in options.keys()):\n eta = 2\n elif(options['eta'] <= 1):\n raise(ValueError('Invalid eta; must be greater than 1.'))\n else:\n eta = options['eta']\n\n if('c1' not in options.keys()):\n c1 = 1e-4\n elif(options['c1'] >= 1 or options['c1'] <= 0):\n raise(ValueError('Invalid c1; must be strictly between 0 and 1.'))\n else:\n c1 = options['c1']\n\n if('c2' not in options.keys()):\n c2 = 0.9\n elif(options['c2'] >= 1 or options['c2'] <= 0):\n raise(ValueError('Invalid c2; must be strictly between 0 and 1.'))\n elif(options['c2'] <= c1):\n raise(ValueError('Invalid c2; must be strictly larger than c1.'))\n else:\n c2 = options['c2']\n\n if('max_ls' not in options.keys()):\n max_ls = 10\n elif(options['max_ls'] <= 0):\n raise(ValueError('Invalid max_ls; must be positive.'))\n else:\n max_ls = options['max_ls']\n\n if('interpolate' not in options.keys()):\n interpolate = True\n else:\n interpolate = 
options['interpolate']\n\n if('inplace' not in options.keys()):\n inplace = True\n else:\n inplace = options['inplace']\n \n if('ls_debug' not in options.keys()):\n ls_debug = False\n else:\n ls_debug = options['ls_debug']\n\n else:\n raise(ValueError('Options are not specified; need closure evaluating function.'))\n \n # begin print for debug mode\n if ls_debug:\n print('==================================== Begin Wolfe line search ====================================')\n print('F(x): %.8e g*d: %.8e' %(F_k, gtd))\n\n # check if search direction is descent direction\n if gtd >= 0:\n desc_dir = False\n if debug:\n print('Not a descent direction!')\n else:\n desc_dir = True \n \n ### Strong Wolfe line search \n \n f = F_k\n g = self._gather_flat_grad()\n F_new, g_new, t, ls_step, closure_eval, grad_eval, fail = self._line_search_strong_wolfe(t, d, f, g, gtd, c1, c2, max_ls, closure)\n \n# ### Weak Wolfe line search\n# \n# # store values if not in-place\n# if not inplace:\n# current_params = self._copy_params()\n# \n# # update and evaluate at new point\n# self._add_update(t, d)\n# F_new = closure()\n# closure_eval += 1\n# \n# # initialize counters\n# ls_step = 0\n# grad_eval = 0 # tracks gradient evaluations\n# t_prev = 0 # old steplength\n#\n# # initialize bracketing variables and flag\n# alpha = 0\n# beta = float('Inf')\n# fail = False\n#\n# # initialize values for line search\n# if(interpolate):\n# F_a = F_k\n# g_a = gtd\n#\n# if(torch.cuda.is_available()):\n# F_b = torch.tensor(np.nan, dtype=dtype).cuda()\n# g_b = torch.tensor(np.nan, dtype=dtype).cuda()\n# else:\n# F_b = torch.tensor(np.nan, dtype=dtype)\n# g_b = torch.tensor(np.nan, dtype=dtype)\n#\n# # main loop\n# while True:\n#\n# # check if maximum number of line search steps have been reached\n# if(ls_step >= max_ls):\n# if inplace:\n# self._add_update(-t, d)\n# else:\n# self._load_params(current_params)\n#\n# t = 0\n# F_new = closure()\n# F_new.backward()\n# g_new = self._gather_flat_grad()\n# closure_eval += 1\n# grad_eval += 1\n# fail = True\n# break\n#\n# # print info if debugging\n# if(ls_debug):\n# print('LS Step: %d t: %.8e alpha: %.8e beta: %.8e' \n# %(ls_step, t, alpha, beta))\n# print('Armijo: F(x+td): %.8e F-c1*t*g*d: %.8e F(x): %.8e'\n# %(F_new, F_k + c1*t*gtd, F_k))\n#\n# # check Armijo condition\n# if(F_new > F_k + c1*t*gtd):\n#\n# # set upper bound\n# beta = t\n# t_prev = t\n#\n# # update interpolation quantities\n# if(interpolate):\n# F_b = F_new\n# if(torch.cuda.is_available()):\n# g_b = torch.tensor(np.nan, dtype=dtype).cuda()\n# else:\n# g_b = torch.tensor(np.nan, dtype=dtype)\n#\n# else:\n#\n# # compute gradient\n# F_new.backward()\n# g_new = self._gather_flat_grad()\n# grad_eval += 1\n# gtd_new = g_new.dot(d)\n# \n# # print info if debugging\n# if(ls_debug):\n# print('Wolfe: g(x+td)*d: %.8e c2*g*d: %.8e gtd: %.8e'\n# %(gtd_new, c2*gtd, gtd))\n#\n# # check curvature condition\n# if(gtd_new < c2*gtd):\n#\n# # set lower bound\n# alpha = t\n# t_prev = t\n#\n# # update interpolation quantities\n# if(interpolate):\n# F_a = F_new\n# g_a = gtd_new\n#\n# else:\n# break\n#\n# # compute new steplength\n#\n# # if first step or not interpolating, then bisect or multiply by factor\n# if(not interpolate or not is_legal(F_b)):\n# if(beta == float('Inf')):\n# t = eta*t\n# else:\n# t = (alpha + beta)/2.0\n#\n# # otherwise interpolate between a and b\n# else:\n# t = polyinterp(np.array([[alpha, F_a.item(), g_a.item()],[beta, F_b.item(), g_b.item()]]))\n#\n# # if values are too extreme, adjust t\n# if(beta == float('Inf')):\n# if(t 
> 2*eta*t_prev):\n# t = 2*eta*t_prev\n# elif(t < eta*t_prev):\n# t = eta*t_prev\n# else:\n# if(t < alpha + 0.2*(beta - alpha)):\n# t = alpha + 0.2*(beta - alpha)\n# elif(t > (beta - alpha)/2.0):\n# t = (beta - alpha)/2.0\n#\n# # if we obtain nonsensical value from interpolation\n# if(t <= 0):\n# t = (beta - alpha)/2.0\n#\n# # update parameters\n# if inplace:\n# self._add_update(t - t_prev, d)\n# else:\n# self._load_params(current_params)\n# self._add_update(t, d)\n#\n# # evaluate closure\n# F_new = closure()\n# closure_eval += 1\n# ls_step += 1\n\n # store Bs\n if Bs is None:\n Bs = (g_Sk.mul(-t)).clone()\n else:\n Bs.copy_(g_Sk.mul(-t))\n \n # print final steplength\n if ls_debug:\n print('Final Steplength:', t)\n print('===================================== End Wolfe line search =====================================')\n\n state['d'] = d\n state['prev_flat_grad'] = prev_flat_grad\n state['t'] = t\n state['Bs'] = Bs\n state['fail'] = fail\n\n return F_new, g_new, t, ls_step, closure_eval, grad_eval, desc_dir, fail\n\n else:\n\n # perform update\n self._add_update(t, d)\n\n # store Bs\n if Bs is None:\n Bs = (g_Sk.mul(-t)).clone()\n else:\n Bs.copy_(g_Sk.mul(-t))\n\n state['d'] = d\n state['prev_flat_grad'] = prev_flat_grad\n state['t'] = t\n state['Bs'] = Bs\n state['fail'] = False\n\n return t\n \n def step(self, p_k, g_Ok, g_Sk=None, options={}):\n return self._step(p_k, g_Ok, g_Sk, options)\n \n @jit\n def _line_search_armijo_backtrack(self, t, d, f, fr, g, gtd, c1, funObj):\n \"\"\"\n Implements Matlab Armijo Backtrack line search\n \n \"\"\"\n \n fail = False\n \n # Evaluate the objective and gradient at the initial step\n current_params = self._copy_params()\n self._add_update(t, d)\n f_new = funObj()\n f_new.backward()\n g_new = self._gather_flat_grad()\n \n funEvals = 1\n \n while f_new > fr + c1 * t * gtd or not is_legal(f_new):\n temp = t\n \n if not is_legal(f_new):\n # Ignore value of new point\n t = 0.5 * t\n elif not is_legal(g_new):\n # Use function value at new point, but not its derivative\n # Backtracking w/ quadratic interpolation based on two points\n t = polyinterp(np.array([[0, f.item(), gtd.item()], [t, f_new.item(), np.nan]]), 0, t)\n else:\n # Use function value and derivative at new point\n # Backtracking w/ cubic interpolation w/ derivative\n t = polyinterp(np.array([[0, f.item(), gtd.item()], [t, f_new.item(), g_new.dot(d).item()]]), 0, t)\n \n # Adjust if change in t is too small/large\n if t < temp * 1e-3:\n t = temp * 1e-3\n elif t > temp * 0.6:\n t = temp * 0.6\n \n self._load_params(current_params)\n self._add_update(t, d)\n f_new = funObj()\n f_new.backward()\n g_new = self._gather_flat_grad()\n \n funEvals = funEvals + 1;\n \n # Check whether step size has become too small\n if torch.max(torch.abs(t * d)) <= 1e-9:\n fail = True\n \n t = 0\n f_new = f\n g_new = g\n \n break\n \n self._load_params(current_params)\n self._add_update(t, d)\n \n return f_new, g_new, t, funEvals, fail\n \n @jit\n def _line_search_strong_wolfe(self, t, d, f, g, gtd, c1, c2, max_ls, funObj):\n \"\"\"\n Implements MATLAB Strong Wolfe line search\n \n \"\"\"\n \n fail = False\n \n # Evaluate the objective and gradient at the initial step\n current_params = self._copy_params()\n self._add_update(t, d)\n f_new = funObj()\n f_new.backward()\n g_new = self._gather_flat_grad()\n \n funEvals = 1\n gtd_new = g_new.dot(d)\n \n # Bracket an interval containing a point satisfying the Wolfe criteria\n t_prev = 0\n f_prev = f\n g_prev = g\n gtd_prev = gtd\n \n nrmD = torch.norm(d)\n done = 0\n 
LSiter = 0\n \n while LSiter < max_ls:\n # Bracketing phase:\n if not is_legal(f_new) or not is_legal(g_new):\n t = (t + t_prev) / 2\n \n # Do Armijo\n f_new, g_new, t, Armijo_FunEvals, fail = self._line_search_armijo_backtrack(t, d, f, f, g, gtd, c1, funObj)\n funEvals = funEvals + Armijo_FunEvals\n \n return f_new, g_new, t, LSiter, funEvals, 0, fail\n \n if (f_new > f + c1 * t * gtd) or (LSiter > 1 and f_new >= f_prev):\n bracket = [t_prev, t]\n bracketFval = [f_prev.item(), f_new.item()]\n bracketGval = [g_prev.numpy(), g_new.numpy()]\n break\n elif torch.abs(gtd_new) <= -c2 * gtd:\n bracket = t\n bracketFval = f_new.item()\n bracketGval = g_new.numpy()\n done = 1\n break\n elif gtd_new >= 0:\n bracket = [t_prev, t]\n bracketFval = [f_prev.item(), f_new.item()]\n bracketGval = [g_prev.numpy(), g_new.numpy()]\n break\n \n temp = t_prev\n t_prev = t\n minStep = t + 0.01 * (t - temp)\n maxStep = t * 10\n \n t = polyinterp(np.array([[temp, f_prev.item(), gtd_prev.item()], [t, f_new.item(), gtd_new.item()]]), minStep, maxStep)\n \n f_prev = f_new\n g_prev = g_new\n gtd_prev = gtd_new\n \n self._load_params(current_params)\n self._add_update(t, d)\n f_new = funObj()\n f_new.backward()\n g_new = self._gather_flat_grad()\n \n funEvals = funEvals + 1\n gtd_new = g_new.dot(d)\n \n LSiter = LSiter + 1\n \n if LSiter == max_ls:\n bracket = [0, t]\n bracketFval = [f.item(), f_new.item()]\n bracketGval = [g.numpy(), g_new.numpy()]\n \n # Zoom phase:\n # We now either have a point satisfying the criteria, or a bracket surrounding a point satisfying the criteria\n # Refine the bracket until we find a point satisfying the criteria\n insufProgress = 0\n \n while done == 0 and LSiter < max_ls:\n # Find high and low points in bracket\n f_LO = np.amin(bracketFval)\n LOpos = np.argmin(bracketFval)\n HIpos = -LOpos + 1\n \n # Compute new trial value\n t = polyinterp(np.array([[bracket[0], bracketFval[0], np.dot(bracketGval[0], d.numpy())], [bracket[1], bracketFval[1], np.dot(bracketGval[1], d.numpy())]]))\n \n # Test that we are making sufficient progress\n if min(max(bracket) - t, t - min(bracket)) / (max(bracket) - min(bracket)) < 0.1:\n if insufProgress or t >= max(bracket) or t <= min(bracket):\n if abs(t - max(bracket)) < abs(t - min(bracket)):\n t = max(bracket) - 0.1 * (max(bracket) - min(bracket))\n else:\n t = min(bracket) + 0.1 * (max(bracket) - min(bracket))\n insufProgress = 0\n else:\n insufProgress = 1\n else:\n insufProgress = 0\n \n # Evaluate new point\n self._load_params(current_params)\n self._add_update(t, d)\n f_new = funObj()\n f_new.backward()\n g_new = self._gather_flat_grad()\n \n funEvals = funEvals + 1\n gtd_new = g_new.dot(d)\n \n LSiter = LSiter + 1\n \n armijo = f_new < f + c1 * t * gtd\n \n if not armijo or f_new >= f_LO:\n # Armijo condition not satisfied or not lower than lowest point\n bracket[HIpos] = t\n bracketFval[HIpos] = f_new.item()\n bracketGval[HIpos] = g_new.numpy()\n else:\n if torch.abs(gtd_new) <= -c2 * gtd:\n # Wolfe conditions satisfied\n done = 1\n elif gtd_new * (bracket[HIpos] - bracket[LOpos]) >= 0:\n # Old HI becomes new LO\n bracket[HIpos] = bracket[LOpos]\n bracketFval[HIpos] = bracketFval[LOpos]\n bracketGval[HIpos] = bracketGval[LOpos]\n \n # New point becomes new LO\n bracket[LOpos] = t\n bracketFval[LOpos] = f_new.item()\n bracketGval[LOpos] = g_new.numpy()\n \n if done == 0 and abs(bracket[0] - bracket[1]) * nrmD < 1e-9:\n break\n \n if LSiter == max_ls:\n fail = True\n \n f_LO = np.amin(bracketFval)\n LOpos = np.argmin(bracketFval)\n \n if 
isinstance(bracket, list):\n t = bracket[LOpos]\n f_new = torch.tensor(bracketFval[LOpos])\n g_new = torch.tensor(bracketGval[LOpos])\n else:\n t = bracket\n f_new = torch.tensor(bracketFval)\n g_new = torch.tensor(bracketGval)\n \n return f_new, g_new, t, LSiter, funEvals, funEvals, fail\n\n#%% Full-Batch (Deterministic) L-BFGS Optimizer (Wrapper)\nclass FullBatchLBFGS(LBFGS):\n \"\"\"\n Implements full-batch or deterministic L-BFGS algorithm. Compatible with\n Powell damping. Can be used when evaluating a deterministic function and\n gradient. Wraps the LBFGS optimizer. Performs the two-loop recursion,\n updating, and curvature updating in a single step.\n\n Implemented by: Hao-Jun Michael Shi and Dheevatsa Mudigere\n Last edited 11/15/18.\n\n Warnings:\n . Does not support per-parameter options and parameter groups.\n . All parameters have to be on a single device.\n\n Inputs:\n lr (float): steplength or learning rate (default: 1)\n history_size (int): update history size (default: 10)\n line_search (str): designates line search to use (default: 'Wolfe')\n Options:\n 'None': uses steplength designated in algorithm\n 'Armijo': uses Armijo backtracking line search\n 'Wolfe': uses Armijo-Wolfe bracketing line search\n dtype: data type (default: torch.float)\n debug (bool): debugging mode\n\n \"\"\"\n\n def __init__(self, params, lr=1, history_size=10, line_search='Wolfe', \n dtype=torch.float, debug=False):\n super(FullBatchLBFGS, self).__init__(params, lr, history_size, line_search, \n dtype, debug)\n\n def step(self, options={}):\n \"\"\"\n Performs a single optimization step.\n\n Inputs:\n options (dict): contains options for performing line search\n \n General Options:\n 'eps' (float): constant for curvature pair rejection or damping (default: 1e-2)\n 'damping' (bool): flag for using Powell damping (default: False)\n\n Options for Armijo backtracking line search:\n 'closure' (callable): reevaluates model and returns function value\n 'current_loss' (tensor): objective value at current iterate (default: F(x_k))\n 'gtd' (tensor): inner product g_Ok'd in line search (default: g_Ok'd)\n 'eta' (tensor): factor for decreasing steplength > 0 (default: 2)\n 'c1' (tensor): sufficient decrease constant in (0, 1) (default: 1e-4)\n 'max_ls' (int): maximum number of line search steps permitted (default: 10)\n 'interpolate' (bool): flag for using interpolation (default: True)\n 'inplace' (bool): flag for inplace operations (default: True)\n 'ls_debug' (bool): debugging mode for line search\n\n Options for Wolfe line search:\n 'closure' (callable): reevaluates model and returns function value\n 'current_loss' (tensor): objective value at current iterate (default: F(x_k))\n 'gtd' (tensor): inner product g_Ok'd in line search (default: g_Ok'd)\n 'eta' (float): factor for extrapolation (default: 2)\n 'c1' (float): sufficient decrease constant in (0, 1) (default: 1e-4)\n 'c2' (float): curvature condition constant in (0, 1) (default: 0.9)\n 'max_ls' (int): maximum number of line search steps permitted (default: 10)\n 'interpolate' (bool): flag for using interpolation (default: True)\n 'inplace' (bool): flag for inplace operations (default: True)\n 'ls_debug' (bool): debugging mode for line search\n\n Outputs (depends on line search):\n . No line search:\n t (float): steplength\n . 
Armijo backtracking line search:\n F_new (tensor): loss function at new iterate\n t (tensor): final steplength\n ls_step (int): number of backtracks\n closure_eval (int): number of closure evaluations\n desc_dir (bool): descent direction flag\n True: p_k is descent direction with respect to the line search\n function\n False: p_k is not a descent direction with respect to the line\n search function\n fail (bool): failure flag\n True: line search reached maximum number of iterations, failed\n False: line search succeeded\n . Wolfe line search:\n F_new (tensor): loss function at new iterate\n g_new (tensor): gradient at new iterate\n t (float): final steplength\n ls_step (int): number of backtracks\n closure_eval (int): number of closure evaluations\n grad_eval (int): number of gradient evaluations\n desc_dir (bool): descent direction flag\n True: p_k is descent direction with respect to the line search\n function\n False: p_k is not a descent direction with respect to the line\n search function\n fail (bool): failure flag\n True: line search reached maximum number of iterations, failed\n False: line search succeeded\n\n Notes:\n . If encountering line search failure in the deterministic setting, one\n should try increasing the maximum number of line search steps max_ls.\n\n \"\"\"\n \n # load options for damping and eps\n if('damping' not in options.keys()):\n damping = False\n else:\n damping = options['damping']\n \n if('eps' not in options.keys()):\n eps = 1e-2\n else:\n eps = options['eps']\n \n # gather gradient\n grad = self._gather_flat_grad()\n \n # update curvature if after 1st iteration\n state = self.state['global_state']\n if(state['n_iter'] > 0):\n self.curvature_update(grad, eps, damping)\n\n # compute search direction\n p = self.two_loop_recursion(-grad)\n\n # take step\n return self._step(p, grad, options=options)" ]
[ [ "torch.cat", "numpy.argmin", "torch.isnan", "numpy.min", "numpy.max", "torch.mul", "numpy.linalg.matrix_rank", "torch.norm", "numpy.polyval", "torch.abs", "numpy.isreal", "torch.tensor", "numpy.arange", "numpy.sqrt", "numpy.append", "numpy.array", "numpy.zeros", "numpy.real", "matplotlib.pyplot.figure", "torch.isinf", "numpy.amin", "numpy.isinf", "numpy.isnan", "numpy.roots", "matplotlib.pyplot.plot", "numpy.linalg.solve", "numpy.maximum" ] ]
lygztq/Knowledge-Distillation-Wheel
[ "0b1891052fb8998bf78e6c8ca8c76db85759c203" ]
[ "model/KD_model.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport os, sys\nfrom data_utils.DataManager import DataManager\n\nclass KDModel(object):\n def __init__(self, dataset_name, teacher_model, students_model):\n \"\"\"\n The class of Knowledge Distillation Model\n\n dataset_name: The name of your dataset, candidates are 'CIFAR-10', 'CIFAR-100', 'MNIST' \n (You can add new dataset by modifying the DataManager class).\n\n teacher_model: The model function of your teacher model, signature should be \n teacherName(input_tensor, trainable, is_train, temp)\n - input_tensor: Input of your teacher model\n - trainable: If true, we will train your teacher model.\n - is_train: If true means that we are in training process, this will determine dropout.\n - temp: The temperature parameter.\n \n student_model: The model function of your student model, signature should be\n studentName(input_tensor, is_train), the meaning of parameters is same as above.\n \"\"\"\n self.data_manager = DataManager(dataset_name)\n self.dataset_name = dataset_name\n self.teacher_model = teacher_model\n self.student_model = students_model\n\n def _writeRecord(self, path, name, data):\n \"\"\"\n Write the record of some variables during some process into a file.\n :param path: The path store your record files.\n :param name: The name of file.\n :param data: A list contains data you want to store.\n \"\"\"\n file_path = os.path.join(path, name)\n with open(file_path, 'w') as f:\n for item in data:\n f.write(str(item)+'\\t')\n f.write('\\n')\n\n def TrainTeacher(self, model_name, **kwargs):\n \"\"\"\n Train your teacher model.\n model_name: the name of your model. You can use the hyper-parameters with dataset name to name your model.\n\n kwargs:\n - batch_size: Size of batch\n - model_save_path: The path saving your teacher model value\n - num_epoch: How many epochs in training process.\n - basic_learning_rate: The initial learning rate\n - record_save_path: The path saving training process record.\n - is_dev(dev_mode): Development mode(i.e. 
using small dataset)\n - learning_rate_decay: The decay rate of learning rate, here we use exp decay.\n - reg_scale: The l2 regularization strength.\n - verbose: Print some debug information.\n \"\"\"\n batch_size = kwargs.pop(\"batch_size\", 64)\n model_save_path = kwargs.pop('model_save_path', \"./checkpoints/teacher/\")\n num_epoch = kwargs.pop(\"num_epoch\", 10)\n basic_learning_rate = kwargs.pop(\"basic_learning_rate\", 5e-4)\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/teacher\")\n is_dev = kwargs.pop(\"dev_mode\", False)\n learning_rate_decay = kwargs.pop(\"learning_rate_decay\", 0.01)\n reg_scale = kwargs.pop(\"reg_scale\", 1e-1)\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(model_save_path):\n os.makedirs(model_save_path)\n if not os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n \n tf.reset_default_graph()\n \n # Get training dataset\n if is_dev:\n train_data, train_label = self.data_manager.dev_data, self.data_manager.dev_label\n else:\n train_data, train_label = self.data_manager.train_data, self.data_manager.train_label\n \n num_train_data = train_data.shape[0]\n\n # The input of teacher model\n X = tf.placeholder(train_data.dtype, [None]+list(train_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(train_label.dtype, [None]+list(train_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n \n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n dataset = dataset.shuffle(buffer_size=8000)\n batched_dataset = dataset.batch(batch_size)\n\n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the teacher model\n #logits, probs = self.teacher_models[self.dataset_name](batch_data, is_train=is_train, reg_scale=reg_scale)\n regularizer = tf.contrib.layers.l2_regularizer(scale=reg_scale)\n with tf.variable_scope('teacher_model', regularizer=regularizer):\n logits, probs = self.teacher_model(batch_data, is_train=is_train)\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n saver = tf.train.Saver()\n\n # Training part\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=batch_label, name=\"cross_entropy_loss\"))\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'teacher_model'))\n loss += reg_loss\n global_step = tf.get_variable('global_step', initializer=0.0, trainable=False)\n learning_rate = tf.train.natural_exp_decay(\n basic_learning_rate, global_step,\n decay_rate=learning_rate_decay,\n name='learning_rate', decay_steps=1\n )\n \n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss)\n global_step_add = tf.assign_add(global_step, 1)\n\n train_acc_hist = []\n val_acc_hist = []\n train_loss_hist = []\n best_acc = 0.0\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(num_epoch):\n sess.run(iterator.initializer, feed_dict={X:train_data, y:train_label})\n cnt = 0\n total_correct_cnt = 0\n total_loss, acc = 0.0, 0.0\n while True:\n try:\n curr_loss, train, right_num, curr_result = sess.run(\n [loss, train_op, correct_num, result],\n feed_dict={is_train: True}\n )\n total_correct_cnt += right_num\n total_loss += curr_loss\n cnt += 1\n except tf.errors.OutOfRangeError:\n acc = 
total_correct_cnt * 1.0 / num_train_data\n last_loss = total_loss / cnt \n if verbose:\n div = \"===========================\"\n print(\"{}\\nEpoch {}/{}\\t\\tloss: {}\\t\\tacc: {}\".format(div, i+1, num_epoch, last_loss, acc))\n train_acc_hist.append(acc)\n train_loss_hist.append(last_loss)\n sess.run([global_step_add])\n if verbose:\n last_global_step, last_learning_rate = sess.run([global_step, learning_rate])\n print(\"learning_rate: {}\".format(last_learning_rate))\n break\n \n # Validation\n sess.run(iterator.initializer, feed_dict={X:self.data_manager.val_data, y:self.data_manager.val_label})\n acc = 0.0\n total_correct_cnt = 0\n while True:\n try:\n right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / self.data_manager.val_data.shape[0]\n if verbose:\n print(\"Validation acc: {}\".format(acc))\n val_acc_hist.append(acc)\n if acc > best_acc:\n best_acc = acc\n saver.save(sess, model_save_path)\n break\n # Write train process record\n self._writeRecord(record_save_path, \"{}_train_accuracy\".format(model_name), train_acc_hist)\n self._writeRecord(record_save_path, \"{}_validation_accuracy\".format(model_name), val_acc_hist)\n self._writeRecord(record_save_path, \"{}_train_loss\".format(model_name), train_loss_hist)\n if verbose:\n print(\"Finish Training Teacher Model! The Best Validation Accuracy is: {}\".format(best_acc))\n\n \n def TestTeacher(self, model_name, **kwargs):\n \"\"\"\n Test your teacher model.\n\n model_name: The name of your pretrained teacher model.\n kwargs:\n - batch_size: Size of batch\n - model_save_path: The path that you store your pretrained teacher model.\n - record_save_path: The path saving training process record.\n - verbose: Print some debug information.\n \"\"\"\n batch_size = kwargs.pop(\"batch_size\", 256)\n model_save_path = kwargs.pop(\"model_save_path\", \"./checkpoints/teacher/\")\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/teacher\")\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(model_save_path):\n raise RuntimeError(\"No pretrained model exists in '{}'\".format(model_save_path))\n if not os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n\n tf.reset_default_graph()\n\n # Get dataset\n test_data, test_label = self.data_manager.test_data, self.data_manager.test_label\n num_test_data = test_data.shape[0]\n\n X = tf.placeholder(test_data.dtype, shape=[None]+list(test_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(test_label.dtype, shape=[None]+list(test_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n\n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n batched_dataset = dataset.batch(batch_size)\n \n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the teacher model\n with tf.variable_scope('teacher_model'):\n logits, probs = self.teacher_model(batch_data, is_train=is_train)\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n saver = tf.train.Saver()\n\n # Test process\n with tf.Session() as sess:\n sess.run(iterator.initializer, feed_dict={X:test_data, y:test_label})\n saver.restore(sess, model_save_path)\n total_correct_cnt = 0\n while True:\n try:\n 
right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / num_test_data\n if verbose:\n print(\"Test accuracy: {}\".format(acc))\n break\n acc_hist = [acc]\n self._writeRecord(record_save_path, \"{}_test_accuracy\".format(model_name), acc_hist)\n\n\n def TrainStudent(self, model_name, teacher_model_name, **kwargs):\n \"\"\"\n Train your student model.\n \n model_name: The name of your student model.\n teacher_model_name: The name of your teacher model.\n \n kwargs:\n - batch_size: Size of batch\n - model_save_path: The path saving your teacher model value\n - teacher_model_path: The path that contains your pretrained teacher model.\n - temp: The temperature parameter.\n - num_epoch: How many epochs in training process.\n - basic_learning_rate: The initial learning rate\n - record_save_path: The path saving training process record.\n - is_dev(dev_mode): Development mode(i.e. using small dataset)\n - learning_rate_decay: The decay rate of learning rate, here we use exp decay.\n - reg_scale: The l2 regularization strength.\n - soft_target_scale: The mix weight of soft target denoted by lambda, loss = hard_target_loss + lambda * soft_target_loss \n - verbose: Print some debug information.\n \"\"\"\n batch_size = kwargs.pop(\"batch_size\", 64)\n model_save_path = kwargs.pop('model_save_path', \"./checkpoints/student/\")\n teacher_model_path = kwargs.pop(\"teacher_model_path\", \"./checkpoints/teacher/\")\n temp = kwargs.pop(\"temp\", 10)\n num_epoch = kwargs.pop(\"num_epoch\", 20)\n basic_learning_rate = kwargs.pop(\"basic_learning_rate\", 5e-4)\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/student\")\n is_dev = kwargs.pop(\"dev_mode\", False)\n learning_rate_decay = kwargs.pop(\"learning_rate_decay\", 0.01)\n reg_scale = kwargs.pop(\"reg_scale\", 1e-1)\n soft_target_scale = kwargs.pop(\"soft_target_scale\", 1)\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(teacher_model_path):\n raise RuntimeError(\"Cannot find pretrained teacher model in '{}'\".format(teacher_model_path))\n if not os.path.exists(model_save_path):\n os.makedirs(model_save_path)\n if not os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n teacher_model_path = os.path.join(teacher_model_path, \"{}.ckpt\".format(teacher_model_name))\n\n tf.reset_default_graph()\n \n # Get training dataset\n if is_dev:\n train_data, train_label = self.data_manager.dev_data, self.data_manager.dev_label\n else:\n train_data, train_label = self.data_manager.train_data, self.data_manager.train_label\n \n num_train_data = train_data.shape[0]\n\n # The input of model\n X = tf.placeholder(train_data.dtype, [None]+list(train_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(train_label.dtype, [None]+list(train_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n \n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n dataset = dataset.shuffle(buffer_size=8000)\n batched_dataset = dataset.batch(batch_size)\n\n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the teacher and student model\n regularizer = tf.contrib.layers.l2_regularizer(scale=reg_scale)\n with tf.variable_scope('student_model', regularizer=regularizer):\n logits, probs = self.student_model(batch_data, 
is_train=is_train)\n\n with tf.variable_scope('teacher_model'):\n teacher_logits, teacher_probs = self.teacher_model(batch_data, is_train=False, trainable=False, temp=temp)\n\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n\n teacher_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"teacher_model\")\n student_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"student_model\")\n teacher_loader = tf.train.Saver(teacher_variables)\n student_saver = tf.train.Saver(student_variables)\n \n # Training part\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=batch_label, name=\"hard_loss\"))\n # the l2 regularizer is attached to the student scope, so collect its losses there\n reg_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, 'student_model'))\n loss += reg_loss\n soft_target_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=teacher_probs, name=\"soft_loss\"))\n loss += soft_target_scale * soft_target_loss\n \n global_step = tf.get_variable('global_step', initializer=0.0, trainable=False)\n learning_rate = tf.train.natural_exp_decay(\n basic_learning_rate, global_step,\n decay_rate=learning_rate_decay,\n name='learning_rate', decay_steps=1\n )\n \n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(loss)\n global_step_add = tf.assign_add(global_step, 1)\n\n train_acc_hist = []\n val_acc_hist = []\n train_loss_hist = []\n best_acc = 0.0\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n teacher_loader.restore(sess, teacher_model_path)\n for i in range(num_epoch):\n sess.run(iterator.initializer, feed_dict={X:train_data, y:train_label})\n cnt = 0\n total_correct_cnt = 0\n total_loss, acc = 0.0, 0.0\n while True:\n try:\n curr_loss, train, right_num, curr_result = sess.run(\n [loss, train_op, correct_num, result],\n feed_dict={is_train: True}\n )\n total_correct_cnt += right_num\n total_loss += curr_loss\n cnt += 1\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / num_train_data\n last_loss = total_loss / cnt \n if verbose:\n div = \"===========================\"\n print(\"{}\\nEpoch {}/{}\\t\\tloss: {}\\t\\tacc: {}\".format(div, i+1, num_epoch, last_loss, acc))\n train_acc_hist.append(acc)\n train_loss_hist.append(last_loss)\n sess.run([global_step_add])\n if verbose:\n last_global_step, last_learning_rate = sess.run([global_step, learning_rate])\n print(\"learning_rate: {}\".format(last_learning_rate))\n break\n \n # Validation\n sess.run(iterator.initializer, feed_dict={X:self.data_manager.val_data, y:self.data_manager.val_label})\n acc = 0.0\n total_correct_cnt = 0\n while True:\n try:\n right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / self.data_manager.val_data.shape[0]\n if verbose:\n print(\"Validation acc: {}\".format(acc))\n val_acc_hist.append(acc)\n if acc > best_acc:\n best_acc = acc\n student_saver.save(sess, model_save_path)\n break\n # Write train process record\n self._writeRecord(record_save_path, \"{}_train_accuracy\".format(model_name), train_acc_hist)\n self._writeRecord(record_save_path, \"{}_validation_accuracy\".format(model_name), val_acc_hist)\n self._writeRecord(record_save_path, \"{}_train_loss\".format(model_name), train_loss_hist)\n if verbose:\n print(\"Finish Training Student Model! 
The Best Validation Accuracy is: {}\".format(best_acc))\n\n def TestStudent(self, model_name, **kwargs):\n \"\"\"\n Test your student model.\n\n model_name: The name of your student model.\n kwargs:\n - batch_size: Size of batch\n - model_save_path: The path that you store your pretrained student model.\n - record_save_path: The path saving training process record.\n - verbose: Print some debug information.\n \"\"\"\n batch_size = kwargs.pop(\"batch_size\", 256)\n model_save_path = kwargs.pop(\"model_save_path\", \"./checkpoints/student/\")\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/student\")\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(model_save_path):\n raise RuntimeError(\"No pretrained model exists in '{}'\".format(model_save_path))\n if not os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n\n tf.reset_default_graph()\n\n # Get dataset\n test_data, test_label = self.data_manager.test_data, self.data_manager.test_label\n num_test_data = test_data.shape[0]\n\n X = tf.placeholder(test_data.dtype, shape=[None]+list(test_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(test_label.dtype, shape=[None]+list(test_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n\n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n batched_dataset = dataset.batch(batch_size)\n \n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the student model\n with tf.variable_scope('student_model'):\n logits, probs = self.student_model(batch_data, is_train=is_train)\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n saver = tf.train.Saver()\n\n # Test process\n with tf.Session() as sess:\n sess.run(iterator.initializer, feed_dict={X:test_data, y:test_label})\n saver.restore(sess, model_save_path)\n total_correct_cnt = 0\n while True:\n try:\n right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / num_test_data\n if verbose:\n print(\"Test accuracy: {}\".format(acc))\n break\n acc_hist = [acc]\n self._writeRecord(record_save_path, \"{}_test_accuracy\".format(model_name), acc_hist)\n\n\n\n\n \n \n\n\n\n\n\n \n \n \n \n\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.train.AdamOptimizer", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.argmax", "tensorflow.reset_default_graph", "tensorflow.train.Saver", "tensorflow.Session", "tensorflow.assign_add", "tensorflow.variable_scope", "tensorflow.placeholder", "tensorflow.get_variable", "tensorflow.train.natural_exp_decay", "tensorflow.global_variables_initializer", "tensorflow.contrib.layers.l2_regularizer", "tensorflow.get_collection" ] ]
ricklentz/deep-object-reid
[ "bf4d30d78e4a34847496d0efb50d98541f5274f9" ]
[ "torchreid/ops/non_local.py" ]
[ "# Copyright (C) 2020-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n#\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass NonLocalModule(nn.Module):\n def __init__(self, in_channels, embed_dim=None, embed_factor=4, spatial_sub_sample=False):\n super().__init__()\n\n assert embed_factor >= 1\n self.embed_dim = embed_dim if embed_dim is not None else in_channels // embed_factor\n\n self.theta = self._conv_1x1(in_channels, self.embed_dim)\n self.phi = nn.Sequential(\n nn.MaxPool2d(kernel_size=(2, 2)) if spatial_sub_sample else nn.Sequential(),\n self._conv_1x1(in_channels, self.embed_dim))\n self.g = nn.Sequential(\n nn.MaxPool2d(kernel_size=(2, 2)) if spatial_sub_sample else nn.Sequential(),\n self._conv_1x1(in_channels, self.embed_dim))\n self.W = nn.Sequential(\n self._conv_1x1(self.embed_dim, in_channels),\n nn.BatchNorm2d(in_channels)\n )\n\n self._init_params()\n\n @staticmethod\n def _conv_1x1(in_channels, out_channels):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0),\n nn.BatchNorm2d(out_channels)\n )\n\n def _init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n theta = self.theta(x)\n phi = self.phi(x)\n g = self.g(x)\n\n theta = theta.view(theta.shape[:2] + (-1,))\n phi = phi.view(phi.shape[:2] + (-1,))\n g = g.view(g.shape[:2] + (-1,))\n\n theta_phi = torch.matmul(theta.transpose(1, 2), phi)\n attention = F.softmax(theta_phi, dim=2)\n\n y = torch.matmul(g, attention)\n y = y.view(y.shape[:2] + x.shape[2:])\n\n out = self.W(y) + x\n\n return out\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.Sequential", "torch.nn.init.constant_", "torch.nn.BatchNorm2d", "torch.nn.init.kaiming_normal_", "torch.nn.Conv2d", "torch.nn.functional.softmax", "torch.matmul" ] ]
riverhxz/wood_board
[ "043c1721ab0cc7d305c746b9843fcf348b565408" ]
[ "yolo3/utils.py" ]
[ "\"\"\"Miscellaneous utility functions.\"\"\"\n\nfrom functools import reduce\n\nfrom PIL import Image\nimport numpy as np\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\n\nimport imgaug.augmenters as iaa\nfrom imgaug.augmenters import Sometimes\nimport numpy as np\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nfrom imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\n\n\ndef get_classes(classes_path):\n '''loads the classes'''\n with open(classes_path) as f:\n class_names = f.readlines()\n class_names = [c.strip() for c in class_names]\n return class_names\n\n\ndef get_anchors(anchors_path):\n '''loads the anchors from a file'''\n with open(anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) for x in anchors.split(',')]\n return np.array(anchors).reshape(-1, 2)\n\n\ndef compose(*funcs):\n \"\"\"Compose arbitrarily many functions, evaluated left to right.\n\n Reference: https://mathieularose.com/function-composition-in-python/\n \"\"\"\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')\n\n\ndef letterbox_image(image, size):\n '''resize image with unchanged aspect ratio using padding'''\n iw, ih = image.size\n w, h = size\n scale = min(w / iw, h / ih)\n nw = int(iw * scale)\n nh = int(ih * scale)\n\n image = image.resize((nw, nh), Image.BICUBIC)\n new_image = Image.new('RGB', size, (128, 128, 128))\n new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))\n return new_image\n\n\ndef rand(a=0, b=1):\n return np.random.rand() * (b - a) + a\n\n\ndef get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5,\n proc_img=True):\n '''random preprocessing for real-time data augmentation'''\n line = annotation_line.split()\n image = Image.open(line[0])\n iw, ih = image.size\n h, w = input_shape\n box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])\n\n if not random:\n # resize image\n scale = min(w / iw, h / ih)\n nw = int(iw * scale)\n nh = int(ih * scale)\n dx = (w - nw) // 2\n dy = (h - nh) // 2\n image_data = 0\n if proc_img:\n image = image.resize((nw, nh), Image.BICUBIC)\n new_image = Image.new('RGB', (w, h), (128, 128, 128))\n new_image.paste(image, (dx, dy))\n image_data = np.array(new_image) / 255.\n\n # correct boxes\n box_data = np.zeros((max_boxes, 5))\n if len(box) > 0:\n np.random.shuffle(box)\n if len(box) > max_boxes: box = box[:max_boxes]\n box[:, [0, 2]] = box[:, [0, 2]] * scale + dx\n box[:, [1, 3]] = box[:, [1, 3]] * scale + dy\n box_data[:len(box)] = box\n\n return image_data, box_data\n\n # resize image\n new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(1 - jitter, 1 + jitter)\n scale = rand(.25, 2)\n if new_ar < 1:\n nh = int(scale * h)\n nw = int(nh * new_ar)\n else:\n nw = int(scale * w)\n nh = int(nw / new_ar)\n image = image.resize((nw, nh), Image.BICUBIC)\n\n # place image\n dx = int(rand(0, w - nw))\n dy = int(rand(0, h - nh))\n new_image = Image.new('RGB', (w, h), (128, 128, 128))\n new_image.paste(image, (dx, dy))\n image = new_image\n\n # flip image or not\n flip = rand() < .5\n if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\n # distort image\n hue = rand(-hue, hue)\n sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)\n val = rand(1, val) if rand() < .5 else 1 / rand(1, val)\n x = rgb_to_hsv(np.array(image) / 255.)\n x[..., 0] += hue\n x[..., 0][x[..., 0] > 1] -= 1\n 
x[..., 0][x[..., 0] < 0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x > 1] = 1\n x[x < 0] = 0\n image_data = hsv_to_rgb(x) # numpy array, 0 to 1\n\n # correct boxes\n box_data = np.zeros((max_boxes, 5))\n if len(box) > 0:\n np.random.shuffle(box)\n box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx\n box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy\n if flip: box[:, [0, 2]] = w - box[:, [2, 0]]\n box[:, 0:2][box[:, 0:2] < 0] = 0\n box[:, 2][box[:, 2] > w] = w\n box[:, 3][box[:, 3] > h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w > 1, box_h > 1)] # discard invalid box\n if len(box) > max_boxes: box = box[:max_boxes]\n box_data[:len(box)] = box\n\n return image_data, box_data\n\n\n# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,\n# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.\nsometimes = lambda aug: iaa.Sometimes(0.8, aug)\n\nseq = iaa.Sequential(\n [\n # apply the following augmenters to most images\n iaa.Fliplr(0.5), # horizontally flip 50% of all images\n iaa.Flipud(0.2), # vertically flip 20% of all images\n # crop images by -5% to 10% of their height/width\n sometimes(iaa.CropAndPad(\n percent=(-0.05, 0.1),\n pad_mode=ia.ALL,\n pad_cval=(0, 255)\n )),\n sometimes(iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis\n # translate_percent={\"x\": (-0.2, 0.2), \"y\": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)\n rotate=(-45, 45), # rotate by -45 to +45 degrees\n shear=(-16, 16), # shear by -16 to +16 degrees\n order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)\n cval=(0, 255), # if mode is constant, use a cval between 0 and 255\n mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)\n )),\n # execute 0 to 5 of the following (less important) augmenters per image\n # don't execute all of them, as that would often be way too strong\n iaa.SomeOf((0, 5),\n [\n # sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation\n iaa.OneOf([\n iaa.GaussianBlur((0, 1.0)), # blur images with a sigma between 0 and 3.0\n # iaa.AverageBlur(k=(2, 4)), # blur image using local means with kernel sizes between 2 and 7\n # iaa.MedianBlur(k=(2, 4)), # blur image using local medians with kernel sizes between 2 and 7\n ]),\n iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images\n iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images\n # # search either for all edges or for directed edges,\n # # blend the result with the original image using a blobby mask\n iaa.SimplexNoiseAlpha(iaa.OneOf([\n iaa.EdgeDetect(alpha=(0.5, 1.0)),\n iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),\n ])),\n # iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images\n iaa.OneOf([\n iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels\n iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),\n ]),\n iaa.Invert(0.05, per_channel=True), # invert color channels\n iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)\n iaa.AddToHueAndSaturation((-20, 20)), # change hue and saturation\n # # either change the brightness of the whole image (sometimes\n # # per channel) or change the brightness of subareas\n iaa.OneOf([\n iaa.Multiply((0.5, 1.5), 
per_channel=0.5),\n iaa.FrequencyNoiseAlpha(\n exponent=(-4, 0),\n first=iaa.Multiply((0.5, 1.5), per_channel=True),\n second=iaa.ContrastNormalization((0.5, 2.0))\n )\n ]),\n iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast\n iaa.Grayscale(alpha=(0.0, 1.0)),\n # sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)\n # sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))), # sometimes move parts of the image around\n sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1))),\n sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.03), nb_rows=4, nb_cols=4)),\n sometimes(iaa.ElasticTransformation(sigma=5.0)),\n\n ],\n random_order=True\n )\n ],\n random_order=True\n)\n\n\ndef get_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5,\n proc_img=True):\n line = annotation_line.split()\n image = Image.open(line[0])\n iw, ih = image.size\n h, w = input_shape\n box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])\n return image, box\n\n\ndef get_random_data_1(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5,\n proc_img=True):\n '''random preprocessing for real-time data augmentation'''\n line = annotation_line.split()\n image = Image.open(line[0])\n iw, ih = image.size\n h, w = input_shape\n box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])\n\n if not random:\n # resize image\n scale = min(w / iw, h / ih)\n nw = int(iw * scale)\n nh = int(ih * scale)\n dx = (w - nw) // 2\n dy = (h - nh) // 2\n image_data = 0\n if proc_img:\n image = image.resize((nw, nh), Image.BICUBIC)\n new_image = Image.new('RGB', (w, h), (128, 128, 128))\n new_image.paste(image, (dx, dy))\n image_data = np.array(new_image) / 255.\n\n # correct boxes\n box_data = np.zeros((max_boxes, 5))\n if len(box) > 0:\n np.random.shuffle(box)\n if len(box) > max_boxes: box = box[:max_boxes]\n box[:, [0, 2]] = box[:, [0, 2]] * scale + dx\n box[:, [1, 3]] = box[:, [1, 3]] * scale + dy\n box_data[:len(box)] = box\n\n return image_data, box_data\n\n img_data = np.array(image)\n bbs = BoundingBoxesOnImage([\n BoundingBox(x1, y1, x2, y2, label) for x1, y1, x2, y2, label in box\n ], shape=img_data.shape\n )\n image = np.array(image)\n image_rescaled = ia.imresize_single_image(image, input_shape)\n bbs_rescaled = bbs.on(image_rescaled)\n\n shaped_img, shaped_bbx = seq(image=image_rescaled, bounding_boxes=bbs_rescaled)\n # boxed = shaped_bbx.draw_on_image(shaped_img)\n # read the boxes back from the *augmented* result (the original iterated\n # bbs_rescaled, silently discarding the geometric augmentations), and\n # drop/clip boxes that were pushed outside the image\n shaped_bbx = shaped_bbx.remove_out_of_image().clip_out_of_image()\n shaped_bbx = [[x.x1, x.y1, x.x2, x.y2, x.label] for x in shaped_bbx.bounding_boxes]\n box = np.array(shaped_bbx)\n box_data = np.zeros((max_boxes, 5))\n if len(box) > 0:\n np.random.shuffle(box)\n if len(box) > max_boxes: box = box[:max_boxes]\n box_data[:len(box)] = box\n return shaped_img, box_data\n" ]
[ [ "numpy.array", "numpy.random.rand", "numpy.zeros", "matplotlib.colors.hsv_to_rgb", "numpy.random.shuffle", "numpy.logical_and" ] ]
sebi06/napari_zeiss
[ "95386f259eafb2ae6a98838dcfd4abe9f95f2366" ]
[ "tools/napari_tools.py" ]
[ "# -*- coding: utf-8 -*-\n\n#################################################################\n# File : napari_tools.py\n# Version : 0.0.3\n# Author : czsrh\n# Date : 09.06.2021\n# Institution : Carl Zeiss Microscopy GmbH\n#\n# Disclaimer: This tool is purely experimental. Feel free to\n# use it at your own risk.\n#\n# Copyright (c) 2021 Carl Zeiss AG, Germany. All Rights Reserved.\n#################################################################\n\ntry:\n import napari\nexcept ModuleNotFoundError as error:\n print(error.__class__.__name__ + \": \" + error.msg)\n\nfrom PyQt5.QtWidgets import (\n\n QHBoxLayout,\n QVBoxLayout,\n QFileSystemModel,\n QFileDialog,\n QTreeView,\n QDialogButtonBox,\n QWidget,\n QTableWidget,\n QTableWidgetItem,\n QCheckBox,\n QAbstractItemView,\n QComboBox,\n QPushButton,\n QLineEdit,\n QLabel,\n QGridLayout\n\n)\n\nfrom PyQt5.QtCore import Qt, QDir, QSortFilterProxyModel\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QFont\n\nimport tools.czifile_tools as czt\n#import tools.imgfile_tools as imf\nimport zarr\nimport dask\nimport dask.array as da\nimport numpy as np\n\n\nclass TableWidget(QWidget):\n\n def __init__(self):\n\n super(QWidget, self).__init__()\n\n self.layout = QVBoxLayout(self)\n self.mdtable = QTableWidget()\n self.layout.addWidget(self.mdtable)\n self.mdtable.setShowGrid(True)\n self.mdtable.setHorizontalHeaderLabels(['Parameter', 'Value'])\n header = self.mdtable.horizontalHeader()\n header.setDefaultAlignment(Qt.AlignLeft)\n\n def update_metadata(self, metadata):\n\n # number of rows is set to number of metadata entries\n row_count = len(metadata)\n col_count = 2\n self.mdtable.setColumnCount(col_count)\n self.mdtable.setRowCount(row_count)\n\n row = 0\n\n # update the table with the entries from metadata dictionary\n for key, value in metadata.items():\n newkey = QTableWidgetItem(key)\n self.mdtable.setItem(row, 0, newkey)\n newvalue = QTableWidgetItem(str(value))\n self.mdtable.setItem(row, 1, newvalue)\n row += 1\n\n # fit columns to content\n self.mdtable.resizeColumnsToContents()\n\n def update_style(self):\n\n # define font size and type\n fnt = QFont()\n fnt.setPointSize(11)\n fnt.setBold(True)\n fnt.setFamily('Arial')\n\n # update both header items\n item1 = QtWidgets.QTableWidgetItem('Parameter')\n item1.setForeground(QtGui.QColor(25, 25, 25))\n item1.setFont(fnt)\n self.mdtable.setHorizontalHeaderItem(0, item1)\n\n item2 = QtWidgets.QTableWidgetItem('Value')\n item2.setForeground(QtGui.QColor(25, 25, 25))\n item2.setFont(fnt)\n self.mdtable.setHorizontalHeaderItem(1, item2)\n\n\ndef show_napari(viewer, array, metadata,\n blending='additive',\n adjust_contrast=True,\n gamma=0.85,\n add_mdtable=True,\n rename_sliders=False):\n \"\"\"Show the multidimensional array using the Napari viewer\n\n :param viewer: Instnave of the napari viewer\n :type array: NapariViewer\n :param array: multidimensional NumPy.Array containing the pixeldata\n :type array: NumPy.Array\n :param metadata: dictionary with CZI or OME-TIFF metadata\n :type metadata: dict\n :param blending: NapariViewer option for blending, defaults to 'additive'\n :type blending: str, optional\n :param gamma: NapariViewer value for Gamma, defaults to 0.85\n :type gamma: float, optional\n :param rename_sliders: name slider with correct labels output, defaults to False\n :type verbose: bool, optional\n \"\"\"\n\n # create list for the napari layers\n napari_layers = []\n\n # create scalefcator with all ones\n scalefactors = 
[1.0] * len(array.shape)\n\n # use the dimension string from AICSImageIO 6D\n #dimpos = czt.get_dimpositions(metadata['Axes_aics'])\n dimpos = czt.get_dimpositions(metadata['czi_dims'])\n\n # get the scalefactors from the metadata\n scalef = czt.get_scalefactor(metadata)\n\n # modify the tuple for the scales for napari\n scalefactors[dimpos['Z']] = scalef['zx']\n\n # remove C dimension from scalefactor\n scalefactors_ch = scalefactors.copy()\n del scalefactors_ch[dimpos['C']]\n\n # add widget for metadata\n if add_mdtable:\n\n # create widget for the metadata\n mdbrowser = TableWidget()\n\n viewer.window.add_dock_widget(mdbrowser,\n name='mdbrowser',\n area='right')\n\n # add the metadata and adapt the table display\n mdbrowser.update_metadata(metadata)\n mdbrowser.update_style()\n\n # add all channels as layers\n for ch in range(metadata['SizeC']):\n\n try:\n # get the channel name\n chname = metadata['Channels'][ch]\n except KeyError as e:\n print(e)\n # or use CH1 etc. as string for the name\n chname = 'CH' + str(ch + 1)\n\n # cut out channel\n channel = slicedim(array, ch, dimpos['C'])\n\n # actually show the image array\n print('Adding Channel :', chname)\n print('Shape Channel :', ch, channel.shape)\n print('Scaling Factors :', scalefactors_ch)\n\n if adjust_contrast:\n sc = calc_scaling(channel, corr_max=1.15)\n print('Display Scaling', sc)\n\n # add channel to napari viewer\n new_layer = viewer.add_image(channel,\n name=chname,\n scale=scalefactors_ch,\n contrast_limits=sc,\n blending=blending,\n gamma=gamma)\n\n if not adjust_contrast:\n # add channel to napari viewer\n new_layer = viewer.add_image(channel,\n name=chname,\n scale=scalefactors_ch,\n blending=blending,\n gamma=gamma)\n\n napari_layers.append(new_layer)\n\n if rename_sliders:\n\n print('Renaming the Sliders based on the Dimension String ....')\n\n # get the label of the sliders (as a tuple) ad rename it\n #viewer.dims.axis_labels = napari_rename_sliders(viewer.dims.axis_labels, metadata['Axes_aics'])\n viewer.dims.axis_labels = napari_rename_sliders(viewer.dims.axis_labels, metadata['czi_dims'])\n\n return napari_layers\n\n\ndef napari_rename_sliders(sliders, axes_aics):\n \"\"\"Rename the sliders of the Napari viewer accoring to the dimensions.\n\n :param sliders: Tupe containing the slider label\n :type sliders: tuple\n :param axes_aics: Dimension string using AICSImageIO\n :type axes_aics: str\n :return: Tuple with new slider labels\n :rtype: tuple\n \"\"\"\n\n # get the positions of dimension entries after removing C dimension\n dimpos_viewer = czt.get_dimpositions(axes_aics)\n\n # update the labels with the correct dimension strings\n slidernames = ['B', 'H', 'V', 'M', 'S', 'T', 'Z']\n\n # convert to list()\n tmp_sliders = list(sliders)\n\n for s in slidernames:\n try:\n if dimpos_viewer[s] >= 0:\n\n # assign the dimension labels\n tmp_sliders[dimpos_viewer[s]] = s\n\n # convert back to tuple\n sliders = tuple(tmp_sliders)\n except KeyError:\n print('No', s, 'Dimension found')\n\n return sliders\n\n\ndef slicedim(array, dimindex, posdim):\n \"\"\"slice out a specific channel without (!) 
dropping the dimension\n of the array, to conserve the dimorder string;\n this should work for Numpy.Array, Dask and ZARR arrays\n\n :param array: The array to be sliced\n :type array: Numpy.Array, dask.Array, zarr.Array\n :param dimindex: index to be sliced out at a given dimension\n :type dimindex: int\n :param posdim: index of the dimension where the slicing should take place\n :type posdim: int\n :return: sliced array\n :rtype: Numpy.Array, dask.array, zarr.array\n \"\"\"\n\n if posdim == 0:\n array_sliced = array[dimindex:dimindex + 1, ...]\n if posdim == 1:\n array_sliced = array[:, dimindex:dimindex + 1, ...]\n if posdim == 2:\n array_sliced = array[:, :, dimindex:dimindex + 1, ...]\n if posdim == 3:\n array_sliced = array[:, :, :, dimindex:dimindex + 1, ...]\n if posdim == 4:\n array_sliced = array[:, :, :, :, dimindex:dimindex + 1, ...]\n if posdim == 5:\n array_sliced = array[:, :, :, :, :, dimindex:dimindex + 1, ...]\n\n \"\"\"\n # old way to do it differently\n\n if isinstance(array, da.Array):\n print('Extract Channel as Dask.Array')\n channel = slicedimC(array, ch, dimpos['C'])\n # channel = array.compute().take(ch, axis=dimpos['C'])\n if isinstance(array, zarr.Array):\n print('Extract Channel as Dask.Array')\n channel = slicedimC(array, ch, dimpos['C'])\n if isinstance(array, np.ndarray):\n # use normal numpy if not\n print('Extract Channel as NumPy.Array')\n channel = array.take(ch, axis=dimpos['C'])\n \"\"\"\n\n return array_sliced\n\n\ndef calc_scaling(data, corr_min=1.0,\n offset_min=0,\n corr_max=0.85,\n offset_max=0):\n \"\"\"Calculate the scaling for better display\n\n :param data: Calculate min / max scaling\n :type data: Numpy.Array\n :param corr_min: correction factor for minvalue, defaults to 1.0\n :type corr_min: float, optional\n :param offset_min: offset for min value, defaults to 0\n :type offset_min: int, optional\n :param corr_max: correction factor for max value, defaults to 0.85\n :type corr_max: float, optional\n :param offset_max: offset for max value, defaults to 0\n :type offset_max: int, optional\n :return: list with [minvalue, maxvalue]\n :rtype: list\n \"\"\"\n\n if isinstance(data, zarr.Array):\n minvalue = np.min(data)\n maxvalue = np.max(data)\n\n elif isinstance(data, da.Array):\n minvalue = data.compute().min()\n maxvalue = data.compute().max()\n\n else: # get min-max values for initial scaling\n minvalue = data.min()\n maxvalue = data.max()\n\n minvalue = np.round((minvalue + offset_min) * corr_min)\n maxvalue = np.round((maxvalue + offset_max) * corr_max)\n\n print('Scaling:', minvalue, maxvalue)\n\n return [minvalue, maxvalue]\n" ]
[ [ "numpy.round", "numpy.max", "numpy.min" ] ]
AIasd/leaderboard
[ "8f2152b364f530f06b2a0e7905e601d25f682f52" ]
[ "team_code/base_agent.py" ]
[ "import time\n\nimport cv2\nimport carla\n\nfrom leaderboard.autoagents import autonomous_agent\nfrom team_code.planner import RoutePlanner\n\nfrom srunner.scenariomanager.carla_data_provider import CarlaDataProvider\nimport numpy as np\nfrom leaderboard.utils.route_manipulation import interpolate_trajectory\n\nfrom carla_specific_utils.carla_specific import get_angle, norm_2d, get_bbox, angle_from_center_view_fov\nfrom carla_specific_utils.carla_specific_tools import visualize_route\nimport os\nimport math\nimport pathlib\n\nclass BaseAgent(autonomous_agent.AutonomousAgent):\n def setup(self, path_to_conf_file):\n self.track = autonomous_agent.Track.SENSORS\n self.config_path = path_to_conf_file\n self.step = -1\n self.record_every_n_step = 2000\n self.wall_start = time.time()\n self.initialized = False\n\n parent_folder = os.environ['SAVE_FOLDER']\n string = pathlib.Path(os.environ['ROUTES']).stem\n self.save_path = pathlib.Path(parent_folder) / string\n\n def _init(self):\n self._command_planner = RoutePlanner(7.5, 25.0, 257)\n self._command_planner.set_route(self._global_plan, True)\n self.initialized = True\n\n\n self._vehicle = CarlaDataProvider.get_hero_actor()\n self._world = self._vehicle.get_world()\n self._map = CarlaDataProvider.get_map()\n\n self.min_d = 10000\n self.offroad_d = 10000\n self.wronglane_d = 10000\n self.dev_dist = 0\n self.d_angle_norm = 1\n\n\n\n\n\n\n def _get_position(self, tick_data):\n gps = tick_data['gps']\n gps = (gps - self._command_planner.mean) * self._command_planner.scale\n\n return gps\n\n def sensors(self):\n return [\n {\n 'type': 'sensor.camera.rgb',\n 'x': 1.3, 'y': 0.0, 'z': 1.3,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'width': 256, 'height': 144, 'fov': 90,\n 'id': 'rgb'\n },\n {\n 'type': 'sensor.camera.rgb',\n 'x': 1.2, 'y': -0.25, 'z': 1.3,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': -45.0,\n 'width': 256, 'height': 144, 'fov': 90,\n 'id': 'rgb_left'\n },\n {\n 'type': 'sensor.camera.rgb',\n 'x': 1.2, 'y': 0.25, 'z': 1.3,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': 45.0,\n 'width': 256, 'height': 144, 'fov': 90,\n 'id': 'rgb_right'\n },\n {\n 'type': 'sensor.other.imu',\n 'x': 0.0, 'y': 0.0, 'z': 0.0,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'sensor_tick': 0.05,\n 'id': 'imu'\n },\n {\n 'type': 'sensor.other.gnss',\n 'x': 0.0, 'y': 0.0, 'z': 0.0,\n 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,\n 'sensor_tick': 0.01,\n 'id': 'gps'\n },\n {\n 'type': 'sensor.speedometer',\n 'reading_frequency': 20,\n 'id': 'speed'\n },\n # addition\n {\n 'type': 'sensor.camera.semantic_segmentation',\n 'x': 0.0, 'y': 0.0, 'z': 100.0,\n 'roll': 0.0, 'pitch': -90.0, 'yaw': 0.0,\n 'width': 512, 'height': 512, 'fov': 5 * 10.0,\n 'id': 'map'\n },\n {\n 'type': 'sensor.camera.rgb',\n 'x': -6, 'y': 0.0, 'z': 3,\n 'roll': 0.0, 'pitch': -20.0, 'yaw': 0.0,\n 'width': 256, 'height': 144, 'fov': 90,\n 'id': 'rgb_with_car'\n },\n {\n 'type': 'sensor.other.radar',\n 'x': 2, 'y': 0.0, 'z': 1,\n 'roll': 0.0, 'pitch': 5.0, 'yaw': 0.0,\n 'horizontal_fov': 35, 'vertical_fov': 20, 'range': 20,\n 'id': 'radar_central'\n }\n ]\n\n def tick(self, input_data):\n self.step += 1\n\n rgb = cv2.cvtColor(input_data['rgb'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n rgb_left = cv2.cvtColor(input_data['rgb_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n rgb_right = cv2.cvtColor(input_data['rgb_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)\n gps = input_data['gps'][1][:2]\n speed = input_data['speed'][1]['speed']\n compass = input_data['imu'][1][-1]\n\n return {\n 'rgb': rgb,\n 'rgb_left': rgb_left,\n 'rgb_right': 
rgb_right,\n 'gps': gps,\n 'speed': speed,\n 'compass': compass\n }\n\n\n\n # def set_trajectory(self, trajectory):\n # self.trajectory = trajectory\n\n def set_args(self, args):\n self.deviations_path = os.path.join(args.deviations_folder, 'deviations.txt')\n self.args = args\n # print('\\n'*10, 'self.args.record_every_n_step', self.args.record_every_n_step, '\\n'*10)\n self.record_every_n_step = self.args.record_every_n_step\n\n\n def record_other_actor_info_for_causal_analysis(self, ego_control_and_speed_info):\n def get_loc_and_ori(agent):\n agent_tra = agent.get_transform()\n agent_loc = agent_tra.location\n agent_rot = agent_tra.rotation\n return agent_loc.x, agent_loc.y, agent_rot.yaw\n\n data_row = []\n if ego_control_and_speed_info:\n data_row += ego_control_and_speed_info\n\n x, y, yaw = get_loc_and_ori(self._vehicle)\n data_row += [x, y, yaw]\n\n other_actor_info_path = os.path.join(self.args.deviations_folder, 'other_actor_info.txt')\n\n actors = self._world.get_actors()\n vehicle_list = actors.filter('*vehicle*')\n pedestrian_list = actors.filter('*walker*')\n\n\n\n\n for i, pedestrian in enumerate(pedestrian_list):\n d_angle_norm = angle_from_center_view_fov(pedestrian, self._vehicle, fov=90)\n if d_angle_norm == 0:\n within_view = True\n else:\n within_view = False\n\n x, y, yaw = get_loc_and_ori(pedestrian)\n data_row.extend([x, y, yaw, within_view])\n\n for i, vehicle in enumerate(vehicle_list):\n if vehicle.id == self._vehicle.id:\n continue\n\n d_angle_norm = angle_from_center_view_fov(vehicle, self._vehicle, fov=90)\n if d_angle_norm == 0:\n within_view = True\n else:\n within_view = False\n\n x, y, yaw = get_loc_and_ori(vehicle)\n data_row.extend([x, y, yaw, within_view])\n\n with open(other_actor_info_path, 'a') as f_out:\n f_out.write(','.join([str(d) for d in data_row])+'\\n')\n\n\n\n def gather_info(self, ego_control_and_speed_info=None):\n # if self.step % 1 == 0:\n # self.record_other_actor_info_for_causal_analysis(ego_control_and_speed_info)\n\n\n ego_bbox = get_bbox(self._vehicle)\n ego_front_bbox = ego_bbox[:2]\n\n\n actors = self._world.get_actors()\n vehicle_list = actors.filter('*vehicle*')\n pedestrian_list = actors.filter('*walker*')\n\n min_d = 10000\n d_angle_norm = 1\n for i, vehicle in enumerate(vehicle_list):\n if vehicle.id == self._vehicle.id:\n continue\n\n d_angle_norm_i = angle_from_center_view_fov(vehicle, self._vehicle, fov=90)\n d_angle_norm = np.min([d_angle_norm, d_angle_norm_i])\n if d_angle_norm_i == 0:\n other_bbox = get_bbox(vehicle)\n for other_b in other_bbox:\n for ego_b in ego_bbox:\n d = norm_2d(other_b, ego_b)\n # print('vehicle', i, 'd', d)\n min_d = np.min([min_d, d])\n\n\n for i, pedestrian in enumerate(pedestrian_list):\n d_angle_norm_i = angle_from_center_view_fov(pedestrian, self._vehicle, fov=90)\n d_angle_norm = np.min([d_angle_norm, d_angle_norm_i])\n if d_angle_norm_i == 0:\n pedestrian_location = pedestrian.get_transform().location\n for ego_b in ego_front_bbox:\n d = norm_2d(pedestrian_location, ego_b)\n # print('pedestrian', i, 'd', d)\n min_d = np.min([min_d, d])\n\n\n if min_d < self.min_d:\n self.min_d = min_d\n with open(self.deviations_path, 'a') as f_out:\n f_out.write('min_d,'+str(self.min_d)+'\\n')\n\n\n if d_angle_norm < self.d_angle_norm:\n self.d_angle_norm = d_angle_norm\n with open(self.deviations_path, 'a') as f_out:\n f_out.write('d_angle_norm,'+str(self.d_angle_norm)+'\\n')\n\n\n\n angle_th = 120\n\n current_location = CarlaDataProvider.get_location(self._vehicle)\n current_transform = 
CarlaDataProvider.get_transform(self._vehicle)\n current_waypoint = self._map.get_waypoint(current_location, project_to_road=False, lane_type=carla.LaneType.Any)\n ego_forward = current_transform.get_forward_vector()\n ego_forward = np.array([ego_forward.x, ego_forward.y])\n ego_forward /= np.linalg.norm(ego_forward)\n ego_right = current_transform.get_right_vector()\n ego_right = np.array([ego_right.x, ego_right.y])\n ego_right /= np.linalg.norm(ego_right)\n\n\n lane_center_waypoint = self._map.get_waypoint(current_location, lane_type=carla.LaneType.Any)\n lane_center_transform = lane_center_waypoint.transform\n lane_center_location = lane_center_transform.location\n lane_forward = lane_center_transform.get_forward_vector()\n lane_forward = np.array([lane_forward.x, lane_forward.y])\n lane_forward /= np.linalg.norm(lane_forward)\n # use the lane's own transform here (the original used current_transform,\n # which just duplicated ego_right)\n lane_right = lane_center_transform.get_right_vector()\n lane_right = np.array([lane_right.x, lane_right.y])\n lane_right /= np.linalg.norm(lane_right)\n\n\n\n dev_dist = current_location.distance(lane_center_location)\n # normalized to [0, 1]. 0 - same direction, 1 - opposite direction\n\n # print('ego_forward, lane_forward, np.dot(ego_forward, lane_forward)', ego_forward, lane_forward, np.dot(ego_forward, lane_forward))\n dev_angle = math.acos(np.clip(np.dot(ego_forward, lane_forward), -1, 1)) / np.pi\n # smoothing and integrate\n dev_dist *= (dev_angle + 0.5)\n\n if dev_dist > self.dev_dist:\n self.dev_dist = dev_dist\n with open(self.deviations_path, 'a') as f_out:\n f_out.write('dev_dist,'+str(self.dev_dist)+'\\n')\n\n\n\n # print(current_location, current_waypoint.lane_type, current_waypoint.is_junction)\n # print(lane_center_location, lane_center_waypoint.lane_type, lane_center_waypoint.is_junction)\n\n def get_d(coeff, dir, dir_label):\n\n n = 1\n # bound the probe to 7 m in both directions; the original n*coeff < 7\n # never limited the search when coeff was negative\n while abs(n*coeff) < 7:\n new_loc = carla.Location(current_location.x + n*coeff*dir[0], current_location.y + n*coeff*dir[1], 0)\n # print(coeff, dir, dir_label)\n # print(dir_label, 'current_location, dir, new_loc', current_location, dir, new_loc)\n new_wp = self._map.get_waypoint(new_loc,project_to_road=False, lane_type=carla.LaneType.Any)\n\n if not (new_wp and new_wp.lane_type in [carla.LaneType.Driving, carla.LaneType.Parking, carla.LaneType.Bidirectional] and np.abs(new_wp.transform.rotation.yaw%360 - lane_center_waypoint.transform.rotation.yaw%360) < angle_th):\n # if new_wp and new_wp.lane_type in [carla.LaneType.Driving, carla.LaneType.Parking, carla.LaneType.Bidirectional]:\n # print('new_wp.transform.rotation.yaw, lane_center_waypoint.transform.rotation.yaw', new_wp.transform.rotation.yaw, lane_center_waypoint.transform.rotation.yaw)\n break\n else:\n n += 1\n # if new_wp:\n # print(n, new_wp.transform.rotation.yaw)\n\n d = new_loc.distance(current_location)\n # print(d, new_loc, current_location)\n\n\n with open(self.deviations_path, 'a') as f_out:\n if new_wp and new_wp.lane_type in [carla.LaneType.Driving, carla.LaneType.Parking, carla.LaneType.Bidirectional]:\n # print(dir_label, 'wronglane_d', d)\n if d < self.wronglane_d:\n self.wronglane_d = d\n f_out.write('wronglane_d,'+str(self.wronglane_d)+'\\n')\n # print(dir_label, 'current_location, dir, new_loc', current_location, dir, new_loc, 'wronglane_d,'+str(self.wronglane_d)+'\\n')\n else:\n # if not new_wp:\n # s = 'None wp'\n # else:\n # s = new_wp.lane_type\n # print(dir_label, 'offroad_d', d, s, coeff)\n # if new_wp:\n # print(dir_label, 'lanetype', new_wp.lane_type)\n if d < self.offroad_d:\n self.offroad_d = d\n f_out.write('offroad_d,'+str(self.offroad_d)+'\\n')\n # print(dir_label, 'current_location, dir, new_loc', current_location, dir, new_loc, 'offroad_d,'+str(self.offroad_d)+'\\n')\n\n\n\n\n if current_waypoint and not current_waypoint.is_junction:\n get_d(-0.1, lane_right, 'left')\n get_d(0.1, lane_right, 'right')\n get_d(-0.1, ego_right, 'ego_left')\n get_d(0.1, ego_right, 'ego_right')\n get_d(0.1, ego_forward, 'ego_forward')\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.dot", "numpy.min", "numpy.abs" ] ]
opoplawski/scipy
[ "582d59caabb4a2a6fcdd06b512dcd14daf7ca6b2" ]
[ "scipy/weave/accelerate_tools.py" ]
[ "#**************************************************************************#\n#* FILE ************** accelerate_tools.py ************************#\n#**************************************************************************#\n#* Author: Patrick Miller February 9 2002 *#\n#**************************************************************************#\n\"\"\"\naccelerate_tools contains the interface for on-the-fly building of\nC++ equivalents to Python functions.\n\"\"\"\n#**************************************************************************#\nfrom __future__ import absolute_import, print_function\n\nfrom types import InstanceType, XRangeType\nimport inspect\nimport scipy.weave.md5_load as md5\nimport scipy.weave as weave\nfrom numpy.testing import assert_\n\nfrom .bytecodecompiler import CXXCoder,Type_Descriptor,Function_Descriptor\n\n\ndef CStr(s):\n \"Hacky way to get legal C string from Python string\"\n if s is None:\n return '\"\"'\n assert_(isinstance(s, str), msg=\"only None and string allowed\")\n r = repr('\"'+s) # Better for embedded quotes\n return '\"'+r[2:-1]+'\"'\n\n\n##################################################################\n# CLASS INSTANCE #\n##################################################################\nclass Instance(Type_Descriptor):\n cxxtype = 'PyObject*'\n\n def __init__(self,prototype):\n self.prototype = prototype\n\n def check(self,s):\n return \"PyInstance_Check(%s)\" % s\n\n def inbound(self,s):\n return s\n\n def outbound(self,s):\n return s,0\n\n def get_attribute(self,name):\n proto = getattr(self.prototype,name)\n T = lookup_type(proto)\n code = 'tempPY = PyObject_GetAttrString(%%(rhs)s,\"%s\");\\n' % name\n convert = T.inbound('tempPY')\n code += '%%(lhsType)s %%(lhs)s = %s;\\n' % convert\n return T,code\n\n def set_attribute(self,name):\n proto = getattr(self.prototype,name)\n T = lookup_type(proto)\n convert,owned = T.outbound('%(rhs)s')\n code = 'tempPY = %s;' % convert\n if not owned:\n code += ' Py_INCREF(tempPY);'\n code += ' PyObject_SetAttrString(%%(lhs)s,\"%s\",tempPY);' % name\n code += ' Py_DECREF(tempPY);\\n'\n return T,code\n\n##################################################################\n# CLASS BASIC #\n##################################################################\n\n\nclass Basic(Type_Descriptor):\n owned = 1\n\n def check(self,s):\n return \"%s(%s)\" % (self.checker,s)\n\n def inbound(self,s):\n return \"%s(%s)\" % (self.inbounder,s)\n\n def outbound(self,s):\n return \"%s(%s)\" % (self.outbounder,s),self.owned\n\n\nclass Basic_Number(Basic):\n def literalizer(self,s):\n return str(s)\n\n def binop(self,symbol,a,b):\n assert_(symbol in ['+','-','*','/'], msg=symbol)\n return '%s %s %s' % (a,symbol,b),self\n\n\nclass Integer(Basic_Number):\n cxxtype = \"long\"\n checker = \"PyInt_Check\"\n inbounder = \"PyInt_AsLong\"\n outbounder = \"PyInt_FromLong\"\n\n\nclass Double(Basic_Number):\n cxxtype = \"double\"\n checker = \"PyFloat_Check\"\n inbounder = \"PyFloat_AsDouble\"\n outbounder = \"PyFloat_FromDouble\"\n\n\nclass String(Basic):\n cxxtype = \"char*\"\n checker = \"PyString_Check\"\n inbounder = \"PyString_AsString\"\n outbounder = \"PyString_FromString\"\n\n def literalizer(self,s):\n return CStr(s)\n\n# -----------------------------------------------\n# Singletonize the type names\n# -----------------------------------------------\nInteger = Integer()\nDouble = Double()\nString = String()\n\nimport numpy as np\n\n\nclass Vector(Type_Descriptor):\n cxxtype = 'PyArrayObject*'\n refcount = 1\n dims = 1\n 
module_init_code = 'import_array();\\n'\n inbounder = \"(PyArrayObject*)\"\n outbounder = \"(PyObject*)\"\n owned = 0 # Convertion is by casting!\n\n prerequisites = Type_Descriptor.prerequisites + \\\n ['#include \"numpy/arrayobject.h\"']\n dims = 1\n\n def check(self,s):\n return \"PyArray_Check(%s) && ((PyArrayObject*)%s)->nd == %d && ((PyArrayObject*)%s)->descr->type_num == %s\" % (\n s,s,self.dims,s,self.typecode)\n\n def inbound(self,s):\n return \"%s(%s)\" % (self.inbounder,s)\n\n def outbound(self,s):\n return \"%s(%s)\" % (self.outbounder,s),self.owned\n\n def getitem(self,A,v,t):\n assert_(self.dims == len(v), msg='Expect dimension %d' % self.dims)\n code = '*((%s*)(%s->data' % (self.cxxbase,A)\n for i in range(self.dims):\n # assert that ''t[i]'' is an integer\n code += '+%s*%s->strides[%d]' % (v[i],A,i)\n code += '))'\n return code,self.pybase\n\n def setitem(self,A,v,t):\n return self.getitem(A,v,t)\n\n\nclass matrix(Vector):\n dims = 2\n\n\nclass IntegerVector(Vector):\n typecode = 'PyArray_INT'\n cxxbase = 'int'\n pybase = Integer\n\n\nclass Integermatrix(matrix):\n typecode = 'PyArray_INT'\n cxxbase = 'int'\n pybase = Integer\n\n\nclass LongVector(Vector):\n typecode = 'PyArray_LONG'\n cxxbase = 'long'\n pybase = Integer\n\n\nclass Longmatrix(matrix):\n typecode = 'PyArray_LONG'\n cxxbase = 'long'\n pybase = Integer\n\n\nclass DoubleVector(Vector):\n typecode = 'PyArray_DOUBLE'\n cxxbase = 'double'\n pybase = Double\n\n\nclass Doublematrix(matrix):\n typecode = 'PyArray_DOUBLE'\n cxxbase = 'double'\n pybase = Double\n\n\n##################################################################\n# CLASS XRANGE #\n##################################################################\nclass XRange(Type_Descriptor):\n cxxtype = 'XRange'\n prerequisites = ['''\n class XRange {\n public:\n XRange(long aLow, long aHigh, long aStep=1)\n : low(aLow),high(aHigh),step(aStep)\n {\n }\n XRange(long aHigh)\n : low(0),high(aHigh),step(1)\n {\n }\n long low;\n long high;\n long step;\n };''']\n\n# -----------------------------------------------\n# Singletonize the type names\n# -----------------------------------------------\nIntegerVector = IntegerVector()\nIntegermatrix = Integermatrix()\nLongVector = LongVector()\nLongmatrix = Longmatrix()\nDoubleVector = DoubleVector()\nDoublematrix = Doublematrix()\nXRange = XRange()\n\n\ntypedefs = {\n int: Integer,\n float: Double,\n str: String,\n (np.ndarray,1,int): IntegerVector,\n (np.ndarray,2,int): Integermatrix,\n (np.ndarray,1,np.long): LongVector,\n (np.ndarray,2,np.long): Longmatrix,\n (np.ndarray,1,float): DoubleVector,\n (np.ndarray,2,float): Doublematrix,\n XRangeType: XRange,\n }\n\nimport math\nfunctiondefs = {\n (len,(String,)):\n Function_Descriptor(code='strlen(%s)',return_type=Integer),\n\n (len,(LongVector,)):\n Function_Descriptor(code='PyArray_Size((PyObject*)%s)',return_type=Integer),\n\n (float,(Integer,)):\n Function_Descriptor(code='(double)(%s)',return_type=Double),\n\n (range,(Integer,Integer)):\n Function_Descriptor(code='XRange(%s)',return_type=XRange),\n\n (range,(Integer)):\n Function_Descriptor(code='XRange(%s)',return_type=XRange),\n\n (math.sin,(Double,)):\n Function_Descriptor(code='sin(%s)',return_type=Double),\n\n (math.cos,(Double,)):\n Function_Descriptor(code='cos(%s)',return_type=Double),\n\n (math.sqrt,(Double,)):\n Function_Descriptor(code='sqrt(%s)',return_type=Double),\n }\n\n\n##################################################################\n# FUNCTION LOOKUP_TYPE 
#\n##################################################################\ndef lookup_type(x):\n T = type(x)\n try:\n return typedefs[T]\n except:\n # type objects are never ndarray *instances*, so the original\n # isinstance(T,np.ndarray) could never be true; issubclass is the\n # intended check. Note the (ndarray, ndim, dtype) keys above use\n # Python scalar types, so x.dtype.char may still need mapping.\n if issubclass(T,np.ndarray):\n return typedefs[(T,len(x.shape),x.dtype.char)]\n elif issubclass(T, InstanceType):\n return Instance(x)\n else:\n raise NotImplementedError(T)\n\n##################################################################\n# class ACCELERATE #\n##################################################################\n\n\nclass accelerate(object):\n\n def __init__(self, function, *args, **kw):\n assert_(inspect.isfunction(function))\n self.function = function\n self.module = inspect.getmodule(function)\n if self.module is None:\n import __main__\n self.module = __main__\n self.__call_map = {}\n\n def __cache(self,*args):\n raise TypeError\n\n def __call__(self,*args):\n try:\n return self.__cache(*args)\n except TypeError:\n # Figure out type info -- Do as tuple so its hashable\n signature = tuple(map(lookup_type,args))\n\n # If we know the function, call it\n try:\n fast = self.__call_map[signature]\n except:\n fast = self.singleton(signature)\n self.__cache = fast\n self.__call_map[signature] = fast\n return fast(*args)\n\n def signature(self,*args):\n # Figure out type info -- Do as tuple so its hashable\n signature = tuple(map(lookup_type,args))\n return self.singleton(signature)\n\n def singleton(self,signature):\n identifier = self.identifier(signature)\n\n # Generate a new function, then call it\n f = self.function\n\n # See if we have an accelerated version of module\n try:\n print('lookup',self.module.__name__+'_weave')\n accelerated_module = __import__(self.module.__name__+'_weave')\n print('have accelerated',self.module.__name__+'_weave')\n fast = getattr(accelerated_module,identifier)\n return fast\n except ImportError:\n accelerated_module = None\n except AttributeError:\n pass\n\n P = self.accelerate(signature,identifier)\n\n E = weave.ext_tools.ext_module(self.module.__name__+'_weave')\n E.add_function(P)\n E.generate_file()\n weave.build_tools.build_extension(self.module.__name__+'_weave.cpp',verbose=2)\n\n if accelerated_module:\n raise NotImplementedError('Reload')\n else:\n accelerated_module = __import__(self.module.__name__+'_weave')\n\n fast = getattr(accelerated_module,identifier)\n return fast\n\n def identifier(self,signature):\n # Build an MD5 checksum\n f = self.function\n co = f.func_code\n identifier = str(signature) + \\\n str(co.co_argcount) + \\\n str(co.co_consts) + \\\n str(co.co_varnames) + \\\n co.co_code\n return 'F'+md5.md5(identifier).hexdigest()\n\n def accelerate(self,signature,identifier):\n P = Python2CXX(self.function,signature,name=identifier)\n return P\n\n def code(self,*args):\n if len(args) != self.function.func_code.co_argcount:\n raise TypeError('%s() takes exactly %d arguments (%d given)' %\n (self.function.__name__,\n self.function.func_code.co_argcount,\n len(args)))\n signature = tuple(map(lookup_type,args))\n ident = self.function.__name__\n return self.accelerate(signature,ident).function_code()\n\n\n##################################################################\n# CLASS PYTHON2CXX #\n##################################################################\nclass Python2CXX(CXXCoder):\n def typedef_by_value(self,v):\n T = lookup_type(v)\n if T not in self.used:\n self.used.append(T)\n return T\n\n def function_by_signature(self,signature):\n descriptor = functiondefs[signature]\n if descriptor.return_type not in self.used:\n self.used.append(descriptor.return_type)\n return 
descriptor\n\n def __init__(self,f,signature,name=None):\n # Make sure function is a function\n assert_(inspect.isfunction(f))\n # and check the input type signature\n assert_(reduce(lambda x,y: x and y,\n map(lambda x: isinstance(x,Type_Descriptor),\n signature),\n 1), msg='%s not all type objects' % signature)\n self.arg_specs = []\n self.customize = weave.base_info.custom_info()\n\n CXXCoder.__init__(self,f,signature,name)\n\n return\n\n def function_code(self):\n code = self.wrapped_code()\n for T in self.used:\n if T is not None and T.module_init_code:\n self.customize.add_module_init_code(T.module_init_code)\n return code\n\n def python_function_definition_code(self):\n return '{ \"%s\", wrapper_%s, METH_VARARGS, %s },\\n' % (\n self.name,\n self.name,\n CStr(self.function.__doc__))\n" ]
[ [ "scipy.weave.build_tools.build_extension", "scipy.weave.base_info.custom_info", "numpy.testing.assert_", "scipy.weave.ext_tools.ext_module", "scipy.weave.md5_load.md5" ] ]
5AGE-zhang/TocoDecoy
[ "6ce34d717d965e7d36cb8d2b1b527d2fded7c2d3" ]
[ "dataset_generation/0_molecular_generation/ddc_pub/ddc_v3_unbiased.py" ]
[ "import os\nos.environ[\n \"TF_CPP_MIN_LOG_LEVEL\"\n] = \"3\" # Suppress UserWarning of TensorFlow while loading the model\n\nimport numpy as np\nfrom datetime import datetime\nfrom functools import wraps\nimport shutil, zipfile, tempfile, pickle\n\nfrom tensorflow.keras.layers import (\n Input,\n Concatenate,\n Dense,\n TimeDistributed,\n BatchNormalization,\n)\nfrom tensorflow.compat.v1.keras.layers import (\n CuDNNLSTM as LSTM,\n) \nfrom tensorflow.keras.models import Model, load_model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, LearningRateScheduler\nfrom tensorflow.keras.utils import multi_gpu_model, plot_model\n\n# Custom dependencies\nfrom molvecgen import SmilesVectorizer\n\nfrom ddc_pub.generators import SmilesGenerator2\nfrom ddc_pub.custom_callbacks import ModelAndHistoryCheckpoint, LearningRateSchedule\n\n\ndef timed(func):\n \"\"\"\n Timer decorator to benchmark functions.\n \"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n tstart = datetime.now()\n result = func(*args, **kwargs)\n elapsed = (datetime.now() - tstart).microseconds / 1e6\n print(\"Elapsed time: %.3f seconds.\" % elapsed)\n return result\n\n return wrapper\n\n\nclass DDC:\n def __init__(self, **kwargs):\n \"\"\"\n # Arguments\n kwargs:\n x : model input - np.ndarray of np.bytes_ or np.float64\n y : model output - np.ndarray of np.bytes_\n model_name : model filename to load - string\n dataset_info : dataset information including name, maxlen and charset - hdf5\n noise_std : standard deviation of the noise layer in the latent space - float\n lstm_dim : size of LSTM RNN layers - int\n dec_layers : number of decoder layers - int\n td_dense_dim : size of TD Dense layers inbetween the LSTM ones\n to suppress network size - int\n batch_size : the network's batch size - int\n codelayer_dim: dimensionality of the latent space or number of descriptors - int\n \n \n # Examples of __init__ usage\n To *train* a blank model with encoder (autoencoder):\n model = ddc.DDC(x = mols,\n y = mols,\n dataset_info = info,\n noise_std = 0.1,\n lstm_dim = 256,\n dec_layers = 3,\n td_dense_dim = 0,\n batch_size = 128,\n codelayer_dim = 128)\n \n To *train* a blank model without encoder:\n model = ddc.DDC(x = descriptors,\n y = mols,\n dataset_info = info,\n noise_std = 0.1,\n lstm_dim = 256,\n dec_layers = 3,\n td_dense_dim = 0,\n batch_size = 128)\n \n To *re-train* a saved model with encoder (autoencoder):\n model = ddc.DDC(x = mols,\n y = mols,\n model_name = saved_model_name)\n \n To *re-train* a saved model without encoder:\n model = ddc.DDC(x = descriptors,\n y = mols,\n model_name = saved_model_name)\n \n To *test* a saved model:\n model = ddc.DDC(model_name = saved_model_name)\n\n \"\"\"\n\n # Identify the mode to start the model in\n if \"x\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Check input type.\")\n self.__input_type = \"other\" # other molecular descriptors\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid 
breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n\n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build data generators\n self.__build_generators(x)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n\n if self.mode == \"retrain\":\n # Build data generators\n self.__build_generators(x)\n\n \n\n # Show the resulting full model\n print(self.model.summary())\n\n \"\"\"\n Architecture properties.\n \"\"\"\n\n @property\n def lstm_dim(self):\n return self.__lstm_dim\n\n @property\n def h_activation(self):\n return self.__h_activation\n\n @property\n def bn(self):\n return self.__bn\n\n @property\n def bn_momentum(self):\n return self.__bn_momentum\n\n @property\n def noise_std(self):\n return self.__noise_std\n\n @property\n def td_dense_dim(self):\n return self.__td_dense_dim\n\n @property\n def batch_size(self):\n return self.__batch_size\n\n @property\n def dec_layers(self):\n return self.__dec_layers\n\n @property\n def codelayer_dim(self):\n return self.__codelayer_dim\n\n @property\n def steps_per_epoch(self):\n return self.__steps_per_epoch\n\n @property\n def validation_steps(self):\n return self.__validation_steps\n\n @property\n def input_shape(self):\n return self.__input_shape\n\n @property\n def dec_dims(self):\n return self.__dec_dims\n\n @property\n def dec_input_shape(self):\n return self.__dec_input_shape\n\n @property\n def output_len(self):\n return self.__output_len\n\n @property\n def output_dims(self):\n return self.__output_dims\n\n @property\n def batch_input_length(self):\n return self.__batch_input_length\n\n #@batch_input_length.setter\n #def batch_input_length(self, value):\n # self.__batch_input_length = value\n # self.__build_sample_model(batch_input_length=value)\n\n \"\"\"\n Models.\n \"\"\"\n\n @property\n def sample_model(self):\n return self.__sample_model\n\n @property\n def multi_sample_model(self):\n return self.__multi_sample_model\n\n @property\n def model(self):\n return self.__model\n\n \"\"\"\n Train properties.\n \"\"\"\n\n @property\n def epochs(self):\n return self.__epochs\n\n @property\n def 
clipvalue(self):\n return self.__clipvalue\n\n @property\n def lr(self):\n return self.__lr\n\n @property\n def h(self):\n return self.__h\n\n \"\"\"\n Other properties.\n \"\"\"\n\n @property\n def mode(self):\n return self.__mode\n\n @property\n def dataset_name(self):\n return self.__dataset_name\n\n @property\n def model_name(self):\n return self.__model_name\n\n @property\n def input_type(self):\n return self.__input_type\n\n @property\n def maxlen(self):\n return self.__maxlen\n\n @property\n def charset(self):\n return self.__charset\n\n @property\n def smilesvec1(self):\n return self.__smilesvec1\n\n @property\n def smilesvec2(self):\n return self.__smilesvec2\n\n @property\n def train_gen(self):\n return self.__train_gen\n\n @property\n def valid_gen(self):\n return self.__valid_gen\n\n \"\"\"\n Private methods.\n \"\"\"\n\n def __build_generators(self, x, split=0.81050343):\n \"\"\"\n Build data generators to be used in (re)training.\n \"\"\"\n\n # Split dataset into train and validation sets\n cut = int(split * len(x))\n x_train = x[:cut]\n x_valid = x[cut:]\n\n self.__train_gen = SmilesGenerator2(\n x_train,\n None,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n self.__valid_gen = SmilesGenerator2(\n x_valid,\n None,\n self.smilesvec1,\n self.smilesvec2,\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n # Calculate number of batches per training/validation epoch\n train_samples = len(x_train)\n valid_samples = len(x_valid)\n self.__steps_per_epoch = train_samples // self.batch_size\n self.__validation_steps = valid_samples // self.batch_size\n\n print(\n \"Model received %d train samples and %d validation samples.\"\n % (train_samples, valid_samples)\n )\n\n def __build_model(self):\n \"\"\"\n RNN that generates random SMILES strings.\n \"\"\"\n\n # This is the start character padded OHE smiles for teacher forcing\n decoder_inputs = Input(shape=self.dec_input_shape, name=\"Decoder_Inputs\")\n\n # I/O tensor of the LSTM layers\n x = decoder_inputs\n\n for dec_layer in range(self.dec_layers):\n # RNN layer\n decoder_lstm = LSTM(\n self.lstm_dim,\n return_sequences=True,\n name=\"Decoder_LSTM_\" + str(dec_layer),\n )\n\n x = decoder_lstm(x)\n\n if self.bn:\n x = BatchNormalization(\n momentum=self.bn_momentum, name=\"BN_Decoder_\" + str(dec_layer)\n )(x)\n\n # Squeeze LSTM interconnections using Dense layers\n if self.td_dense_dim > 0:\n x = TimeDistributed(\n Dense(self.td_dense_dim), name=\"Time_Distributed_\" + str(dec_layer)\n )(x)\n\n # Final Dense layer to return soft labels (probabilities)\n outputs = Dense(self.output_dims, activation=\"softmax\", name=\"Dense_Decoder\")(x)\n\n # Define the batch_model\n self.__model = Model(inputs=[decoder_inputs], outputs=[outputs])\n\n # Name it!\n self.__model._name = \"model\"\n\n def __build_sample_model(self, batch_input_length) -> dict:\n \"\"\"\n Model that predicts a single OHE character.\n This model is generated from the modified config file of the self.batch_model.\n\n Returns:\n The dictionary of the configuration.\n \"\"\"\n\n self.__batch_input_length = batch_input_length\n\n # Get the configuration of the batch_model\n config = self.model.get_config()\n\n # Keep only the \"Decoder_Inputs\" as single input to the sample_model\n config[\"input_layers\"] = [config[\"input_layers\"][0]]\n\n # Find decoder states that are used as inputs in batch_model and remove them\n idx_list = []\n for idx, layer in enumerate(config[\"layers\"]):\n\n if \"Decoder_State_\" in layer[\"name\"]:\n 
idx_list.append(idx)\n\n # Pop the layer from the layer list\n # Revert indices to avoid re-arranging after deleting elements\n for idx in sorted(idx_list, reverse=True):\n config[\"layers\"].pop(idx)\n\n # Remove inbound_nodes dependencies of remaining layers on deleted ones\n for layer in config[\"layers\"]:\n idx_list = []\n\n try:\n for idx, inbound_node in enumerate(layer[\"inbound_nodes\"][0]):\n if \"Decoder_State_\" in inbound_node[0]:\n idx_list.append(idx)\n # Catch the exception for first layer (Decoder_Inputs) that has empty list of inbound_nodes[0]\n except:\n pass\n\n # Pop the inbound_nodes from the list\n # Revert indices to avoid re-arranging\n for idx in sorted(idx_list, reverse=True):\n layer[\"inbound_nodes\"][0].pop(idx)\n\n # Change the batch_shape of input layer\n config[\"layers\"][0][\"config\"][\"batch_input_shape\"] = (\n batch_input_length,\n 1,\n self.dec_input_shape[-1],\n )\n\n # Finally, change the statefulness of the RNN layers\n for layer in config[\"layers\"]:\n if \"Decoder_LSTM_\" in layer[\"name\"]:\n layer[\"config\"][\"stateful\"] = True\n # layer[\"config\"][\"return_sequences\"] = True\n\n # Define the sample_model using the modified config file\n sample_model = Model.from_config(config)\n\n # Copy the trained weights from the trained batch_model to the untrained sample_model\n for layer in sample_model.layers:\n # Get weights from the batch_model\n weights = self.model.get_layer(layer.name).get_weights()\n # Set the weights to the sample_model\n sample_model.get_layer(layer.name).set_weights(weights)\n\n if batch_input_length == 1:\n self.__sample_model = sample_model\n\n elif batch_input_length > 1:\n self.__multi_sample_model = sample_model\n\n return config\n\n def __load(self, model_name):\n \"\"\"\n Load complete model from a zip file.\n To be called within __init__.\n \"\"\"\n\n print(\"Loading model.\")\n tstart = datetime.now()\n\n # Temporary directory to extract the zipped information\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Unzip the directory that contains the saved model(s)\n with zipfile.ZipFile(model_name + \".zip\", \"r\") as zip_ref:\n zip_ref.extractall(dirpath)\n\n # Load metadata\n metadata = pickle.load(open(dirpath + \"/metadata.pickle\", \"rb\"))\n\n # Re-load metadata\n self.__dict__.update(metadata)\n\n # Load the model\n self.__model = load_model(dirpath + \"/model.h5\")\n\n # Build sample_model out of the trained batch_model\n self.__build_sample_model(batch_input_length=1) # Single-output model\n self.__build_sample_model(\n batch_input_length=256\n ) # Multi-output model\n\n print(\"Loading finished in %i seconds.\" % ((datetime.now() - tstart).seconds))\n\n \"\"\"\n Public methods.\n \"\"\"\n\n def fit(\n self,\n model_name,\n epochs,\n lr,\n mini_epochs,\n patience,\n gpus=1,\n workers=1,\n use_multiprocessing=False,\n verbose=2,\n max_queue_size=10,\n clipvalue=0,\n save_period=5,\n checkpoint_dir=\"/\",\n lr_decay=False,\n lr_warmup=False,\n sch_epoch_to_start=500,\n sch_last_epoch=999,\n sch_lr_init=1e-3,\n sch_lr_final=1e-6,\n ):\n \"\"\"\n Fit the full model to the training data.\n Supports multi-gpu training if gpus set to >1.\n \n # Arguments\n kwargs:\n model_name : base name for the checkpoints - string\n epochs : number of epochs to train in total - int\n lr : initial learning rate of the training - float\n mini_epochs : number of dividends of an epoch (==1 means no mini_epochs) - int\n patience : minimum consecutive mini_epochs of stagnated learning rate to consider \n before lowering it - 
int\n gpus : number of gpus to use for multi-gpu training (==1 means single gpu) - int\n workers : number of CPU workers - int\n use_multiprocessing: flag for Keras multiprocessing - boolean\n verbose : verbosity of the training - int\n max_queue_size : max size of the generator queue - int\n clipvalue : value of gradient clipping - float\n save_period : mini_epochs every which to checkpoint the model - int\n checkpoint_dir : directory to store the checkpoints - string\n lr_decay : flag to use exponential decay of learning rate - boolean\n lr_warmup : flag to use warmup for transfer learning - boolean\n \"\"\"\n\n # Get parameter values if specified\n self.__epochs = epochs\n self.__lr = lr\n self.__clipvalue = clipvalue\n\n # Optimizer\n if clipvalue > 0:\n print(\"Using gradient clipping %.2f.\" % clipvalue)\n opt = Adam(lr=self.lr, clipvalue=self.clipvalue)\n\n else:\n opt = Adam(lr=self.lr)\n\n checkpoint_file = (\n checkpoint_dir + \"%s--{epoch:02d}--{val_loss:.4f}--{lr:.7f}\" % model_name\n )\n\n # If model is untrained, history is blank\n try:\n history = self.h\n\n # Else, append the history\n except:\n history = {}\n\n mhcp = ModelAndHistoryCheckpoint(\n filepath=checkpoint_file,\n model_dict=self.__dict__,\n monitor=\"val_loss\",\n verbose=1,\n mode=\"min\",\n period=save_period,\n history=history\n )\n\n # Training history\n self.__h = mhcp.history\n\n if lr_decay:\n lr_schedule = LearningRateSchedule(\n epoch_to_start=sch_epoch_to_start,\n last_epoch=sch_last_epoch,\n lr_init=sch_lr_init,\n lr_final=sch_lr_final,\n )\n\n lr_scheduler = LearningRateScheduler(\n schedule=lr_schedule.exp_decay, verbose=1\n )\n\n callbacks = [lr_scheduler, mhcp]\n\n elif lr_warmup:\n lr_schedule = LearningRateSchedule(\n epoch_to_start=sch_epoch_to_start,\n last_epoch=sch_last_epoch,\n lr_init=sch_lr_init,\n lr_final=sch_lr_final,\n )\n\n lr_scheduler = LearningRateScheduler(\n schedule=lr_schedule.warmup, verbose=1\n )\n\n callbacks = [lr_scheduler, mhcp]\n\n else:\n rlr = ReduceLROnPlateau(\n monitor=\"val_loss\",\n factor=0.5,\n patience=patience,\n min_lr=1e-6,\n verbose=1,\n min_delta=1e-4,\n )\n\n callbacks = [rlr, mhcp]\n\n # Inspect training parameters at the start of the training\n self.summary()\n\n # Parallel training on multiple GPUs\n if gpus > 1:\n parallel_model = multi_gpu_model(self.model, gpus=gpus)\n parallel_model.compile(loss=\"categorical_crossentropy\", optimizer=opt)\n # This `fit` call will be distributed on all GPUs.\n # Each GPU will process (batch_size/gpus) samples per batch.\n parallel_model.fit_generator(\n self.train_gen,\n steps_per_epoch=self.steps_per_epoch / mini_epochs,\n epochs=mini_epochs * self.epochs,\n validation_data=self.valid_gen,\n validation_steps=self.validation_steps / mini_epochs,\n callbacks=callbacks,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose,\n ) # 1 to show progress bar\n\n elif gpus == 1:\n self.model.compile(loss=\"categorical_crossentropy\", optimizer=opt)\n self.model.fit_generator(\n self.train_gen,\n steps_per_epoch=self.steps_per_epoch / mini_epochs,\n epochs=mini_epochs * self.epochs,\n validation_data=self.valid_gen,\n validation_steps=self.validation_steps / mini_epochs,\n callbacks=callbacks,\n max_queue_size=10,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose,\n ) # 1 to show progress bar\n\n # Build sample_model out of the trained batch_model\n self.__build_sample_model(batch_input_length=1) # Single-output model\n 
self.__build_sample_model(\n batch_input_length=self.batch_size\n ) # Multi-output model\n\n \n # @timed\n def predict(self, temp=1, rng_seed=None):\n \"\"\"\n Generate a single SMILES string.\n \n The states of the RNN are set based on the latent input.\n \n Careful, \"latent\" must be: the output of self.transform()\n or\n an array of molecular descriptors.\n \n If temp>0, multinomial sampling is used instead of selecting \n the single most probable character at each step.\n \n If temp==1, multinomial sampling without temperature scaling is used.\n \n Returns:\n A single SMILES string and its NLL.\n \"\"\"\n \n # Pass rng_seed for repeatable sampling \n if rng_seed is not None:\n np.random.seed(rng_seed)\n # Reset the states between predictions because RNN is stateful!\n self.sample_model.reset_states()\n\n # Prepare the input char\n startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]\n samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))\n samplevec[0, 0, startidx] = 1\n smiles = \"\"\n # Initialize Negative Log-Likelihood (NLL)\n NLL = 0\n # Loop and predict next char\n for i in range(1000):\n o = self.sample_model.predict(samplevec)\n # Multinomial sampling with temperature scaling\n if temp:\n temp = abs(temp) # Handle negative values\n nextCharProbs = np.log(o) / temp\n nextCharProbs = np.exp(nextCharProbs)\n nextCharProbs = (\n nextCharProbs / nextCharProbs.sum() - 1e-8\n ) # Re-normalize for float64 to make exactly 1.0 for np.random.multinomial\n sampleidx = np.random.multinomial(\n 1, nextCharProbs.squeeze(), 1\n ).argmax()\n\n # Else, select the most probable character\n else:\n sampleidx = np.argmax(o)\n\n samplechar = self.smilesvec1._int_to_char[sampleidx]\n if samplechar != self.smilesvec1.endchar:\n # Append the new character\n smiles += samplechar\n samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))\n samplevec[0, 0, sampleidx] = 1\n # Calculate negative log likelihood for the selected character given the sequence so far\n NLL -= np.log(o[0][0][sampleidx])\n else:\n return smiles, NLL\n\n # @timed\n def predict_batch(self, temp=1, rng_seed=None):\n \"\"\"\n Generate multiple random SMILES strings.\n \n If temp>0, multinomial sampling is used instead of selecting \n the single most probable character at each step.\n \n If temp==1, multinomial sampling without temperature scaling is used.\n \n Low temp leads to elimination of characters with low conditional probabilities.\n \"\"\"\n \n # Pass rng_seed for repeatable sampling \n if rng_seed is not None:\n np.random.seed(rng_seed) \n # Reset the states between predictions because RNN is stateful!\n self.multi_sample_model.reset_states()\n\n # Index of input char \"^\"\n startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]\n # Vectorize the input char for all SMILES\n samplevec = np.zeros((self.batch_input_length, 1, self.smilesvec1.dims[-1]))\n samplevec[:, 0, startidx] = 1\n # Initialize arrays to store SMILES, their NLLs and their status\n smiles = np.array([\"\"] * self.batch_input_length, dtype=object)\n NLL = np.zeros((self.batch_input_length,))\n finished = np.array([False] * self.batch_input_length)\n\n # Loop and predict next char\n for i in range(1000):\n o = self.multi_sample_model.predict(\n samplevec, batch_size=self.batch_input_length\n ).squeeze()\n\n # Multinomial sampling with temperature scaling\n if temp:\n temp = abs(temp) # No negative values\n nextCharProbs = np.log(o) / temp\n nextCharProbs = np.exp(nextCharProbs) # .squeeze()\n\n # Normalize probabilities\n 
nextCharProbs = (nextCharProbs.T / nextCharProbs.sum(axis=1) - 1e-8).T\n sampleidc = np.asarray(\n [\n np.random.multinomial(1, nextCharProb, 1).argmax()\n for nextCharProb in nextCharProbs\n ]\n )\n\n else:\n sampleidc = np.argmax(o, axis=1)\n\n samplechars = [self.smilesvec1._int_to_char[idx] for idx in sampleidc]\n\n for idx, samplechar in enumerate(samplechars):\n if not finished[idx]:\n if samplechar != self.smilesvec1.endchar:\n # Append the SMILES with the next character\n smiles[idx] += self.smilesvec1._int_to_char[sampleidc[idx]]\n samplevec = np.zeros(\n (self.batch_input_length, 1, self.smilesvec1.dims[-1])\n )\n # One-Hot Encode the character\n # samplevec[:,0,sampleidc] = 1\n for count, sampleidx in enumerate(sampleidc):\n samplevec[count, 0, sampleidx] = 1\n # Calculate negative log likelihood for the selected character given the sequence so far\n NLL[idx] -= np.log(o[idx][sampleidc[idx]])\n else:\n finished[idx] = True\n # print(\"SMILES has finished at %i\" %i)\n\n # If all SMILES are finished, i.e. the endchar \"$\" has been generated, stop the generation\n if finished.sum() == len(finished):\n return smiles, NLL\n\n @timed\n def get_smiles_nll(self, smiles_ref) -> float:\n \"\"\"\n Calculate the NLL of a given SMILES string if its descriptors are used as RNN states.\n \n Returns:\n The NLL of sampling a given SMILES string.\n \"\"\"\n\n # Reset the states between predictions because RNN is stateful!\n self.sample_model.reset_states()\n\n # Prepare the input char\n startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]\n samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))\n samplevec[0, 0, startidx] = 1\n\n # Initialize Negative Log-Likelihood (NLL)\n NLL = 0\n # Loop and predict next char\n for i in range(1000):\n o = self.sample_model.predict(samplevec)\n\n samplechar = smiles_ref[i]\n sampleidx = self.smilesvec1._char_to_int[samplechar]\n\n if i != len(smiles_ref) - 1:\n samplevec = np.zeros((1, 1, self.smilesvec1.dims[-1]))\n samplevec[0, 0, sampleidx] = 1\n # Calculate negative log likelihood for the selected character given the sequence so far\n NLL -= np.log(o[0][0][sampleidx])\n else:\n return NLL\n\n @timed\n def get_smiles_nll_batch(self, smiles_ref) -> list:\n \"\"\"\n Calculate the individual NLL for a batch of known SMILES strings.\n Batch size is equal to self.batch_input_length so reset it if needed.\n \n Returns:\n NLL of sampling all listed SMILES.\n \"\"\"\n\n # Reset the states between predictions because RNN is stateful!\n self.multi_sample_model.reset_states()\n\n # Index of input char \"^\"\n startidx = self.smilesvec1._char_to_int[self.smilesvec1.startchar]\n # Vectorize the input char for all SMILES\n samplevec = np.zeros((self.batch_input_length, 1, self.smilesvec1.dims[-1]))\n samplevec[:, 0, startidx] = 1\n # Initialize arrays to store NLLs and flag if a SMILES is finished\n NLL = np.zeros((self.batch_input_length,))\n finished = np.array([False] * self.batch_input_length)\n\n # Loop and predict next char\n for i in range(1000):\n o = self.multi_sample_model.predict(\n samplevec, batch_size=self.batch_input_length\n ).squeeze()\n samplechars = []\n\n for smiles in smiles_ref:\n try:\n samplechars.append(smiles[i])\n except:\n # This is a finished SMILES, so \"i\" exceeds dimensions\n samplechars.append(\"$\")\n\n sampleidc = np.asarray(\n [self.smilesvec1._char_to_int[char] for char in samplechars]\n )\n\n for idx, samplechar in enumerate(samplechars):\n if not finished[idx]:\n if i != len(smiles_ref[idx]) - 1:\n samplevec = 
np.zeros(\n (self.batch_input_length, 1, self.smilesvec1.dims[-1])\n )\n # One-Hot Encode the character\n for count, sampleidx in enumerate(sampleidc):\n samplevec[count, 0, sampleidx] = 1\n # Calculate negative log likelihood for the selected character given the sequence so far\n NLL[idx] -= np.log(o[idx][sampleidc[idx]])\n else:\n finished[idx] = True\n\n # If all SMILES are finished, i.e. the endchar \"$\" has been generated, stop the generation\n if finished.sum() == len(finished):\n return NLL\n\n def summary(self):\n \"\"\"\n Echo the training configuration for inspection.\n \"\"\"\n\n print(\n \"\\nModel trained with dataset %s that has maxlen=%d and charset=%s for %d epochs.\"\n % (self.dataset_name, self.maxlen, self.charset, self.epochs)\n )\n\n print(\n \"noise_std: %.6f, lstm_dim: %d, dec_layers: %d, td_dense_dim: %d, batch_size: %d, codelayer_dim: %d, lr: %.6f.\"\n % (\n self.noise_std,\n self.lstm_dim,\n self.dec_layers,\n self.td_dense_dim,\n self.batch_size,\n self.codelayer_dim,\n self.lr,\n )\n )\n\n def get_graphs(self):\n \"\"\"\n Export the graphs of the model and its submodels to png files.\n Requires \"pydot\" and \"graphviz\" to be installed (pip install graphviz && pip install pydot).\n \"\"\"\n\n try:\n from keras.utils import plot_model\n from keras.utils.vis_utils import model_to_dot\n\n # from IPython.display import SVG\n\n plot_model(self.model, to_file=\"model.png\")\n\n print(\"Model exported to png.\")\n\n except:\n print(\"Check pydot and graphviz installation.\")\n\n @timed\n def save(self, model_name):\n \"\"\"\n Save model in a zip file.\n \"\"\"\n\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Save the Keras model\n self.model.save(dirpath + \"/model.h5\")\n\n # Exclude unpicklable and unwanted attributes\n excl_attr = [\n \"_DDC__mode\", # excluded because it is always identified within self.__init__()\n \"_DDC__train_gen\", # unpicklable\n \"_DDC__valid_gen\", # unpicklable\n \"_DDC__sample_model\", # unpicklable\n \"_DDC__multi_sample_model\", # unpicklable\n \"_DDC__model\",\n ] # unpicklable\n\n # Cannot deepcopy self.__dict__ because of Keras' thread lock so this is\n # bypassed by popping and re-inserting the unpicklable attributes\n to_add = {}\n # Remove unpicklable attributes\n for attr in excl_attr:\n to_add[attr] = self.__dict__.pop(attr, None)\n\n # Pickle metadata, i.e. almost everything but the Keras models and generators\n pickle.dump(self.__dict__, open(dirpath + \"/metadata.pickle\", \"wb\"))\n\n # Zip directory with its contents\n shutil.make_archive(model_name, \"zip\", dirpath)\n\n # Finally, re-load the popped elements for the model to be usable\n for attr in excl_attr:\n self.__dict__[attr] = to_add[attr]\n\n print(\"Model saved.\")\n" ]
[ [ "numpy.array", "numpy.asarray", "numpy.zeros", "tensorflow.keras.layers.Input", "numpy.random.seed", "numpy.log", "numpy.exp", "tensorflow.keras.models.Model.from_config", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.models.load_model", "tensorflow.keras.callbacks.LearningRateScheduler", "tensorflow.keras.utils.multi_gpu_model", "numpy.argmax", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.callbacks.ReduceLROnPlateau", "numpy.random.multinomial" ] ]
mengmengliu1998/qd-3dt
[ "9fcd1c0b165793e259deb46a64fcbbdc33735f2f" ]
[ "scripts/nuscenes/eval/tracking/evaluate.py" ]
[ "# nuScenes dev-kit.\n# Code written by Holger Caesar, Caglayan Dicle and Oscar Beijbom, 2019.\n\nimport argparse\nimport json\nimport os\nimport time\nfrom typing import Tuple, List, Dict, Any\n\nimport numpy as np\n\nfrom nuscenes import NuScenes\nfrom nuscenes.eval.common.config import config_factory\nfrom nuscenes.eval.common.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes\nfrom nuscenes.eval.tracking.data_classes import TrackingMetrics, TrackingMetricDataList, TrackingConfig, TrackingBox, \\\n TrackingMetricData\nfrom nuscenes.eval.tracking.algo import TrackingEvaluation\nfrom nuscenes.eval.tracking.loaders import create_tracks\nfrom nuscenes.eval.tracking.utils import print_final_metrics\nfrom nuscenes.eval.tracking.constants import AVG_METRIC_MAP, MOT_METRIC_MAP, LEGACY_METRICS\nfrom nuscenes.eval.tracking.render import recall_metric_curve, summary_plot\n\n\nclass TrackingEval:\n \"\"\"\n This is the official nuScenes tracking evaluation code.\n Results are written to the provided output_dir.\n\n Here is an overview of the functions in this method:\n - init: Loads GT annotations and predictions stored in JSON format and filters the boxes.\n - run: Performs evaluation and dumps the metric data to disk.\n - render: Renders various plots and dumps to disk.\n\n We assume that:\n - Every sample_token is given in the results, although there may be not predictions for that sample.\n\n Please see https://www.nuscenes.org/tracking for more details.\n \"\"\"\n def __init__(self,\n config: TrackingConfig,\n result_path: str,\n eval_set: str,\n output_dir: str,\n nusc_version: str,\n nusc_dataroot: str,\n verbose: bool = True,\n render_classes: List[str] = None):\n \"\"\"\n Initialize a TrackingEval object.\n :param config: A TrackingConfig object.\n :param result_path: Path of the nuScenes JSON result file.\n :param eval_set: The dataset split to evaluate on, e.g. 
train, val or test.\n :param output_dir: Folder to save plots and results to.\n :param nusc_version: The version of the NuScenes dataset.\n :param nusc_dataroot: Path of the nuScenes dataset on disk.\n :param verbose: Whether to print to stdout.\n :param render_classes: Classes to render to disk or None.\n \"\"\"\n self.cfg = config\n self.result_path = result_path\n self.eval_set = eval_set\n self.output_dir = output_dir\n self.verbose = verbose\n self.render_classes = render_classes\n\n # Check result file exists.\n assert os.path.exists(result_path), 'Error: The result file does not exist!'\n\n # Make dirs.\n self.plot_dir = os.path.join(self.output_dir, 'plots')\n if not os.path.isdir(self.output_dir):\n os.makedirs(self.output_dir)\n if not os.path.isdir(self.plot_dir):\n os.makedirs(self.plot_dir)\n\n # Initialize NuScenes object.\n # We do not store it in self to let garbage collection take care of it and save memory.\n nusc = NuScenes(version=nusc_version, verbose=verbose, dataroot=nusc_dataroot)\n\n # Load data.\n if verbose:\n print('Initializing nuScenes tracking evaluation')\n pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, TrackingBox,\n verbose=verbose)\n gt_boxes = load_gt(nusc, self.eval_set, TrackingBox, verbose=verbose)\n\n assert set(pred_boxes.sample_tokens) == set(gt_boxes.sample_tokens), \\\n \"Samples in split don't match samples in predicted tracks.\"\n\n # Add center distances.\n pred_boxes = add_center_dist(nusc, pred_boxes)\n gt_boxes = add_center_dist(nusc, gt_boxes)\n\n # Filter boxes (distance, points per box, etc.).\n if verbose:\n print('Filtering tracks')\n pred_boxes = filter_eval_boxes(nusc, pred_boxes, self.cfg.class_range, verbose=verbose)\n if verbose:\n print('Filtering ground truth tracks')\n gt_boxes = filter_eval_boxes(nusc, gt_boxes, self.cfg.class_range, verbose=verbose)\n\n self.sample_tokens = gt_boxes.sample_tokens\n\n # Convert boxes to tracks format.\n self.tracks_gt = create_tracks(gt_boxes, nusc, self.eval_set, gt=True)\n self.tracks_pred = create_tracks(pred_boxes, nusc, self.eval_set, gt=False)\n\n def evaluate(self) -> Tuple[TrackingMetrics, TrackingMetricDataList]:\n \"\"\"\n Performs the actual evaluation.\n :return: A tuple of high-level and the raw metric data.\n \"\"\"\n start_time = time.time()\n metrics = TrackingMetrics(self.cfg)\n\n # -----------------------------------\n # Step 1: Accumulate metric data for all classes and distance thresholds.\n # -----------------------------------\n if self.verbose:\n print('Accumulating metric data...')\n metric_data_list = TrackingMetricDataList()\n\n def accumulate_class(curr_class_name):\n curr_ev = TrackingEvaluation(self.tracks_gt, self.tracks_pred, curr_class_name, self.cfg.dist_fcn_callable,\n self.cfg.dist_th_tp, self.cfg.min_recall,\n num_thresholds=TrackingMetricData.nelem,\n metric_worst=self.cfg.metric_worst,\n verbose=self.verbose,\n output_dir=self.output_dir,\n render_classes=self.render_classes)\n curr_md = curr_ev.accumulate()\n metric_data_list.set(curr_class_name, curr_md)\n\n for class_name in self.cfg.class_names:\n accumulate_class(class_name)\n\n # -----------------------------------\n # Step 2: Aggregate metrics from the metric data.\n # -----------------------------------\n if self.verbose:\n print('Calculating metrics...')\n for class_name in self.cfg.class_names:\n # Find best MOTA to determine threshold to pick for traditional metrics.\n # If multiple thresholds have the same value, pick the one with the highest recall.\n 
md = metric_data_list[class_name]\n if np.all(np.isnan(md.mota)):\n best_thresh_idx = None\n else:\n best_thresh_idx = np.nanargmax(md.mota)\n\n # Pick best value for traditional metrics.\n if best_thresh_idx is not None:\n for metric_name in MOT_METRIC_MAP.values():\n if metric_name == '':\n continue\n value = md.get_metric(metric_name)[best_thresh_idx]\n metrics.add_label_metric(metric_name, class_name, value)\n\n # Compute AMOTA / AMOTP.\n for metric_name in AVG_METRIC_MAP.keys():\n values = np.array(md.get_metric(AVG_METRIC_MAP[metric_name]))\n assert len(values) == TrackingMetricData.nelem\n\n if np.all(np.isnan(values)):\n # If no GT exists, set to nan.\n value = np.nan\n else:\n # Overwrite any nan value with the worst possible value.\n np.all(values[np.logical_not(np.isnan(values))] >= 0)\n values[np.isnan(values)] = self.cfg.metric_worst[metric_name]\n value = float(np.nanmean(values))\n metrics.add_label_metric(metric_name, class_name, value)\n\n # Compute evaluation time.\n metrics.add_runtime(time.time() - start_time)\n\n return metrics, metric_data_list\n\n def render(self, md_list: TrackingMetricDataList) -> None:\n \"\"\"\n Renders a plot for each class and each metric.\n :param md_list: TrackingMetricDataList instance.\n \"\"\"\n if self.verbose:\n print('Rendering curves')\n\n def savepath(name):\n return os.path.join(self.plot_dir, name + '.pdf')\n\n # Plot a summary.\n summary_plot(md_list, min_recall=self.cfg.min_recall, savepath=savepath('summary'))\n\n # For each metric, plot all the classes in one diagram.\n for metric_name in LEGACY_METRICS:\n recall_metric_curve(md_list, metric_name,\n self.cfg.min_recall, savepath=savepath('%s' % metric_name))\n\n def main(self, render_curves: bool = True) -> Dict[str, Any]:\n \"\"\"\n Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.\n :param render_curves: Whether to render PR and TP curves to disk.\n :return: The serialized TrackingMetrics computed during evaluation.\n \"\"\"\n # Run evaluation.\n metrics, metric_data_list = self.evaluate()\n\n # Dump the metric data, meta and metrics to disk.\n if self.verbose:\n print('Saving metrics to: %s' % self.output_dir)\n metrics_summary = metrics.serialize()\n metrics_summary['meta'] = self.meta.copy()\n with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:\n json.dump(metrics_summary, f, indent=2)\n with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:\n json.dump(metric_data_list.serialize(), f, indent=2)\n\n # Print metrics to stdout.\n if self.verbose:\n print_final_metrics(metrics)\n\n # Render curves.\n if render_curves:\n self.render(metric_data_list)\n\n return metrics_summary\n\n\nif __name__ == \"__main__\":\n\n # Settings.\n parser = argparse.ArgumentParser(description='Evaluate nuScenes tracking results.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('result_path', type=str, help='The submission as a JSON file.')\n parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics',\n help='Folder to store result metrics, graphs and example visualizations.')\n parser.add_argument('--eval_set', type=str, default='val',\n help='Which dataset split to evaluate on, train, val or test.')\n parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes',\n help='Default nuScenes data directory.')\n parser.add_argument('--version', type=str, default='v1.0-trainval',\n help='Which version of the nuScenes dataset to evaluate on, 
e.g. v1.0-trainval.')\n parser.add_argument('--config_path', type=str, default='',\n help='Path to the configuration file.'\n 'If no path given, the NIPS 2019 configuration will be used.')\n parser.add_argument('--render_curves', type=int, default=1,\n help='Whether to render statistic curves to disk.')\n parser.add_argument('--verbose', type=int, default=1,\n help='Whether to print to stdout.')\n parser.add_argument('--render_classes', type=str, default='', nargs='+',\n help='For which classes we render tracking results to disk.')\n args = parser.parse_args()\n\n result_path_ = os.path.expanduser(args.result_path)\n output_dir_ = os.path.expanduser(args.output_dir)\n eval_set_ = args.eval_set\n dataroot_ = args.dataroot\n version_ = args.version\n config_path = args.config_path\n render_curves_ = bool(args.render_curves)\n verbose_ = bool(args.verbose)\n render_classes_ = args.render_classes\n\n if config_path == '':\n cfg_ = config_factory('tracking_nips_2019')\n else:\n with open(config_path, 'r') as _f:\n cfg_ = TrackingConfig.deserialize(json.load(_f))\n\n nusc_eval = TrackingEval(config=cfg_, result_path=result_path_, eval_set=eval_set_, output_dir=output_dir_,\n nusc_version=version_, nusc_dataroot=dataroot_, verbose=verbose_,\n render_classes=render_classes_)\n nusc_eval.main(render_curves=render_curves_)\n" ]
[ [ "numpy.nanargmax", "numpy.nanmean", "numpy.isnan" ] ]
grebtsew/DiscGolf
[ "a4bb9862a53cba09c2dc64596c5f96cbc72fac95" ]
[ "Calculations/Frisbee_Flight_Trajectory(3dmodel)_updated.py" ]
[ "import matplotlib.pyplot as plt\nimport math\nimport numpy as np\n\n\"\"\"\nTermology explained:\n\n x - axis are forward/backward / roll\n y - axis are height / spin\n z - axis are sides / pitch\n\n Above AREA\n[-----------]\n ________ _\n /| |\\ |\n| | | | | Side AREA\n \\| |/ |\n -\n\nMain sources:\n[1] http://scripts.mit.edu/~womens-ult/frisbee_physics.pdf (good article about python 2dmodels, see script in the end of article)\n[2] https://scholarworks.moreheadstate.edu/cgi/viewcontent.cgi?article=1082&context=student_scholarship_posters\n[3] https://morleyfielddgc.files.wordpress.com/2009/04/hummelthesis.pdf (good article about 3dmodels, see figures on page 33 and 35!)\n\"\"\"\n\nclass Vector():\n def __init__(self, x = 0.0, y= 0.0, z= 0.0):\n self.x = x\n self.y = y\n self.z = z\n def tuple(self):\n return (self.x,self.y,self.z)\n def mag(self):\n return math.sqrt(self.x**2+self.y**2+self.z**2)\n def norm(self):\n if(self.mag() == 0):\n return Vector(0,0,0)\n return Vector(self.x/self.mag(), self.y/self.mag(), self.z/self.mag())\n\ndef dot(v1,v2):\n return v1.x*v2.x+v1.y*v2.y+v1.z*v2.z\n\nclass Frisbee():\n # Frisbee specifics\n m = 0.175 # in kg\n R = 0.1085 # radie in m\n\n # See these on discsport.se\n Rim_Thickness = 0.012\n Rim_depth = 0.013\n Center_Height = 0.019 # max height\n\n # Inertia\n I = Vector(0.00122, 0.00122, 0.00235)\n \n # Area\n above_AREA = math.pi * pow(R, 2)\n side_AREA = R*2*Center_Height\n\n # Disc specifics [3]\n # Pitch angle at completely straight flight\n ALPHA0 = -4\n\n CL0 = 0.33 #The lift coefficient at alpha=0.\n CLA = 1.91 #The lift coefficient dependent on alpha\n\n CD0 = 0.18 #The drag coefficient at alpha = 0\n CDA = 0.69 #The drag coefficient dependent on alpha\n\n # Rotational parameters\n CRR = 0.014\n CRP = -0.0055\n CNR = -0.0000071\n CM0 = -0.08\n CMA = 0.43\n CMq = -0.005\n\n# Global Variables!\ng = -9.81 # gravity\nRHO = 1.23 # density of air\nf = Frisbee() # frisbee reference\n\n\"\"\"\nPhysics functions\n\"\"\"\n\ndef lift(r,v):\n \"\"\"\n Calculate lift from angles and velocity.\n Return force vector\n \"\"\"\n alpha = math.acos(dot(v.norm(),r.norm()) )\n cl = f.CL0 + f.CLA * math.radians(alpha)\n lift = ((RHO*pow(v.mag(),2)*f.above_AREA*cl)/2)\n\n acc = Vector( math.sin(r.z)*lift , lift*((1- math.sin(r.z) + (1-math.sin(r.x))))/2, math.sin(r.x)*lift )\n \n return acc\n\ndef drag(r, v):\n \"\"\"\n Calculate drag from angles and velocity.\n Return force vector\n \"\"\"\n vn = v.norm()\n alpha = math.acos(dot(vn,r.norm()))\n cd = f.CD0 + f.CDA *pow(alpha - math.radians(f.ALPHA0), 2)\n drag = ((RHO*pow(v.mag(),2)*f.above_AREA*cd)/2)\n \n acc = Vector(- abs(vn.x)*drag,- abs(vn.y)*drag,- abs(vn.z)*drag)\n\n return acc \n\ndef x_axis_rot(w, v):\n \"\"\"\n ROLL\n x = Roll = vx\n Crr, Crp = constants\n Formula:\n R = (Crr*r + Crp*p)*1/2*RHO*v^2*AREA*2*R\n \"\"\"\n roll = (f.CRR*w.y+ f.CRP*w.x)* 1/2 * RHO * pow(v.mag(),2) * f.above_AREA * 2*f.R\n return roll\n\ndef y_axis_rot(w, v):\n \"\"\"\n SPIN DOWN (might be almost negligible in some cases)\n y = spin down = r\n angular velocity drag\n Formula:\n N = (CNR*r)*1/2*RHO*v^2*AREA*d\n \"\"\"\n spin = (f.CNR*w.y) * 1/2 * RHO * pow(v.mag(),2) * f.above_AREA *f.R*2\n return spin\n\n\ndef z_axis_rot(w, r, v):\n \"\"\"\n PITCH\n z = Pitch = p\n Cm0, Cma, Cmq = constants\n Formula:\n M = (CM0 + Cma*alpha + CMq*q)*1/2*RHO*v^2*AREA*d\n \"\"\"\n alpha = math.acos(dot(v.norm(),r.norm()) )\n pitch =(f.CM0 + f.CMA*math.radians(alpha) + f.CMq* w.z) * 1/2 * RHO * pow(v.mag(),2) * f.above_AREA * 2*f.R\n 
return pitch\n\ndef simulate(p, v, r, w, deltaT):\n \"\"\"\n @PARAMS:\n p - position Vector\n v - velocity Vector\n r - Rotation Vector\n w - Rotation velocity Vector\n \"\"\"\n\n print(\"Simulation Started:\")\n print( \"Position Vector (\",p.x, p.y, p.z,\") [m]\" )\n print( \"Velocity Vector (\",v.x, v.y, v.z,\") [m/s]\" )\n print( \"Velocity Vector (\",3.6*v.x, 3.6*v.y, 3.6*v.z,\") [km/h]\" )\n print( \"Rotation Vector (\",r.x, r.y, r.z,\") [rad/s]\")\n print( \"Rotational Speed Vector (\", w.x,w.y,w.z, \") [rad/s]\")\n\n # These lists are used to save values for plot!\n rx_values = []\n ry_values = []\n rz_values = []\n x_values = []\n y_values = []\n z_values = []\n vx_values = []\n vz_values = []\n counter = 0\n data = [[\"Position [m]\",\"Velocity [m/s]\",\"Rotation [Degree]\",\"Angular Velocity [rad/s]\"]]\n \n # Torque\n T = Vector(0,0,0)\n \n # Angular acceleration\n c = Vector(0,0,0)\n\n # Acceleration\n a = Vector(0,0,0)\n\n while (p.y > 0):\n\n # Get Angular Moments from [3]\n T.x = x_axis_rot(w, v) \n T.y = y_axis_rot(w, v) \n T.z = z_axis_rot(w, r, v)\n\n # Get Angular Acceleration\n c.x = T.x/f.I.x\n c.y = T.y/f.I.y\n c.z = T.z/f.I.z\n\n # Get Angle velocity\n w.x += c.x*deltaT\n w.y += c.y*deltaT\n w.z += c.z*deltaT\n\n # Get Angles\n r.x += w.x*deltaT\n r.y += w.y*deltaT\n r.z += w.z*deltaT\n\n # Forces\n drag_force = drag(r,v)\n lift_force = lift(r,v)\n gravity_force = Vector(0,g*f.m,0)\n \n # Acceleration a = F/m\n a.x = (drag_force.x + lift_force.x + gravity_force.x)/f.m\n a.y = (drag_force.y + lift_force.y + gravity_force.y)/f.m\n a.z = (drag_force.z + lift_force.z + gravity_force.z)/f.m\n\n # Velocity\n v.x += a.x*deltaT\n v.y += a.y*deltaT\n v.z += a.z*deltaT\n \n # Get position\n p.x += v.x*deltaT\n p.y += v.y*deltaT\n p.z += v.z*deltaT\n\n if (counter % 4 == 0): # skip some values to speed things up\n x_values.append(p.x)\n y_values.append(p.y)\n z_values.append(p.z)\n vz_values.append(v.z)\n vx_values.append(v.x)\n rx_values.append(r.x)\n ry_values.append(r.y)\n rz_values.append(r.z)\n\n counter+=1\n\n print(\"Done, Number of calculation iterations: \", counter)\n\n plot_graphs(x_values, y_values, z_values, vz_values, vx_values,rx_values, ry_values, rz_values)\n\ndef get_all_pos(x,y, data):\n res = []\n c = 0\n for d in data:\n if c == 0:\n c+=1\n continue\n res.append(d[x].tuple()[y])\n return res\n\ndef get_index_name(i):\n if i == 0:\n return \"x\"\n if i == 1:\n return \"y\"\n if i == 2:\n return \"z\"\n else:\n return \"Nan\"\n\ndef plot_graphs(x_values, y_values, z_values, vz_values, vx_values,rx_values, ry_values, rz_values):\n fig, axs = plt.subplots(5, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')\n\n axs[0].set_title('Frisbee Flight Trajectory')\n axs[0].set_xlabel('X distance [m] ')\n axs[0].set_ylabel('Y height [m]')\n axs[0].plot(x_values, y_values, '-b', label= \"x\")\n axs[0].plot(vx_values,'-r', label= \"vx\")\n\n axs[1].set_xlabel('Z displacement [m]')\n axs[1].set_ylabel('X distance [m]')\n axs[1].plot(z_values, x_values, '-b', label= \"z\")\n axs[1].plot(vz_values,'-r', label= \"vz\")\n\n axs[2].set_xlabel('X Rotation')\n axs[2].set_ylabel('X distance [m]')\n axs[2].plot(x_values, rx_values, '-b', label= \"rx\")\n\n axs[3].set_xlabel('Y Rotation [m]')\n axs[3].set_ylabel('X distance [m]')\n axs[3].plot(x_values, ry_values, '-b', label= \"ry\")\n\n axs[4].set_xlabel('Z Rotation')\n axs[4].set_ylabel('X distance [m]')\n axs[4].plot(x_values, rz_values, '-b', label= \"rz\")\n\n axs[0].legend(loc=\"upper right\")\n axs[1].legend(loc=\"upper 
right\")\n axs[2].legend(loc=\"upper right\")\n axs[3].legend(loc=\"upper right\")\n axs[4].legend(loc=\"upper right\")\n\n plt.subplots_adjust(hspace = 1 )\n plt.show()\n\nif __name__ == \"__main__\":\n position_vector = Vector(0,10,0) # [m]\n velocity_vector = Vector(20,0,0) # [m/s]\n rotation_vector = Vector(0,0,0) # [radians]\n rotational_speed_vector = Vector(0,25,0) # [radians per second] roll,spin,pitch\n deltaT = 0.01 # time intervall\n\n simulate(position_vector, velocity_vector, rotation_vector, rotational_speed_vector, deltaT)\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.subplots" ] ]
thequackdaddy/blaze
[ "21ba90c17b6b807623bbc9996bfc838f13ee6ea1" ]
[ "blaze/compute/tests/test_csv_compute.py" ]
[ "from blaze.compute.csv import pre_compute, CSV\nfrom blaze import compute, discover, dshape, into, join, concat, data\nfrom blaze.utils import example, filetext, filetexts\nfrom blaze.expr import symbol\nfrom pandas import DataFrame, Series\nimport pandas.util.testing as tm\nfrom datashape.predicates import iscollection\nimport numpy as np\nimport pandas as pd\nfrom toolz import first\nfrom collections import Iterator\nfrom odo import odo\nfrom odo.chunks import chunks\n\n\ndef test_pre_compute_on_small_csv_gives_dataframe():\n csv = CSV(example('iris.csv'))\n s = symbol('s', discover(csv))\n assert isinstance(pre_compute(s.species, csv), (Series, DataFrame))\n\n\ndef test_pre_compute_on_large_csv_gives_chunked_reader():\n csv = CSV(example('iris.csv'))\n s = symbol('s', discover(csv))\n assert isinstance(pre_compute(s.species, csv, comfortable_memory=10),\n (chunks(pd.DataFrame), pd.io.parsers.TextFileReader))\n\n\ndef test_pre_compute_with_head_on_large_csv_yields_iterator():\n csv = CSV(example('iris.csv'))\n s = symbol('s', discover(csv))\n assert isinstance(pre_compute(s.species.head(), csv, comfortable_memory=10),\n Iterator)\n\n\ndef test_compute_chunks_on_single_csv():\n csv = CSV(example('iris.csv'))\n s = symbol('s', discover(csv))\n expr = s.sepal_length.max()\n assert compute(expr, {s: csv}, comfortable_memory=10, chunksize=50) == 7.9\n\n\ndef test_pre_compute_with_projection_projects_on_data_frames():\n csv = CSV(example('iris.csv'))\n s = symbol('s', discover(csv))\n result = pre_compute(s[['sepal_length', 'sepal_width']].distinct(),\n csv, comfortable_memory=10)\n assert set(first(result).columns) == \\\n set(['sepal_length', 'sepal_width'])\n\n\ndef test_pre_compute_calls_lean_projection():\n csv = CSV(example('iris.csv'))\n s = symbol('s', discover(csv))\n result = pre_compute(s.sort('sepal_length').species,\n csv, comfortable_memory=10)\n assert set(first(result).columns) == \\\n set(['sepal_length', 'species'])\n\n\ndef test_unused_datetime_columns():\n ds = dshape('2 * {val: string, when: datetime}')\n with filetext(\"val,when\\na,2000-01-01\\nb,2000-02-02\") as fn:\n csv = CSV(fn, has_header=True)\n\n s = symbol('s', discover(csv))\n assert into(list, compute(s.val, csv)) == ['a', 'b']\n\n\ndef test_multiple_csv_files():\n d = {'mult1.csv': 'name,val\\nAlice,1\\nBob,2',\n 'mult2.csv': 'name,val\\nAlice,3\\nCharlie,4'}\n\n dta = [('Alice', 1), ('Bob', 2), ('Alice', 3), ('Charlie', 4)]\n with filetexts(d) as fns:\n r = data('mult*.csv')\n s = symbol('s', discover(r))\n\n for e in [s, s.name, s.name.nunique(), s.name.count_values(),\n s.val.mean()]:\n a = compute(e, {s: r})\n b = compute(e, {s: dta})\n if iscollection(e.dshape):\n a, b = into(set, a), into(set, b)\n assert a == b\n\n\ndef test_csv_join():\n d = {'a.csv': 'a,b,c\\n0,1,2\\n3,4,5',\n 'b.csv': 'c,d,e\\n2,3,4\\n5,6,7'}\n\n with filetexts(d):\n data_a = data('a.csv')\n data_b = data('b.csv')\n a = symbol('a', discover(data_a))\n b = symbol('b', discover(data_b))\n tm.assert_frame_equal(\n odo(\n compute(join(a, b, 'c'), {a: data_a, b: data_b}),\n pd.DataFrame,\n ),\n\n # windows needs explicit int64 construction b/c default is int32\n pd.DataFrame(np.array([[2, 0, 1, 3, 4],\n [5, 3, 4, 6, 7]], dtype='int64'),\n columns=list('cabde'))\n )\n\n\ndef test_concat():\n d = {'a.csv': 'a,b\\n1,2\\n3,4',\n 'b.csv': 'a,b\\n5,6\\n7,8'}\n\n with filetexts(d):\n a_rsc = data('a.csv')\n b_rsc = data('b.csv')\n\n a = symbol('a', discover(a_rsc))\n b = symbol('b', discover(b_rsc))\n\n tm.assert_frame_equal(\n odo(\n 
compute(concat(a, b), {a: a_rsc, b: b_rsc}), pd.DataFrame,\n ),\n\n # windows needs explicit int64 construction b/c default is int32\n pd.DataFrame(np.arange(1, 9, dtype='int64').reshape(4, 2),\n columns=list('ab')),\n )\n" ]
[ [ "numpy.array", "numpy.arange" ] ]
lvotapka/mmvt_seekr
[ "077115e0fbf95233e1b00fd262de6a43282e69fa" ]
[ "mmvt_seekr/pdb2.py" ]
[ "'''\npdb2.py\nby Lane Votapka\nAmaro Lab 2015\n\nThis file contains multiple functions and classes that can be used to read and write PDB, PQR, and PQRXML formats in versatile ways.\nIt also contains some functions for processing loaded structures, such as center-of-mass calculations, etc.\n\n'''\nimport re, warnings, time, os, sys\nfrom copy import deepcopy\nimport string\nimport numpy\nfrom numpy import array, matrix\nfrom io import StringIO, IOBase # NOTE: may want to change this to cStringIO if more speed is needed in the future\n#from modscience import normalize_vector # makes the longest vector equal to 1 # this is too annoying to import\nimport unittest\nimport xml.etree.cElementTree as ET\nfrom xml.etree.cElementTree import iterparse # for parsing the XML\nfrom io import StringIO # allows files to be loaded as a special object\n\nREMARK = 'REMARK 1 File generated by pdb2.py by Lane Votapka\\n'\natomic_weights = {'H':1.00800, 'C':12.0110, 'N':14.00699, 'O':15.9994, 'F':18.9984, 'Na':22.9898, 'Mg':24.305, 'P':30.9738, 'S':32.065, 'Cl':35.453, 'K':39.0983, 'Ca':40.078, 'Fe':55.845, 'Br':79.904, 'I':126.9045, '':0.0, ' ':0.0, 'G':0.0} # all the atomic weights. NOTE: 'G' refers to a \"ghost\" atom\nradii = {'C': 1.9080, 'N':1.824, 'H':0.0000, 'O':1.66, 'S':2.000, 'Ca':1.97, 'P':2.00, 'Cu':2.5, 'Zn':2.5, '':0.0, \"G\":0.0}\nTER_atoms = ['OXT', 'Ca+'] # atom names after which automatically add a TER card if amber mode activated\nTER_cutoff = sys.maxsize # the distance at which to assume atoms aren't bonded, and add a TER card\nTER_resnames = [\"WAT\", \"K+\", \"Cl-\", \"Na+\"] # resnames which represent an entire molecule of their own\nwater_resnames = ('TIP','H2O','HOH','WAT')\n\nclass Atom():\n def __init__(self, record, index, name, altloc, resname, chain, resid, icode, x, y, z, occupancy, beta, element, charge, rawline='', radius='0.0'):\n # fill out atomic information\n self.record = record\n self.index = int(index)\n self.name = name.strip()\n self.altloc = altloc\n self.resname = resname.strip()\n self.chain = chain\n self.resid = resid\n self.icode = icode\n self.coords = [float(x), float(y), float(z)]\n self.x = float(x)\n self.y = float(y)\n self.z = float(z)\n self.occupancy = float(occupancy)\n self.beta = float(beta)\n self.element = element\n if not self.element or self.element == ' ': # then assign an element to this molecule\n self.element = find_element(self.name)\n if charge:\n try:\n self.charge = float(charge)\n except ValueError: # then there is something weird with this value, and was obviously not meant to be a charge\n self.charge = charge\n else:\n self.charge = charge\n if radius:\n self.radius = float(radius)\n else:\n self.radius = radius\n #self.rawline = rawline\n def get_coords(self):\n return self.coords\n\n def to_dict(self):\n ''' returns the attributes of the Atom() class in dictionary form'''\n our_dict = {}\n for var in ['record','index','name','altloc','resname','chain','resid','icode','occupancy','beta','element','charge','radius']:\n exec(\"our_dict['%s'] = self.%s\" % (var,var))\n our_dict['x'] = self.coords[0]; our_dict['y'] = self.coords[1]; our_dict['z'] = self.coords[2]\n return our_dict # return the dictionary\n\n def set_coords(self, coord):\n if type(coord) == matrix or type(coord) == array:\n coord = coord[0,0:3].tolist()[0]\n self.coords = coord\n return\n\n def print_pdbline(self, standard=True, space_separated_values=False):\n name = self.name\n if len(name) <= 3: name = ''.join((' ',name)) # then pad it to left with a space\n if 
standard:\n #formatline = \"{0:<6.6}{1:>5.5} {2:<4.4}{3:1.1}{4:3.3} {5:1.1}{6:>4.4}{7:1.1} {8:> 8.3f}{9:> 8.3f}{10:> 8.3f}{11:>6.2f}{12:>6.2f} {13:>2.2}{14:>2.2}\\n\" # charge was included here, but insufficient space\n #return formatline.format(self.record,str(self.index),name,self.altloc,self.resname,self.chain,self.resid,self.icode,self.coords[0],self.coords[1],self.coords[2],self.occupancy,self.beta,self.element,self.charge)\n formatline = \"{0:<6.6}{1:>5.5} {2:<4.4}{3:1.1}{4:3.3} {5:1.1}{6:>4.4}{7:1.1} {8:> 8.3f}{9:> 8.3f}{10:> 8.3f}{11:>6.2f}{12:>6.2f} {13:>2.2} \\n\"\n return formatline.format(self.record,str(self.index),name,self.altloc,self.resname,self.chain,self.resid,self.icode,self.coords[0],self.coords[1],self.coords[2],self.occupancy,self.beta,self.element,self.charge)\n elif space_separated_values: # then every value is only separated by a space\n #formatline = \"{0} {1} {2} {3} {4} {5} {6} {7} {8:.3} {9:.3} {10:.3} {11:.2} {12:.2} {13} {14}\" # charge was included here, but not included for standard mode, so I'm not including here either\n #return formatline.format(self.record,str(self.index),self.name,self.altloc,self.resname,self.chain,self.resid,self.icode,self.coords[0],self.coords[1],self.coords[2],self.occupancy,self.beta,self.element,self.charge)\n formatline = \"{0} {1} {2} {3} {4} {5} {6} {7} {8:.3f} {9:.3f} {10:.3f} {11:.2f} {12:.2f} {13}\"\n ourline = formatline.format(self.record,str(self.index),self.name,self.altloc,self.resname,self.chain,self.resid,self.icode,self.coords[0],self.coords[1],self.coords[2],self.occupancy,self.beta,self.element)\n ourline = ' '.join(ourline.split()) # remove extra spaces\n return ourline\n else: # then print out a line that is close to standard at least for small PDBs, but can handle many more atoms/residues\n self.record = \"ATOM\" # set it to atom by default, which gives us more room that HETATM\n record_index_spaces = len(self.record) + len(str(self.index)) # all this gibberish just to get some decent spacing for the index\n after_index_spaces = 1\n if record_index_spaces < 1:\n after_index_spaces = 0\n record_index_spaces = 1\n record_index_formatline = r\"{0:}{1:}{2:}{3:}\".format(self.record, ' '*(11-record_index_spaces), str(self.index), ' '*after_index_spaces)\n after_resid_spaces = 4 # all this gibberish to get decent spacing for the resid column\n before_resid_spaces = 4 - len(str(self.resid))\n if before_resid_spaces < 0:\n after_resid_spaces = after_resid_spaces + before_resid_spaces\n before_resid_spaces = 0\n if after_resid_spaces < 0: # then its a HUGE resid number\n after_resid_spaces = 0 # forget about formatting\n resid_formatline = r\"{0:}{1:}{2:}\".format(' '*before_resid_spaces, self.resid, ' '*after_resid_spaces)\n #formatline = \"{0}{1:<4.4}{2:1.1}{3:3.3} {4:1.1}{5}{6:> 8.3f}{7:> 8.3f}{8:> 8.3f}{9:>6.2f}{10:>6.2f} {11:>2.2}{12:>2.2}\\n\"\n beta = '{0:4.2f}'.format(self.beta)[:4]\n occupancy = '{0:4.2f}'.format(self.occupancy)[:4]\n formatline = \"{0}{1:<4.4}{2:1.1}{3:3.3} {4:1.1}{5}{6:> 8.3f}{7:> 8.3f}{8:> 8.3f} {9} {10} {11:>2.2}{12:>2.2}\\n\"\n return formatline.format(record_index_formatline,name,self.altloc,self.resname,self.chain,resid_formatline,self.coords[0],self.coords[1],self.coords[2],occupancy,beta,self.element,self.charge)\n\n def set_occupancy(self,occupancy):\n self.occupancy = occupancy\n\nclass Structure():\n def __init__(self, struct_id):\n self.struct_id = struct_id\n self.atoms = []\n self.num_atoms = 0\n self.num_resids = 0\n return\n\n def get_atoms(self):\n return self.atoms\n\n def 
save(self, filename, amber=False, standard=True, space_separated_values=False, remark=True, endmdl=True, pqr=False, pqrxml=False, nohydrogens=False):\n '''Save this structure as a PDB or PQR file'''\n # perform some checks\n if standard:\n if self.num_atoms > 99999: # then it will require 6 spaces\n raise Exception(\"PDB file too big to write in standardized format. Atom indeces exceed 5 digit maximum. Set 'standard' option to False to enable large pdb file writing\")\n if self.num_resids > 9999: # then it will require 5 spaces\n raise Exception(\"Too many residues to write in standardized format. Residue indeces exceed 4 digit maximum. Set 'standard' option to False to enable large pdb file writing\")\n\n # write the file\n outfile = open(filename, 'w')\n if remark==True and pqrxml==False: # because we don't want remarks added to pqrxml files\n outfile.write(REMARK)\n if self.num_atoms > 0: oldcoord = self.atoms[0].coords\n #if self.num_resids > 9999: # then convert to hex\n # self.resid_to_hex()\n if self.num_atoms > 0: oldresid = self.atoms[0].resid\n if pqrxml==True: outfile.write(\"<roottag>\\n <residue>\\n <residue_name>%s</residue_name>\\n <residue_number>%s</residue_number>\\n\" % (self.atoms[0].resname, self.atoms[0].resid))\n for atom in self.atoms: # for every atom in this structure\n if amber: # for TER cards before an atom\n if (atom.resname in TER_resnames and atom.resid != oldresid):\n outfile.write(\"TER\\n\")\n if pqr==True:\n outfile.write(make_pqr_line(atom.to_dict())) # join an endline to the end of the pqr line\n elif pqrxml==True:\n if oldresid != atom.resid:\n outfile.write(\" </residue>\\n <residue>\\n <residue_name>%s</residue_name>\\n <residue_number>%s</residue_number>\\n\" % (atom.resname, atom.resid))\n outfile.write(make_pqrxml_line(atom.to_dict()))\n\n else: # then write a pdb file\n outfile.write(atom.print_pdbline(standard=standard, space_separated_values=space_separated_values))\n if amber: # need to correctly place the TER cards after the current atom\n coord_dist = array(atom.coords) - array(oldcoord)\n if atom.name in TER_atoms or numpy.linalg.norm(coord_dist) > TER_cutoff : # print a TER card\n outfile.write(\"TER\\n\")\n oldcoord = atom.coords\n oldresid = atom.resid\n #print atom.print_pdbline()\n if pqrxml: outfile.write(\" </residue>\\n</roottag>\\n\")\n if endmdl and not pqrxml:\n outfile.write('ENDMDL\\n')\n outfile.close()\n\n def moveby(self, vector):\n '''move all atoms by the vector'''\n vector = numpy.array(vector)\n #print vector\n for atom in self.atoms:\n atom.coords = list(vector + numpy.array(atom.coords))\n\n '''def remove_atoms(self, indeces):\n 'remove all atoms with given index from the structure. indeces may be a list'\n pass'''\n\n def matrix_operation(self,op_matrix):\n '''given a matrix, will apply matrix*vector operation on every atom'''\n for atom in self.atoms:\n coords = numpy.hstack((matrix([atom.coords]),matrix([[1.0]])))\n newcoord = op_matrix * coords.T\n atom.coords = newcoord.T.tolist()[0][:3]\n\n\n def remove_res(self, resid):\n '''remove all atoms with given resid from the structure. 
resid may be a list'''\n resids = set(resid) # convert to a set if not already\n index_counter = 0\n found_resids = set() # just to keep track of all the resids given that are ACTUALLY in this pdb\n\n while index_counter < self.num_atoms: # for all atoms\n if self.atoms[index_counter].resid in resids:\n self.atoms.pop(index_counter)\n self.num_atoms -= 1\n if self.num_atoms == 0: break\n found_resids.add(self.atoms[index_counter].resid)\n #resids.remove(self.atoms[index_counter].resid)\n else:\n self.atoms[index_counter].index = index_counter+1 # set the internal index numbers to be incremental\n index_counter += 1 # increment the index counter\n\n self.num_resids -= len(found_resids) # reset the number of residues\n\n def renumber_indeces(self, start_at = 1):\n '''renumbers the indeces within the pdb so that they are consecutively numbered'''\n counter = start_at\n for atom in self.atoms:\n atom.index = counter\n counter += 1\n return\n\n def resid_to_hex(self):\n '''converts all resids to hexadecimal values '''\n for atom in self.atoms:\n resid = int(atom.resid)\n hex_str = hex(resid)\n newresid = \"%4s\" % hex_str[2:]\n atom.resid = newresid\n\n# this regular expression should satisfy the fields of nearly any pdb file\n#pdbregex = re.compile(r'([A-Z]+) *([0-9]+) *([\\S]+) +([\\S]{3}) *([A-Z]{0,1}) *([0-9]+) *([-]{0,1}[0-9\\.]+) *([- ][0-9\\.]+) *([- ][0-9\\.]+) *([- ][0-9\\.]+) *([- ][0-9\\.]+) *(.)?$') # the original PDB parser regex\npdbregex = re.compile(r'^([A-Z]+) *([0-9a-f\\*]+) *([\\S]{1,4}) *([ a-zA-Z]{1})([\\S]{3,4}| [\\S]{2}| [\\S]) +([@A-Z]{0,1}) *([-0-9a-f]+)([a-zA-Z]{0,1}) *([- ]{0,1}[0-9]+\\.[0-9]{3}) *([- ]{0,1}[0-9]+\\.[0-9]{3}) *([- ]{0,1}[0-9]+\\.[0-9]{3}) *([- ]{0,1}[0-9]+\\.[0-9]{2}) *([- ]{0,1}[0-9]+\\.[0-9]{2}) *(\\S{0,2}) *(\\S{0,2}) *$')\n #pdbregex = re.compile(r'([A-Z]+) *([0-9a-f\\*]+) *([\\S]{1,4}) *([ a-zA-Z]{1})([\\S]{3}| [\\S]{2}| [\\S]) +([A-Z]{0,1}) *([-0-9]+)([a-zA-Z]{0,1}) *([- ][0-9\\.]+) *([- ][0-9\\.]+) *([- ][0-9\\.]+) *([- ][0-9]+\\.[0-9]{2}) *([- ]{0,1}[\\.0-9]+) *(\\S{0,2}) *(\\S{0,2}) *$')\n #pdbregex = re.compile(r'([A-Z]+) *([0-9]+) *([\\S]+) *([a-zA-Z]{0,1}) *([\\S]{3}) *([A-Z]{0,1}) *([0-9]+)([a-zA-Z]{0,1}) *([- ][0-9\\.]+) *([- ][0-9\\.]+) *([- ][0-9\\.]+) *([- ][0-9\\.]+) *([- ][0-9\\.]+) *(\\S{0,2}) *(\\S{0,2}) *$')\npqrregex = re.compile(r'^([A-Z]+) *([0-9a-f\\*]+) *([\\S]{1,4}) *([\\S]{3,4}| [\\S]{2}| [\\S]) +([@A-Z]{0,1}) *([-0-9a-f]+)([a-zA-Z]{0,1}) *([- ]{0,1}[0-9]+\\.[0-9]{3}) *([- ]{0,1}[0-9]+\\.[0-9]{3}) *([- ]{0,1}[0-9]+\\.[0-9]{3}) *([- ]{0,1}[0-9]+\\.[0-9]{1,4}) *([- ]{0,1}[0-9]+\\.[0-9]{1,4}) *$')\n\nclass Big_PDBParser():\n def pdbparse(self, filename, preserve_index = False, preserve_resid = True, pqr=False, conventional=False):\n '''parses a PDB file and returns the line as a dict.\n Atom indeces automatically numbered. 
If preserve_index\n is set to True, then indeces from the file will be preserved'''\n\n atoms = []\n if isinstance(filename, IOBase) or type(filename) == type(StringIO()): # then we're passing a file or file-like object\n pdbfile = filename\n elif type(filename) == str: # otherwise, try to open it as a string\n pdbfile = open(filename, 'r')\n counter = 1 # count each line of the file\n residcounter = 0\n oldresid = 0\n for pdbline in pdbfile:\n if not pdbline.startswith('ATOM') and not pdbline.startswith('HETATM'):\n continue\n pdbline = pdbline.strip()\n if pqr: # then we are dealing with a pqr file\n if conventional:\n rawlinelist = self.conventional_parse(pdbline, pqr=True)\n else:\n rawlinelist = re.findall(pqrregex, pdbline)\n #assert conventional==False, \"conventional parsing for PQRs not yet implemented.\"\n\n if not rawlinelist:\n linelist = pdbline.split() # then simply split on whitespace\n if len(linelist) == 9: # then the chain is missing\n linelist.insert(3,' ')\n linelist.insert(2,'')\n linelist.insert(6,'')\n\n\n\n try:\n linelist = rawlinelist[0]\n if len(linelist) == 10:\n linelist.insert(5,'')\n if len(linelist) == 11: # then the chain is missing\n linelist.insert(4,' ')\n\n if oldresid == \"\": oldresid = linelist[5]\n if linelist[5] != oldresid: # increment the resid counter\n residcounter += 1\n oldresid = linelist[5]\n\n if preserve_index:\n atomindex = linelist[1]\n else:\n atomindex = counter\n\n if preserve_resid:\n resid_index = linelist[5]\n else:\n resid_index = str(residcounter)\n\n #print \"linelist:\", linelist\n #exit()\n\n element=linelist[2][0]\n atom = Atom(record=linelist[0], index=atomindex, name=linelist[2], altloc=\"\", resname=linelist[3], chain=linelist[4], resid=resid_index, icode=linelist[6], x=linelist[7], y=linelist[8], z=linelist[9], charge=linelist[10], radius=linelist[11], occupancy='1.0', beta='0.0', element=element)\n\n except IndexError:\n print((\"failure line: %s\" % pdbline))\n print((\"rawline: %s\" % rawlinelist))\n raise IndexError\n\n\n\n else: # then parse for a pdb file\n if conventional: # then we are parsing using conventional\n rawlinelist = self.conventional_parse(pdbline)\n else:\n rawlinelist = re.findall(pdbregex, pdbline)\n # fill the atom object\n if not rawlinelist:\n #print \"error parsing line:\\n\", pdbline, rawlinelist\n #print \"attempting to parse conventionally...\"\n rawlinelist = self.conventional_parse(pdbline)\n rawlinelist[0][13] = rawlinelist[0][2][0] # getting the first letter to be the element\n #print \"ALERT: conventional parsing not yet implemented!\" # NOTE: line marked for removal upon completion of conventional parsing\n #continue\n try:\n linelist = rawlinelist[0]\n if oldresid == \"\": oldresid = linelist[6]\n if linelist[6] != oldresid: # increment the resid counter\n residcounter += 1\n oldresid = linelist[6]\n #print \"linelist: \", linelist\n if preserve_index:\n atomindex = linelist[1]\n else:\n atomindex = counter\n if preserve_resid:\n resid_index = linelist[6]\n else:\n resid_index = str(residcounter)\n\n\n #if charge and not element: # then probabily it assigned the charge when it should have assigned the element\n #if re.match(): # then there are alphabetical characters in the charge when there should be numbers for charge UNFINISHED...\n\n atom = Atom(record=linelist[0], index=atomindex, name=linelist[2], altloc=linelist[3], resname=linelist[4], chain=linelist[5], resid=resid_index, icode=linelist[7], x=linelist[8], y=linelist[9], z=linelist[10], occupancy=linelist[11], 
beta=linelist[12], element=linelist[13], charge=linelist[14]) #, rawline=pdbline)\n\n\n except IndexError:\n print((\"failure line: %s\" % pdbline))\n print((\"rawline: %s\" % rawlinelist))\n raise IndexError\n\n #atom = Atom(record=linelist[0], index=linelist[1], name=linelist[2], resname=linelist[3], chain=linelist[4], resid=linelist[5], x=linelist[6], y=linelist[7], z=linelist[8], occupancy=linelist[9], beta=linelist[10], atomtype=linelist[11])\n\n\n atoms.append(atom)\n #print linelist\n counter += 1\n if type(filename) == str: pdbfile = open(filename, 'r')\n return atoms, residcounter\n #if pdbline.startswith != \"ATOM\" and pdbline\n\n def conventional_parse(self,line, pqr=False): # an emergency function that attempts to parse the line of a pdb file according to conventional rules\n if pqr:\n conv_regex = re.compile(r'^(.{6})(.{5}).(.{4})(.{5}).(.{4}) (.{8})(.{8})(.{8})(.{8})(.{7})*$') # untested...\n else:\n conv_regex = re.compile(r'^(.{6})(.{5}).(.{4})(.)(.{4})(.)(.{4})(.).{3}(.{8})(.{8})(.{8})(.{6})(.{6}).{0,10}(.{0,2})(.{0,2}) *$')\n rawlinelist = re.findall(conv_regex,line)\n\n #print \"rawlinelist:\", map(string.strip, rawlinelist[0])\n if not rawlinelist:\n print((\"problem with line:\", line))\n return [list(map(string.strip, rawlinelist[0]))] # strip all the spaces off and return it\n\n def get_structure(self, struct_id, filename, preserve_index=False, preserve_resid=True, pqr=False, pqrxml=False, conventional=False):\n structure = Structure(struct_id)\n #structure.struct_id = struct_id\n # now parse the pdb file\n assert not (pqr == True and pqrxml == True), \"both the 'pqr' and 'pqrxml' options may not be set to 'True' at the same time.\"\n if pqrxml:\n pqrxml_generator = pqr_xml_parse(filename, preserve_resid=preserve_resid, preserve_index=preserve_index)\n for atom_dict,num_res in pqrxml_generator:\n #print \"atom_dict:\", atom_dict\n atom = Atom(record=\"ATOM\", index=atom_dict['index'], name=atom_dict['name'], altloc='', resname=atom_dict['resname'], chain='', resid=atom_dict['resid'], icode='', x=atom_dict['x'], y=atom_dict['y'], z=atom_dict['z'], element=atom_dict['name'][0], charge=atom_dict['charge'], radius=atom_dict['radius'], occupancy='1.0', beta='0.0', )\n structure.atoms.append(atom)\n structure.num_resids = num_res\n else:\n structure.atoms, structure.num_resids = self.pdbparse(filename,preserve_index,preserve_resid,pqr=pqr,conventional=conventional)\n structure.num_atoms = len(structure.atoms)\n #NOTE: There is no way so far to assign structure.num_resids\n return structure\n\nparser=Big_PDBParser()\n\ndef pqr_xml_parse(filename, preserve_resid = True, preserve_index=False): #parses a pqrxml file from BrownDye\n # I have to code this carefully because I want it to be able to handle very large structures\n resid_counter = 1\n index_counter = 1\n\n context = iterparse(filename, events=(\"start\", \"end\")) # get an iterable\n context = iter(context) # turn it into an iterator\n event, root = next(context)\n for event, elem in iterparse(filename):\n if event == \"end\":\n # residue info\n if elem.tag == \"residue_name\": resname = elem.text\n if preserve_resid and elem.tag == \"residue_number\": resid = int(elem.text)\n # atom info\n if elem.tag == \"atom_name\": atom_name = elem.text\n if preserve_index and elem.tag == \"atom_number\": index = int(elem.text)\n if elem.tag == \"x\": x = float(elem.text)\n if elem.tag == \"y\": y = float(elem.text)\n if elem.tag == \"z\": z = float(elem.text)\n if elem.tag == \"charge\": charge = float(elem.text)\n if elem.tag 
== \"radius\": radius = float(elem.text)\n if elem.tag == \"atom\": # ending the atom\n if not preserve_index:\n index = index_counter\n index_counter += 1\n line_dict = {'index':index, 'name':atom_name, 'resname':resname, 'resid':resid, 'x':x,'y':y,'z':z, 'charge':charge, 'radius':radius,}\n #print make_pqr_line(line_dict)\n yield line_dict, resid_counter\n elem.clear\n if elem.tag == \"residue\": # ending the residue\n if not preserve_resid:\n resid = resid_counter\n resid_counter += 1\n elem.clear()\n root.clear() # clear all previous entries in the root to save space\n\n elem.clear()\n '''\n root = tree.getroot()\n for residue in root:\n resname = residue.find('residue_name')\n resid = residue.find('residue_number')\n atoms = residue.findall('atom')\n for atom in atoms:\n atom_name = atom.find('atom_name')\n serial = atom.find('atom_number')\n x = atom.find('x')\n y = atom.find('y')\n z = atom.find('z')\n charge = atom.find('charge')\n radius = atom.find('radius')\n '''\n\ndef loadpdb(filename):\n \"quick and convenient way to load a pdb file\"\n newpdb=parser.get_structure('quickload', filename, preserve_resid=False )\n return newpdb\n\ndef loadpqr(filename,preserve_resid=False):\n \"quick and convenient way to load a pqr file\"\n newpqr=parser.get_structure('quickload', filename, pqr=True, preserve_resid=preserve_resid )\n return newpqr\n\ndef isfloat(number):\n if type(number) in [float, numpy.float64, numpy.float32]:\n return True\n else:\n return False\n\ndef make_pqr_line(user_line_dict, chain=False):\n '''creates a pqr line based on the specified parameters in line_dict'''\n line_dict = {'record':'ATOM', 'index':'0', 'name':'---', 'altloc':'', 'resname':'---', 'chain':'', 'resid':'0', 'icode':'', 'x':'0.000','y':'0.000','z':'0.000', 'charge':'0.0000', 'radius':'0.0000',}\n line_dict.update(user_line_dict)\n if isfloat(line_dict['x']): line_dict['x'] = \"% 8.3f\" % line_dict['x'] # if the value is a float, then convert to a string with the proper precision and width\n if isfloat(line_dict['y']): line_dict['y'] = \"% 8.3f\" % line_dict['y'] # if the value is a float, then convert to a string with the proper precision and width\n if isfloat(line_dict['z']): line_dict['z'] = \"% 8.3f\" % line_dict['z'] # if the value is a float, then convert to a string with the proper precision and width\n if isfloat(line_dict['charge']): line_dict['charge'] = \"%0 6.4f\" % line_dict['charge']\n if isfloat(line_dict['radius']): line_dict['radius'] = \"%0 6.4f\" % line_dict['radius']\n for key in list(line_dict.keys()):\n line_dict[key] = str(line_dict[key]) # convert every value in the dictionary into a string for easy formatting\n if chain: # then write the chainID\n formatline = \"{record:<6.6}{index:>5.10} {name:^4.4} {resname:<4.4}{chain:1.1}{resid:>4.9} {x:>8.8}{y:>8.8}{z:>8.8} {charge:>7.7} {radius:>6.6}\\n\"\n else:\n formatline = \"{record:<6.6}{index:>5.10} {name:^4.4} {resname:<4.4} {resid:>4.9} {x:>8.8}{y:>8.8}{z:>8.8} {charge:>7.7} {radius:>6.6}\\n\"\n formatstring = formatline.format(**line_dict) # automatically populates the line with values from line_dict\n return formatstring\n\n #return formatline.format(self.record,str(self.index),name,self.altloc,self.resname,self.chain,self.resid,self.icode,self.coords[0],self.coords[1],self.coords[2],self.occupancy,self.beta,self.element,self.charge)\n\n\n\ndef make_pqrxml_line(user_line_dict):\n '''creates a pqr line based on the specified parameters in line_dict'''\n line_dict = {'record':'ATOM', 'index':'0', 'name':'---', 'altloc':'', 
'resname':'---', 'chain':'', 'resid':'0', 'icode':'', 'x':'0.000','y':'0.000','z':'0.000', 'charge':'0.0000', 'radius':'0.0000',}\n line_dict.update(user_line_dict)\n if isfloat(line_dict['x']): line_dict['x'] = \"%.6f\" % line_dict['x'] # if the value is a float, then convert to a string with the proper precision and width\n if isfloat(line_dict['y']): line_dict['y'] = \"%.6f\" % line_dict['y'] # if the value is a float, then convert to a string with the proper precision and width\n if isfloat(line_dict['z']): line_dict['z'] = \"%.6f\" % line_dict['z'] # if the value is a float, then convert to a string with the proper precision and width\n if isfloat(line_dict['charge']): line_dict['charge'] = \"%.6f\" % line_dict['charge']\n if isfloat(line_dict['radius']): line_dict['radius'] = \"%.6f\" % line_dict['radius']\n for key in list(line_dict.keys()):\n line_dict[key] = str(line_dict[key]) # convert every value in the dictionary into a string for easy formatting\n\n #formatline = \"{record:<6.6}{index:>5.5} {name:^4.4}{altloc:1.1}{resname:3.3} {chain:1.1}{resid:>4.4}{icode:1.1} {x:>8.8}{y:>8.8}{z:>8.8} {charge:>7.7}{radius:>7.7}\\n\"\n formatline = ''' <atom>\n <atom_name>{name}</atom_name>\n <atom_number>{index}</atom_number>\n <x>{x}</x>\n <y>{y}</y>\n <z>{z}</z>\n <charge>{charge}</charge>\n <radius>{radius}</radius>\n </atom>\\n'''\n formatstring = formatline.format(**line_dict) # automatically populates the line with values from line_dict\n return formatstring\n\ndef find_element(name):\n '''find the most likely element of an atom given the atomic name'''\n letters = re.sub(\"[^a-zA-Z]*\", \"\", name) # keeps only the letters in the 'name' string\n if letters in list(atomic_weights.keys()): # then we should have a weight for this entry\n return letters\n elif letters[0] in list(atomic_weights.keys()): # then its a named atom and we can take the first letter, which should be the proper name\n return letters[0]\n else:\n return ''\n\n\ndef avg(numlist):\n 'finds the average of a list of numbers'\n oursum = sum(numlist)\n return oursum / float(len(numlist))\n\ndef getcoords(structure):\n 'returns a list of coordinates from all of the atoms'\n coords = []\n atomlist = structure.get_atoms()\n for atom in atomlist:\n coords.append(atom.get_coords())\n return coords\n\ndef minmax_width (structure, water_only=True, wat_resnames=water_resnames):\n '''returns the width of the minimum and maximum coordinate values in the x,y,z direction'''\n coords = getcoords(structure)\n xlist = []\n ylist = []\n zlist = []\n i=0\n for coord in coords:\n if water_only and wat_resnames and structure.atoms[i].resname not in wat_resnames:\n i+=1\n continue\n xlist.append(coord[0])\n ylist.append(coord[1])\n zlist.append(coord[2])\n i+=1\n\n return ( max(xlist)-min(xlist), max(ylist)-min(ylist), max(zlist)-min(zlist))\n\ndef center (structure):\n '''returns the center of a pdb'''\n coords = getcoords(structure)\n xlist = []\n ylist = []\n zlist = []\n\n for coord in coords:\n xlist.append(coord[0])\n ylist.append(coord[1])\n zlist.append(coord[2])\n\n return ((max(xlist) + min(xlist))*0.5, (max(ylist) + min(ylist))*0.5, (max(zlist) + min(zlist)) * 0.5)\n\ndef center_of_mass (structure, weighted=True, heavy_atoms=False):\n '''finds the center of mass of a structure. 
Returns a numpy vector'''\n total_weight = 0.0 # keeps track of the weight of the molecule\n coord_sum = numpy.array([0.0,0.0,0.0]) # keeps track of the sum of the coordinates\n for atom in structure.get_atoms(): # for every atom in the structure\n if heavy_atoms==True and atom.element=='H': continue # we don't care about the hydrogens, so skip them\n if weighted:\n atom_weight = atomic_weights[atom.element]\n else:\n atom_weight = 1.0 # they are all given the same weight\n #print atom_weight\n total_weight += atom_weight # increment the total_weight\n coord = numpy.array(atom.get_coords())\n weighted_coord = coord * atom_weight\n coord_sum += weighted_coord # add the weighted coordinate\n #print total_weight\n return coord_sum / total_weight # divide by the total weight\n\ndef moments_of_inertia(structure, weighted=True):\n '''finds the inertia tensor for all atoms in the structure. Returns as a 3x3 numpy array'''\n com=center_of_mass(structure,weighted)\n #translate to the origin\n structure.moveby(-com)\n # these are the elements of our tensor\n Ixx = 0.0; Ixy = 0.0; Ixz = 0.0; Iyy = 0.0; Iyz = 0.0; Izz = 0.0\n for atom in structure.get_atoms():\n if weighted:\n atom_weight = atomic_weights[atom.element]\n else:\n atom_weight = 1.0 # they are all given the same weight\n x = atom.coords[0]\n y = atom.coords[1]\n z = atom.coords[2]\n Ixx = Ixx + atom_weight*(y*y + z*z)\n Ixy = Ixy - atom_weight*(x*y)\n Ixz = Ixz - atom_weight*(x*z)\n Iyy = Iyy + atom_weight*(x*x + z*z)\n Iyz = Iyz - atom_weight*(y*z)\n Izz = Izz + atom_weight*(x*x + y*y)\n # once the loop is complete, assemble the tensor matrix\n I = numpy.array([[Ixx, Ixy, Ixz],\n [Ixy, Iyy, Iyz],\n [Ixz, Iyz, Izz]])\n structure.moveby(com)\n return(I)\n\ndef principal_axes(structure, weighted=True):\n '''finds the principal axes of a structure'''\n I = moments_of_inertia(structure,weighted)\n evals, evecs = numpy.linalg.eig(I) # calculated the Inertia tensor\n # sort the eigenvectors by their eigenvalues\n idx = evals.argsort()[::-1]\n evals = evals[idx]\n evecs = evecs[:,idx]\n crs = normalize_vector(numpy.cross(evecs[:,0], evecs[:,1]))\n if not numpy.allclose(crs, evecs[:,2]):\n evecs[:,2] = -evecs[:,2]\n evecs = evecs.T # better to get the transpose\n return evals, evecs\n\n\ndef molecular_radius(structure):\n '''finds the distance of the structure center of mass to the furthest atom'''\n com = center_of_mass(structure)\n largest_radius = 0.0\n for atom in structure.get_atoms(): # for every atom in the structure\n if atom.radius == '0.0':\n atom_radius = radii[atom.element]\n else:\n atom_radius = atom.radius\n radius = numpy.linalg.norm(numpy.array(atom.coords) - numpy.array(com)) + float(atom_radius)\n if radius > largest_radius: largest_radius = radius\n return largest_radius\n\ndef regurge(infile,outfile):\n '''reads a pdbfile and attempts to rewrite it back out exactly the same way'''\n structure=parser.get_structure('regurge', infile, preserve_index = False)\n structure.save(outfile, standard=False)\n\ndef load_pdb_traj(infilename):\n '''reads a pdb trajectory separated by ENDMDL lines and returns a list of structures'''\n # Warning: This function is currently SLOW! 
Try to use MDAnalysis\n    traj = []\n    starttime = time.time()\n    pdbtemp = StringIO() # a file-like object to hold the current pdb file to be opened later by the pdb parser\n    infile = open(infilename,'r')\n    counter = 0\n    for line in infile: # read every line in the file\n        pdbtemp.write(line)\n        if line.startswith(\"ENDMDL\"): # then we have ended a frame of the pdb trajectory\n            pdbtemp.seek(0) # reset to the beginning of the file\n            curstruct = parser.get_structure('traj_frame%d' % counter, pdbtemp, preserve_index = False)\n            traj.append(curstruct)\n            pdbtemp = StringIO()\n            counter += 1\n    curstruct = parser.get_structure('traj_frame%d' % counter, pdbtemp, preserve_index = False)\n    if curstruct.num_atoms > 0: # this is to help prevent a frame containing nothing from being written\n        traj.append(curstruct)\n    pdbtemp.close()\n    print((\"time elapsed:\", time.time() - starttime))\n    return traj\n\ndef pdb_from_MDAnalysis(MDatomselection):\n    '''given an MD atomselection object, this function will return a pdb file object'''\n    newstruct = Structure(\"From MD Analysis\") # create a new structure object\n    for atom in MDatomselection.atoms: # for every atom in this selection\n        newatom = Atom(record='ATOM', index=atom.number, name=atom.name, altloc='', resname=atom.resname, chain='X', resid=atom.resid, icode='', x=atom.pos[0], y=atom.pos[1], z=atom.pos[2], occupancy=0.0, beta=0.0, element=find_element(atom.name), charge=atom.charge)\n        newstruct.atoms.append(newatom) # append the newly constructed Atom, not the raw MDAnalysis atom\n    newstruct.num_atoms = len(MDatomselection.atoms)\n    return newstruct\n\n'''def pdb_from_MDAnalysis_alt(MDatomselection):\n    # TOO SLOW\n    MDatomselection.write('/tmp/temp_md.pdb', format=\"PDB\")\n    newstruct = parser.get_structure('From MD Analysis', '/tmp/temp_md.pdb')\n    return newstruct'''\n\ndef test2methods(n,MDatomselection):\n    starttime = time.time()\n    for i in range(n):\n        something = pdb_from_MDAnalysis(MDatomselection)\n    endtime = time.time()\n    print((\"method 1 time:\", endtime - starttime))\n\n    starttime = time.time()\n    for i in range(n):\n        something = pdb_from_MDAnalysis_alt(MDatomselection)\n    endtime = time.time()\n    print((\"method 2 time:\", endtime - starttime))\n\ndef ligmerge(ligand, receptor, remove_water=True, hard_limit=3.0, verbose=True):\n    '''given a ligand structure and a receptor structure, will merge the two to create a single structure.\n    if remove_water is True, will remove all water molecules clashing with the ligand within the hard_limit'''\n    removing_atoms = [\"HOH\", \"H2O\", \"WAT\", \"Cl-\", \"Na+\"]\n    new_receptor = deepcopy(receptor)\n    lig_com = center_of_mass(ligand)\n    if verbose: print((\"lig_com:\", lig_com))\n    lig_rad = molecular_radius(ligand)\n    within_lig_proximity = [] # all receptor atoms close to the ligand\n    for recatom in range(receptor.num_atoms): # loop thru the receptor atoms\n        recatom_coords = numpy.array(receptor.atoms[recatom].coords)\n        dist = numpy.linalg.norm(recatom_coords - lig_com)\n        if dist < lig_rad + hard_limit: # if the rec atom is within the molecular radius of the ligand center\n            within_lig_proximity.append(receptor.atoms[recatom]) # append the atom itself\n    # now we have all the indices of any water molecule that could possibly be clashing; find the ones that are actually clashing\n    clashing_waters = []\n    resids_to_remove = []\n    for prox_atom in within_lig_proximity:\n        for lig_atom in ligand.get_atoms():\n            lig_coords = numpy.array(lig_atom.coords)\n            rec_coords = numpy.array(prox_atom.coords)\n            if numpy.linalg.norm(rec_coords - lig_coords) < hard_limit: # then it's clashing\n                if prox_atom.resname in removing_atoms: # then mark the waters for removal...\n                    if prox_atom.element == \"H\":\n                        pass # ignore clashing hydrogens\n                    else: # ...but only by their oxygen (or ion) atoms\n                        resids_to_remove.append(prox_atom.resid) # NOTE: remove_res method does not exist yet\n                else: # then something else is clashing\n                    if verbose: print((\"Alert: non-H2O atom clashing with ligand: id:%d resname:%s resid: %s\" % (prox_atom.index, prox_atom.resname, prox_atom.resid)))\n            else:\n                pass\n    if remove_water: # honor the documented flag\n        new_receptor.remove_res(resids_to_remove)\n    # now create a new structure, appending non-waters first\n    new_holo = Structure('holo')\n    hit_water_1 = False\n    hit_water_2 = False\n    counter1 = 0\n    counter2 = 0\n    oldresid = ''\n    # loop through until we hit water or ions\n    if new_receptor.num_atoms == 0: return new_holo, 0, 0 # empty receptor: keep the same return arity as the normal exit\n    while hit_water_1 == False:\n        curresname = new_receptor.atoms[counter1].resname\n        if curresname in removing_atoms:\n            hit_water_1 = True\n            break\n        new_holo.atoms.append(deepcopy(new_receptor.atoms[counter1]))\n        new_holo.num_atoms += 1\n        if new_receptor.atoms[counter1].resid != oldresid:\n            new_holo.num_resids += 1\n            oldresid = new_receptor.atoms[counter1].resid\n        counter1 += 1\n        if counter1 >= new_receptor.num_atoms:\n            break\n    inserted_index = counter1 # the last index of the receptor where the ligand was added\n    # we've added all protein atoms from the receptor, now add ligand\n    oldresid = ''\n    for counter2 in range(ligand.num_atoms):\n        new_holo.atoms.append(deepcopy(ligand.atoms[counter2]))\n        new_holo.num_atoms += 1 # count the ligand atom itself\n        if ligand.atoms[counter2].resid != oldresid:\n            new_holo.num_resids += 1\n            oldresid = ligand.atoms[counter2].resid\n    last_ligand_index = counter2\n    # all ligand atoms are added, now add the rest of the water molecules from receptor\n    oldresid = ''\n    for counter1 in range(counter1, new_receptor.num_atoms):\n        new_holo.atoms.append(deepcopy(new_receptor.atoms[counter1]))\n        new_holo.num_atoms += 1\n        if new_receptor.atoms[counter1].resid != oldresid:\n            new_holo.num_resids += 1\n            oldresid = new_receptor.atoms[counter1].resid\n\n    # all atoms now added to new structure in proper order\n    return new_holo, inserted_index, last_ligand_index\n\ndef normalize_vector(array, axis=-1):\n    \"\"\"\n    Normalize the vectors of A in the direction of axis. This means that each\n    vector will have length 1.
The default axis is the last.\n\n Arguments:\n\n - array (``numpy.ndarray``) A numpy array.\n - axis (``int``) The axis which will have vectors of lenght 1.\n \"\"\"\n shape = list(array.shape)\n shape[axis] = 1\n length = numpy.sqrt((array*array).sum(axis))\n out = array / length.reshape(shape)\n return out\n\nclass Test_pdb_functions(unittest.TestCase):\n # several test cases to ensure the functions in this module are working properly\n #def aaa(self): # this function runs first, it's like an initialization\n # print \"initializing...\"\n\n\n def test_main(self):\n #print \"WARNING: pdb2.py does not have comprehensive unittests\"\n return\n\n\n def test_Atom_class(self): # unit test for the Atom class\n # first test __init__()\n testatom = Atom(record=\"ATOM\", index=\"150\", name=\"CA\", altloc='', resname=\"ALA\", chain=\"X\", resid=\"15\", icode='', x='123.456', y='234.567', z='345.678', occupancy=\"1.00\", beta='0.50', element='C', charge='0.30')\n self.assertEqual(testatom.record, \"ATOM\"); self.assertEqual(testatom.index, 150); self.assertEqual(testatom.name, \"CA\"); self.assertEqual(testatom.resname, \"ALA\"); self.assertEqual(testatom.chain, \"X\")\n self.assertEqual(testatom.resid, \"15\"); self.assertEqual(testatom.coords, [123.456,234.567,345.678]); self.assertEqual(testatom.occupancy, 1.0); self.assertEqual(testatom.beta, 0.5)\n self.assertEqual(testatom.element, 'C'); self.assertEqual(testatom.charge, 0.3);\n # test get_coords()\n self.assertEqual(testatom.get_coords(), [123.456,234.567,345.678])\n # test to_dict()\n testdict = testatom.to_dict()\n atomdict = {'record':\"ATOM\", 'index':150, 'name':\"CA\", 'altloc':'', 'resname':\"ALA\", 'chain':\"X\", 'resid':\"15\", 'icode':'', 'x':123.456, 'y':234.567, 'z':345.678, 'occupancy':1.0, 'beta':0.5, 'element':'C', 'charge':0.30,'radius':0.0}\n self.assertEqual(testatom.to_dict(), atomdict)\n # test write_pdbline()\n # standard\n result = testatom.print_pdbline(standard=True, space_separated_values=False)\n expected_result = \"ATOM 150 CA ALA X 15 123.456 234.567 345.678 1.00 0.50 C \\n\"\n self.assertEqual(result, expected_result) # standard mode\n # space separated values\n result = testatom.print_pdbline(standard=False, space_separated_values=True)\n expected_result = \"ATOM 150 CA ALA X 15 123.456 234.567 345.678 1.00 0.50 C\"\n self.assertEqual(result, expected_result) # space separated mode\n # test set_coords()\n testatom.set_coords([987.654, 876.543, 765.432])\n self.assertEqual(testatom.coords, [987.654, 876.543, 765.432])\n\n def test_Structure_class(self): # unit test for the Structure class\n # first test __init__()\n empty = Structure('unit test empty')\n\n\n def test_loadpdb(self): # this simultaneously tests many other functions in the parser\n self.assertEqual(test_struct1_pdb.num_atoms, len(test_struct1_pdb.atoms))\n self.assertEqual(test_struct1_pdb.num_resids, 3)\n self.assertEqual(test_struct1_pdb.num_atoms, 22)\n self.assertEqual(test_struct2_pdb.num_atoms, len(test_struct2_pdb.atoms))\n self.assertEqual(test_struct2_pdb.num_resids, 8519)\n self.assertEqual(test_struct2_pdb.num_atoms, 26565)\n self.assertEqual(test_struct3_pdb.num_atoms, len(test_struct3_pdb.atoms))\n self.assertEqual(test_struct3_pdb.num_resids, 1544)\n self.assertEqual(test_struct3_pdb.num_atoms, 23004)\n\n def test_pdb_parse(self):\n test_pdb_line = \"ATOM 1 N MET X 0 -304.463-303.390 111.670 0.00 0.00 \"\n test1_list = [['ATOM', '1', 'N', '', 'MET', 'X', '0', '', '-304.463','-303.390','111.670', '0.00', '0.00', '', '']]\n test_pqr_line 
= \"ATOM 1 N MET 0 -304.463-303.390 111.670 -0.3000 1.8000\"\n test2_list = [['ATOM', '1', 'N', 'MET', '0', '-304.463','-303.390','111.670', '-0.3000', '1.8000']]\n problem_line = \"ATOM 1 N VAL 1 -1.552 16.438 -23.987 0.0577 1.8240\"\n test3_list = [['ATOM', '1', 'N', 'VAL', '1', '-1.552','16.438','-23.987', '0.0577', '1.8240']]\n pdb_line = parser.conventional_parse(test_pdb_line,pqr=False)\n self.assertEqual(pdb_line, test1_list)\n pqr_line = parser.conventional_parse(test_pqr_line,pqr=True)\n self.assertEqual(pqr_line, test2_list)\n pqr_line = parser.conventional_parse(problem_line,pqr=True)\n self.assertEqual(pqr_line, test3_list)\n\n def test_pdbparse_pqr(self):\n problem_line = \"ATOM 1 N VAL 1 -1.552 16.438 -23.987 0.0577 1.8240\"\n test_list = [['ATOM', '1', 'N', 'VAL', '1', '-1.552','16.438','-23.987', '0.0577', '1.8240']]\n pqr_line = parser.conventional_parse(problem_line,pqr=True)\n self.assertEqual(pqr_line, test_list)\n\n\n def test_make_pqr_line(self):\n test_pqr_line = \"ATOM 1 N MET 0 -304.463-303.390 111.670 -0.3000 1.800\\n\"\n test_line_dict1 = {'index':1, 'name':'N', 'resname':'MET', 'resid':'0', 'x':-304.463, 'y':-303.390, 'z':111.670, 'radius':1.8000, 'charge':-0.3000}\n test_line_dict2 = {'index':'1', 'name':'N', 'resname':'MET', 'resid':'0', 'x':'-304.463', 'y':'-303.390', 'z':'111.670', 'radius':'1.800', 'charge':'-0.3000'}\n result = make_pqr_line(test_line_dict1, chain=False)\n self.assertEqual(test_pqr_line, result)\n result = make_pqr_line(test_line_dict2, chain=False)\n self.assertEqual(test_pqr_line, result)\n\n def test_make_pqrxml_line(self):\n test_pqr_line = formatline = ''' <atom>\n <atom_name>N</atom_name>\n <atom_number>1</atom_number>\n <x>-304.463000</x>\n <y>-303.390000</y>\n <z>111.670000</z>\n <charge>-0.300000</charge>\n <radius>1.800000</radius>\n </atom>\\n'''\n test_line_dict1 = {'index':1, 'name':'N', 'resname':'MET', 'resid':'0', 'x':-304.463, 'y':-303.390, 'z':111.670, 'radius':1.8000, 'charge':-0.3000}\n test_line_dict2 = {'index':'1', 'name':'N', 'resname':'MET', 'resid':'0', 'x':'-304.463000', 'y':'-303.390000', 'z':'111.670000', 'radius':'1.800000', 'charge':'-0.300000'}\n result = make_pqrxml_line(test_line_dict1)\n self.assertEqual(test_pqr_line, result)\n result = make_pqrxml_line(test_line_dict2)\n self.assertEqual(test_pqr_line, result)\n\n def test_find_element(self): # just a small test to make sure that the function calls without an error\n self.assertEqual(find_element('CA'), 'C') # alpha carbon\n\n def test_avg(self): # just a small test to make sure that the function calls without an error\n test_list = [20,60,70]\n test_avg = 50\n self.assertEqual(avg(test_list), test_avg)\n\n def test_get_coords(self): # just a small test to make sure that the function calls without an error\n getcoords(test_struct1_pdb)\n\n def test_minmax_width(self):\n minmax_list = minmax_width(test_struct1_pdb, water_only=False)\n expected = (5.2510000467300415,7.6479997634887695,3.021000027656555)\n for i in range(len(expected)):\n self.assertAlmostEqual(minmax_list[i], expected[i], places=3)\n\n def test_center(self):\n center_list = center(test_struct1_pdb)\n expected = (4.1114999651908875, 4.823999881744385, -0.6205000281333923)\n for i in range(len(expected)):\n self.assertAlmostEqual(center_list[i], expected[i], places=3)\n\n def test_center_of_mass(self):\n center_list = center_of_mass(test_struct1_pdb, weighted = False)\n expected = (4.4929094314575195, 4.853545665740967, -0.22363637387752533) # center of mass\n for i in 
range(len(expected)):\n self.assertAlmostEqual(center_list[i], expected[i], places=3)\n\n def test_moments_of_inertia(self): # just a small test to make sure that the function calls without an error\n moments_of_inertia(test_struct1_pdb)\n\n def test_principal_axes(self): # just a small test to make sure that the function calls without an error\n principal_axes(test_struct1_pdb)\n\n def test_molecular_radius(self): # just a small test to make sure that the function calls without an error\n molecular_radius(test_struct1_pdb)\n\n def test_regurge(self): # just a small test to make sure that the function calls without an error\n regurge(test_struct1_pdb_filename,'/tmp/test.pdb')\n\n def test_load_pdb_traj(self):\n pass # not testing this function\n\n def test_pdb_from_MDAnalysis(self):\n pass # not testing this function\n\n def test_ligmerge(self):\n pass # not testing this function\n\n def test_normalize_vector(self):\n vec = numpy.array([1.0, 2.0, 3.0])\n normed = numpy.array([0.2672612419124244, 0.5345224838248488, 0.8017837257372732])\n self.assertEqual(normalize_vector(vec).all(), normed.all())\n\n\nif __name__==\"__main__\":\n print(\"Now running unit tests for pdb2.py\")\n test_struct1_pdb_filename = '../test/adi_dry.pdb' # an alanine dipeptide\n test_struct2_pdb_filename = '../test/tropc_wet.pdb' # troponin C solvated in H2O\n test_struct3_pdb_filename = '../test/cal-2hty-tetra.pdb'\n test_struct1_pqr_filename = '../test/tami.pqr'\n test_struct2_pqr_filename = '../test/tropc_dry.pqr'\n test_struct3_pqr_filename = '../test/cal-2hty-tetra.pqr'\n test_struct1_pqrxml_filename = '../test/tami.pqrxml'\n test_struct1_pdb = loadpdb(test_struct1_pdb_filename)\n test_struct2_pdb = loadpdb(test_struct2_pdb_filename)\n test_struct3_pdb = loadpdb(test_struct3_pdb_filename)\n test_struct1_pqr = loadpqr(test_struct1_pqr_filename)\n test_struct2_pqr = loadpqr(test_struct2_pqr_filename)\n test_struct3_pqr = loadpqr(test_struct3_pqr_filename)\n unittest.main() # then run unit tests\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.matrix", "numpy.allclose", "numpy.linalg.eig", "numpy.cross" ] ]
JanKorinek/machine-learning-engineering-for-production-public
[ "e2fc12e4a32ba005e3d36f0e566d987956d241e0" ]
[ "course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py" ]
[ "import pickle\nimport numpy as np\nfrom typing import List\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel, conlist\n\n\n\napp = FastAPI(title=\"Predicting Wine Class with batching\")\n\n# Open classifier in global scope\nwith open(\"models/wine-95-fixed.pkl\", \"rb\") as file:\n clf = pickle.load(file)\n\n\nclass Wine(BaseModel):\n batches: List[conlist(item_type=float, min_items=13, max_items=13)]\n\n\n@app.post(\"/predict\")\ndef predict(wine: Wine):\n batches = wine.batches\n np_batches = np.array(batches)\n pred = clf.predict(np_batches).tolist()\n return {\"Prediction\": pred}\n\n# Test comment\n" ]
[ [ "numpy.array" ] ]
g-votte/pfrl
[ "4c30c1d73f0941a2b649b62937eec346bb55a95e" ]
[ "tests/experiments_tests/test_hooks.py" ]
[ "import unittest\n\nimport numpy as np\n\nimport pfrl\n\n\nclass TestLinearInterpolationHook(unittest.TestCase):\n def test_call(self):\n\n buf = []\n\n def setter(env, agent, value):\n buf.append(value)\n\n hook = pfrl.experiments.LinearInterpolationHook(\n total_steps=10, start_value=0.1, stop_value=1.0, setter=setter\n )\n\n for step in range(1, 10 + 1):\n hook(env=None, agent=None, step=step)\n\n np.testing.assert_allclose(buf, np.arange(1, 10 + 1, dtype=np.float32) / 10)\n" ]
[ [ "numpy.arange" ] ]
prefrontalcortex/ai2thor
[ "f28ebbf2a6eb468a17996320eec485b3c7e3444d" ]
[ "ai2thor/util/trials.py" ]
[ "import numpy as np\nimport math\n\n\nclass TrialMetric(object):\n def init_trials(self, num_trials, metadata):\n ...\n\n def update_with_trial(self, trial_index, metadata):\n ...\n\n\nclass ObjectPositionVarianceAverage(TrialMetric):\n \"\"\"\n Metric that computes the average of the variance of all objects in a scene across multiple runs.\n \"\"\"\n def __init__(self):\n self.trials = []\n self.object_ids = []\n\n def init_trials(self, num_trials, metadata):\n objects = metadata[\"objects\"]\n self.object_ids = sorted([o['objectId'] for o in objects])\n num_objects = len(self.object_ids)\n self.trials = np.empty([num_trials, num_objects, 3])\n\n def update_with_trial(self, trial_index, metadata):\n objects = metadata[\"objects\"]\n object_pos_map = {o['objectId']: vec_to_np_array(o['position']) for o in objects}\n for object_index in range(len(self.object_ids)):\n object_id = self.object_ids[object_index]\n self.trials[trial_index][object_index] = object_pos_map[object_id]\n\n def compute(self, n=None):\n return np.mean(np.var(self.trials[:n], axis=0))\n\n\ndef vec_to_np_array(vec):\n return np.array([vec['x'], vec['y'], vec['z']])\n\n\ndef trial_runner(controller, number, metric, compute_running_metric=False):\n \"\"\"\n Generator that wraps metric capture from controller metadata for a number of trials\n :param controller: ai2thor controller\n :param number: int number of trials to collect metrics from\n :param metric: TrialMetric the metric to use\n :param compute_running_metric: bool whether or not to compute the metric after every trial\n :return: tuple(controller, float) with the controller and the metric after every trial\n \"\"\"\n\n metric.init_trials(number, controller.last_event.metadata)\n\n for trial_index in range(number):\n try:\n yield controller, metric.compute(n=trial_index) if compute_running_metric else math.nan\n metric.update_with_trial(trial_index, controller.last_event.metadata)\n controller.reset()\n except RuntimeError as e:\n print(\n e,\n \"Last action status: {}\".format(controller.last_event.meatadata['actionSuccess']),\n controller.last_event.meatadata['errorMessage']\n )\n yield controller, metric.compute()\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.var" ] ]
kappakkala/mypyscripts
[ "4d83e35009687e11755b3d7edac4822b5bea9de7" ]
[ "bday_bot.py" ]
[ "import pandas as pd\nfrom datetime import datetime\nfrom telegram_api import TelegramApi\n\ndf_cal = pd.read_excel(r'C:/Users/kappakkala/Documents/Calendar.xlsx')\ndf_cal = df_cal.fillna(value=\"nothing\")\nevent = df_cal.at[int(datetime.today().strftime('%d'))-1, datetime.today().strftime('%B')]\n\nAPI_KEY = '' # Provide the bot api key \nCHAT_ID = '' # Provide the chat id of the bot/channel\ntelegram = TelegramApi(API_KEY, CHAT_ID)\nbody = \"Notification: \"\nmsg = body + event\nif event != 'nothing':\n telegram.send_message(msg)\n" ]
[ [ "pandas.read_excel" ] ]
SlimFun/FedML
[ "47a14c84b9e07dd0f73c64647be024809ce2a490" ]
[ "fedml_experiments/distributed/contrastive_fed/CovaMNet.py" ]
[ "'''\nResNet for CIFAR-10/100 Dataset.\n\nReference:\n1. https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n2. https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua\n3. Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\nDeep Residual Learning for Image Recognition. https://arxiv.org/abs/1512.03385\n\n'''\nimport logging\n\nimport torch\nimport torch.nn as nn\n\n__all__ = ['ResNet', 'resnet110']\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=dilation, groups=groups, bias=False, dilation=dilation)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(BasicBlock, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n# self.relu = nn.ReLU(inplace=True)\n self.relu = nn.LeakyReLU(0.2, True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None):\n super(Bottleneck, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n# self.relu = nn.ReLU(inplace=True)\n self.relu = nn.LeakyReLU(0.2, True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=10, zero_init_residual=False, groups=1,\n width_per_group=64, replace_stride_with_dilation=None, norm_layer=None, KD=False):\n super(ResNet, self).__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 16\n self.dilation = 1\n if replace_stride_with_dilation is 
None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,\n bias=False)\n self.bn1 = nn.BatchNorm2d(self.inplanes)\n# self.relu = nn.ReLU(inplace=True)\n self.relu = nn.LeakyReLU(0.2, True)\n # self.maxpool = nn.MaxPool2d()\n self.layer1 = self._make_layer(block, 16, layers[0])\n self.layer2 = self._make_layer(block, 32, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 64, layers[2], stride=2)\n \n \n# self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n# self.fc = nn.Linear(64 * block.expansion, num_classes)\n# self.KD = KD\n# for m in self.modules():\n# if isinstance(m, nn.Conv2d):\n# nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n# elif isinstance(m, nn.BatchNorm2d):\n# nn.init.constant_(m.weight, 1)\n# nn.init.constant_(m.bias, 0)\n# # Zero-initialize the last BN in each residual branch,\n# # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n# # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n# if zero_init_residual:\n# for m in self.modules():\n# if isinstance(m, Bottleneck):\n# nn.init.constant_(m.bn3.weight, 0)\n# elif isinstance(m, BasicBlock):\n# nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n input = x\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x) # B x 16 x 32 x 32\n lx1 = self.layer1(x) # B x 16 x 32 x 32\n lx2 = self.layer2(lx1) # B x 32 x 16 x 16\n lx3 = self.layer3(lx2) # B x 64 x 8 x 8\n# if torch.any(torch.isnan(lx3)):\n# print(f'input: {torch.any(torch.isnan(input))}')\n# print(f'x: {torch.any(torch.isnan(x))}')\n# print(f'lx1: {torch.any(torch.isnan(lx1))}')\n# print(f'lx2: {torch.any(torch.isnan(lx2))}')\n# print(f'lx3: {torch.any(torch.isnan(lx3))}')\n \n return lx3\n\n# x = self.avgpool(x) # B x 64 x 1 x 1\n# x_f = x.view(x.size(0), -1) # B x 64\n# x = self.fc(x_f) # B x num_classes\n# if self.KD == True:\n# return x_f, x\n# else:\n# return x\n \n \nclass CovaMResnet56(nn.Module):\n in_planes = 256\n \n def __init__(self, class_num, neck, pretrained=False, num_features=0, neck_feat='after', with_cova=True, path=None, **kwargs):\n super(CovaMResnet56, self).__init__()\n self.base = ResNet(Bottleneck, [6, 6, 6], class_num, **kwargs)\n \n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.num_features = 
num_features\n self.neck = neck\n self.neck_feat = neck_feat\n self.num_classes = class_num\n self.with_cova = with_cova\n\n if self.neck == 'no':\n self.ce_classifier = nn.Linear(self.in_planes, self.num_classes)\n elif self.neck == 'bnneck':\n self.bottleneck = nn.BatchNorm1d(self.in_planes)\n self.bottleneck.bias.requires_grad_(False) # no shift\n self.ce_classifier = nn.utils.weight_norm(nn.Linear(self.in_planes, self.num_classes, bias=False))\n\n self.bottleneck.apply(weights_init_kaiming)\n self.ce_classifier.apply(weights_init_classifier)\n \n\n# if not with_cova:\n# return\n self.covariance = CovaBlock()\n \n self.cova_classifier = nn.Sequential(\n nn.LeakyReLU(0.2, True),\n nn.Dropout(),\n nn.Conv1d(1, 1, kernel_size=441, stride=441, bias=True),\n )\n self.cova_classifier.apply(weights_init_kaiming)\n \n \n # calculate the covariance\n def cal_covariance(self, input):\n \n support_set_sam = input\n B, C = support_set_sam.size()\n # B, C, h, w = support_set_sam.size()\n\n support_set_sam = support_set_sam.permute(1, 0)\n support_set_sam = support_set_sam.contiguous().view(C, -1)\n # mean_support = torch.mean(support_set_sam, 0, True)\n# mean_support = torch.mean(support_set_sam, 1, True)\n# support_set_sam = support_set_sam-mean_support\n\n covariance_matrix = support_set_sam@torch.transpose(support_set_sam, 0, 1)\n # covariance_matrix = support_set_sam@support_set_sam.t()\n # covariance_matrix = torch.div(covariance_matrix, h*w*B-1)\n covariance_matrix = torch.div(covariance_matrix, B-1)\n # CovaMatrix_list.append(covariance_matrix)\n return covariance_matrix\n \n# return covariance_matrix\n\n# def re_init_classifier(self, m, b=None):\n# self.ce_classifier()\n \n # return: cls_score, cova_score, cova\n def forward(self, x, support_covas=None, ml=None):\n output_feat = self.base(x)\n global_feat = self.avgpool(output_feat) # (b, 256, 1, 1)\n global_feat = global_feat.view(global_feat.shape[0], -1) #(b, 256)\n \n if self.neck == 'no':\n feat = global_feat\n elif self.neck == 'bnneck':\n feat = self.bottleneck(global_feat) # normalize for angular softmax\n \n cls_score = self.ce_classifier(feat)\n if (not self.with_cova) or support_covas == None:\n return cls_score, None, output_feat\n \n \n# covariance_matrix = self.cal_covariance(global_feat)\n# cova_score = self.cova_classifier(self.covariance(output_feat, support_covas))\n cova_score = self.covariance(output_feat, support_covas, ml)\n cova_score = cova_score.squeeze(1)\n# print(cova_score)\n# if not self.training:\n# return cls_score, -cova_score, output_feat\n return cls_score, cova_score, output_feat\n\n\nclass CovaBlock(nn.Module):\n def __init__(self):\n super(CovaBlock, self).__init__()\n# self.ranking_loss = nn.MarginRankingLoss(margin=margin)\n\n # calculate the similarity \n def cal_similarity(self, input, CovaMatrix_list, ml):\n\n# input = input\n# B, C = input.size()\n# Cova_Sim = []\n# for i in range(B):\n# query_sam = input[i]\n# query_sam = query_sam.view(C, -1)\n# query_sam_norm = torch.norm(query_sam, 2, 1, True)\n# query_sam = query_sam/query_sam_norm\n \n# if torch.cuda.is_available():\n# mea_sim = torch.zeros(1, len(CovaMatrix_list)).cuda()\n \n# for j in range(len(CovaMatrix_list)):\n# temp_dis = torch.transpose(query_sam, 0, 1)@CovaMatrix_list[j]@query_sam\n# mea_sim[0, j*h*w:(j+1)*h*w] = temp_dis.diag()\n \n# Cova_Sim.append(mea_sim.unsqueeze(0))\n \n# Cova_Sim = torch.cat(Cova_Sim, 0) # get Batch*1*(h*w*num_classes)\n# return Cova_Sim\n \n B, C, h, w = input.size()\n Cova_Sim = []\n\n for i in range(B):\n \n 
query_sam = input[i]\n            query_sam = query_sam.view(C, -1)\n            mean_query = torch.mean(query_sam, 1, True)\n            query_sam_norm = torch.norm(query_sam, 2, 1, True)\n            query_sam = query_sam/query_sam_norm # L2-normalize each row of the query sample\n\n            if torch.cuda.is_available():\n                mea_sim = torch.zeros(1, len(CovaMatrix_list)).to('cuda:3') # NOTE: device is hard-coded\n\n            for j in range(len(CovaMatrix_list)):\n                # similarity of the query to class j through that class's covariance matrix\n                temp_dis = torch.transpose(query_sam, 0, 1)@CovaMatrix_list[j]@query_sam\n                diag = temp_dis.diag()\n                d = diag.sum() / len(diag)\n                mea_sim[0, j] = d\n                if torch.isnan(mea_sim[0, j]):\n                    print('********is nan*************')\n                    print(CovaMatrix_list[j].sum())\n                    print(query_sam.max())\n                    print(torch.transpose(query_sam, 0, 1)@CovaMatrix_list[j])\n\n            Cova_Sim.append(mea_sim.unsqueeze(0))\n\n        Cova_Sim = torch.cat(Cova_Sim, 0) # get Batch*1*num_classes\n        return Cova_Sim\n\n    def forward(self, input, support_covas, ml):\n        return self.cal_similarity(input, support_covas, ml)\n\n\ndef weights_init_kaiming(m):\n    classname = m.__class__.__name__\n    if classname.find('Linear') != -1:\n        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\n        nn.init.constant_(m.bias, 0.0)\n    elif classname.find('Conv') != -1:\n        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n        if m.bias is not None:\n            nn.init.constant_(m.bias, 0.0)\n    elif classname.find('BatchNorm') != -1:\n        if m.affine:\n            nn.init.constant_(m.weight, 1.0)\n            nn.init.constant_(m.bias, 0.0)\n\n\ndef weights_init_classifier(m):\n    classname = m.__class__.__name__\n    if classname.find('Linear') != -1:\n        nn.init.normal_(m.weight, std=0.001)\n        if m.bias is not None:\n            nn.init.constant_(m.bias, 0.0)\n\n\ndef resnet110(class_num, pretrained=False, path=None, **kwargs):\n    \"\"\"\n    Constructs a ResNet-110 model.\n\n    Args:\n        pretrained (bool): If True, returns a model pre-trained.\n    \"\"\"\n    logging.info(\"path = \" + str(path))\n    model = ResNet(Bottleneck, [12, 12, 12], class_num, **kwargs)\n    if pretrained:\n        checkpoint = torch.load(path)\n        state_dict = checkpoint['state_dict']\n\n        from collections import OrderedDict\n        new_state_dict = OrderedDict()\n        for k, v in state_dict.items():\n            name = k.replace(\"module.\", \"\") # remove the 'module.' prefix added by DataParallel\n            new_state_dict[name] = v\n\n        model.load_state_dict(new_state_dict)\n    return model\n\n\n" ]
[ [ "torch.nn.Linear", "torch.cat", "torch.isnan", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "torch.nn.init.kaiming_normal_", "torch.cuda.is_available", "torch.load", "torch.transpose", "torch.nn.init.constant_", "torch.nn.Conv1d", "torch.norm", "torch.nn.init.normal_", "torch.div", "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.Dropout", "torch.nn.BatchNorm1d", "torch.nn.AdaptiveAvgPool2d", "torch.mean" ] ]
aabobakr/adversarial-robustness-toolbox
[ "d62b2606132d6e6fd5946d6bdc8f1da940eb3282", "d62b2606132d6e6fd5946d6bdc8f1da940eb3282" ]
[ "tests/attacks/test_virtual_adversarial.py", "tests/attacks/test_zoo.py" ]
[ "# MIT License\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport keras.backend as k\nimport numpy as np\nimport tensorflow as tf\n\nfrom art.attacks.virtual_adversarial import VirtualAdversarialMethod\nfrom art.classifiers import KerasClassifier\nfrom art.utils import load_dataset, get_labels_np_array, master_seed, random_targets\nfrom art.utils import get_classifier_tf, get_classifier_kr, get_classifier_pt\nfrom art.utils import get_iris_classifier_tf, get_iris_classifier_kr, get_iris_classifier_pt\n\nlogger = logging.getLogger('testLogger')\n\nBATCH_SIZE = 10\nNB_TRAIN = 100\nNB_TEST = 10\n\n\nclass TestVirtualAdversarial(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n k.set_learning_phase(1)\n\n # Get MNIST\n (x_train, y_train), (x_test, y_test), _, _ = load_dataset('mnist')\n x_train, y_train, x_test, y_test = x_train[:NB_TRAIN], y_train[:NB_TRAIN], x_test[:NB_TEST], y_test[:NB_TEST]\n cls.mnist = (x_train, y_train), (x_test, y_test)\n\n # Keras classifier\n cls.classifier_k, sess = get_classifier_kr()\n\n scores = cls.classifier_k._model.evaluate(x_train, y_train)\n logging.info('[Keras, MNIST] Accuracy on training set: %.2f%%', (scores[1] * 100))\n scores = cls.classifier_k._model.evaluate(x_test, y_test)\n logging.info('[Keras, MNIST] Accuracy on test set: %.2f%%', (scores[1] * 100))\n\n # Create basic CNN on MNIST using TensorFlow\n cls.classifier_tf, sess = get_classifier_tf()\n\n scores = get_labels_np_array(cls.classifier_tf.predict(x_train))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]\n logging.info('[TF, MNIST] Accuracy on training set: %.2f%%', (acc * 100))\n\n scores = get_labels_np_array(cls.classifier_tf.predict(x_test))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logging.info('[TF, MNIST] Accuracy on test set: %.2f%%', (acc * 100))\n\n # Create basic PyTorch model\n cls.classifier_py = get_classifier_pt()\n x_train, x_test = np.swapaxes(x_train, 1, 3), np.swapaxes(x_test, 1, 3)\n\n scores = get_labels_np_array(cls.classifier_py.predict(x_train))\n acc = np.sum(np.argmax(scores, axis=1) == np.argmax(y_train, axis=1)) / y_train.shape[0]\n logging.info('[PyTorch, MNIST] Accuracy on training set: %.2f%%', (acc * 100))\n\n scores = get_labels_np_array(cls.classifier_py.predict(x_test))\n acc = 
np.sum(np.argmax(scores, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logging.info('[PyTorch, MNIST] Accuracy on test set: %.2f%%', (acc * 100))\n\n def setUp(self):\n # Set master seed\n master_seed(1234)\n\n def test_mnist(self):\n # Define all backends to test\n backends = {'keras': self.classifier_k,\n 'tf': self.classifier_tf,\n 'pytorch': self.classifier_py}\n\n for _, classifier in backends.items():\n if _ == 'pytorch':\n self._swap_axes()\n self._test_backend_mnist(classifier)\n if _ == 'pytorch':\n self._swap_axes()\n\n self.classifier_tf._sess.close()\n tf.reset_default_graph()\n k.clear_session()\n\n def _swap_axes(self):\n (x_train, y_train), (x_test, y_test) = self.mnist\n x_train = np.swapaxes(x_train, 1, 3)\n x_test = np.swapaxes(x_test, 1, 3)\n self.mnist = (x_train, y_train), (x_test, y_test)\n\n def _test_backend_mnist(self, classifier):\n # Get MNIST\n (_, _), (x_test, y_test) = self.mnist\n x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]\n\n df = VirtualAdversarialMethod(classifier, batch_size=100)\n x_test_adv = df.generate(x_test)\n\n self.assertFalse((x_test == x_test_adv).all())\n\n y_pred = get_labels_np_array(classifier.predict(x_test_adv))\n self.assertFalse((y_test == y_pred).all())\n\n acc = np.sum(np.argmax(y_pred, axis=1) == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logging.info('Accuracy on adversarial examples: %.2f%%', (acc * 100))\n\n\nclass TestVirtualAdversarialVectors(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n # Get Iris\n (x_train, y_train), (x_test, y_test), _, _ = load_dataset('iris')\n cls.iris = (x_train, y_train), (x_test, y_test)\n\n def setUp(self):\n master_seed(1234)\n\n def test_iris_k_clipped(self):\n (_, _), (x_test, y_test) = self.iris\n classifier, _ = get_iris_classifier_kr()\n\n # Test untargeted attack\n attack = VirtualAdversarialMethod(classifier, eps=.1)\n x_test_adv = attack.generate(x_test)\n self.assertFalse((x_test == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))\n\n def test_iris_k_unbounded(self):\n (_, _), (x_test, y_test) = self.iris\n classifier, _ = get_iris_classifier_kr()\n\n # Recreate a classifier without clip values\n classifier = KerasClassifier(model=classifier._model, use_logits=False, channel_index=1)\n attack = VirtualAdversarialMethod(classifier, eps=1)\n x_test_adv = attack.generate(x_test)\n self.assertFalse((x_test == x_test_adv).all())\n self.assertTrue((x_test_adv > 1).any())\n self.assertTrue((x_test_adv < 0).any())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))\n\n def test_iris_tf(self):\n (_, _), (x_test, y_test) = self.iris\n classifier, _ = get_iris_classifier_tf()\n\n attack = VirtualAdversarialMethod(classifier, eps=.1)\n x_test_adv = attack.generate(x_test)\n self.assertFalse((x_test == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n 
self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))\n\n def test_iris_pt(self):\n (_, _), (x_test, y_test) = self.iris\n classifier = get_iris_classifier_pt()\n\n attack = VirtualAdversarialMethod(classifier, eps=.1)\n x_test_adv = attack.generate(x_test)\n self.assertFalse((x_test == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1).all())\n self.assertTrue((x_test_adv >= 0).all())\n\n preds_adv = np.argmax(classifier.predict(x_test_adv), axis=1)\n self.assertFalse((np.argmax(y_test, axis=1) == preds_adv).all())\n acc = np.sum(preds_adv == np.argmax(y_test, axis=1)) / y_test.shape[0]\n logger.info('Accuracy on Iris with VAT adversarial examples: %.2f%%', (acc * 100))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "# MIT License\n#\n# Copyright (C) IBM Corporation 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport keras.backend as k\nimport numpy as np\nimport tensorflow as tf\n\nfrom art.attacks.zoo import ZooAttack\nfrom art.utils import get_classifier_kr, get_classifier_pt, get_classifier_tf\nfrom art.utils import load_dataset, random_targets, master_seed, get_iris_classifier_pt\n\nlogger = logging.getLogger('testLogger')\n\nNB_TEST = 6\n\n\nclass TestZooAttack(unittest.TestCase):\n \"\"\"\n A unittest class for testing the ZOO attack.\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n # Get MNIST\n (_, _), (x_test, y_test), _, _ = load_dataset('mnist')\n x_test, y_test = x_test[:NB_TEST], y_test[:NB_TEST]\n cls.mnist = x_test, y_test\n\n def setUp(self):\n # Set master seed\n master_seed(1234)\n\n def test_failure_attack(self):\n \"\"\"\n Test the corner case when attack fails.\n :return:\n \"\"\"\n # Build TFClassifier\n tfc, sess = get_classifier_tf()\n\n # Get MNIST\n x_test, _ = self.mnist\n\n # Failure attack\n zoo = ZooAttack(classifier=tfc, max_iter=0, binary_search_steps=0, learning_rate=0)\n x_test_adv = zoo.generate(x_test)\n self.assertTrue((x_test_adv <= 1.0001).all())\n self.assertTrue((x_test_adv >= -0.0001).all())\n np.testing.assert_almost_equal(x_test, x_test_adv, 3)\n\n # Clean-up session\n sess.close()\n tf.reset_default_graph()\n\n def test_tfclassifier(self):\n \"\"\"\n First test with the TFClassifier.\n :return:\n \"\"\"\n # Build TFClassifier\n tfc, sess 
= get_classifier_tf()\n\n # Get MNIST\n x_test, y_test = self.mnist\n\n # Targeted attack\n zoo = ZooAttack(classifier=tfc, targeted=True)\n params = {'y': random_targets(y_test, tfc.nb_classes)}\n x_test_adv = zoo.generate(x_test, **params)\n self.assertFalse((x_test == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1.0001).all())\n self.assertTrue((x_test_adv >= -0.0001).all())\n target = np.argmax(params['y'], axis=1)\n y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)\n logger.debug('ZOO target: %s', target)\n logger.debug('ZOO actual: %s', y_pred_adv)\n logger.info('ZOO success rate on MNIST: %.2f', (sum(target == y_pred_adv) / float(len(target))))\n\n # Untargeted attack\n zoo = ZooAttack(classifier=tfc, targeted=False)\n x_test_adv = zoo.generate(x_test)\n # self.assertFalse((x_test == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1.0001).all())\n self.assertTrue((x_test_adv >= -0.0001).all())\n y_pred = np.argmax(tfc.predict(x_test), axis=1)\n y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)\n logger.debug('ZOO actual: %s', y_pred_adv)\n logger.info('ZOO success rate on MNIST: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred))))\n\n # Clean-up session\n sess.close()\n tf.reset_default_graph()\n\n def test_krclassifier(self):\n \"\"\"\n Second test with the KerasClassifier.\n :return:\n \"\"\"\n # Build KerasClassifier\n krc, _ = get_classifier_kr()\n\n # Get MNIST and test with 3 channels\n x_test, y_test = self.mnist\n\n # Targeted attack\n zoo = ZooAttack(classifier=krc, targeted=True, batch_size=5)\n params = {'y': random_targets(y_test, krc.nb_classes)}\n x_test_adv = zoo.generate(x_test, **params)\n self.assertFalse((x_test == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1.0001).all())\n self.assertTrue((x_test_adv >= -0.0001).all())\n target = np.argmax(params['y'], axis=1)\n y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1)\n logger.debug('ZOO target: %s', target)\n logger.debug('ZOO actual: %s', y_pred_adv)\n logger.info('ZOO success rate on MNIST: %.2f', (sum(target == y_pred_adv) / float(len(target))))\n\n # Untargeted attack\n zoo = ZooAttack(classifier=krc, targeted=False, max_iter=20)\n x_test_adv = zoo.generate(x_test)\n # self.assertFalse((x_test == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1.0001).all())\n self.assertTrue((x_test_adv >= -0.0001).all())\n y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1)\n y_pred = np.argmax(krc.predict(x_test), axis=1)\n logger.debug('ZOO actual: %s', y_pred_adv)\n logger.info('ZOO success rate on MNIST: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred))))\n\n # Clean-up\n k.clear_session()\n\n def test_ptclassifier(self):\n \"\"\"\n Third test with the PyTorchClassifier.\n :return:\n \"\"\"\n # Build PyTorchClassifier\n ptc = get_classifier_pt()\n\n # Get MNIST\n x_test, y_test = self.mnist\n x_test = np.swapaxes(x_test, 1, 3)\n\n # First attack\n zoo = ZooAttack(classifier=ptc, targeted=True, max_iter=10)\n params = {'y': random_targets(y_test, ptc.nb_classes)}\n x_test_adv = zoo.generate(x_test, **params)\n self.assertFalse((x_test == x_test_adv).all())\n self.assertTrue((x_test_adv <= 1.0001).all())\n self.assertTrue((x_test_adv >= -0.0001).all())\n target = np.argmax(params['y'], axis=1)\n y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1)\n logger.debug('ZOO target: %s', target)\n logger.debug('ZOO actual: %s', y_pred_adv)\n logger.info('ZOO success rate on MNIST: %.2f', (sum(target != y_pred_adv) / float(len(target))))\n\n # Second attack\n zoo = 
ZooAttack(classifier=ptc, targeted=False, max_iter=10)\n x_test_adv = zoo.generate(x_test)\n self.assertTrue((x_test_adv <= 1.0001).all())\n self.assertTrue((x_test_adv >= -0.0001).all())\n y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1)\n y_pred = np.argmax(ptc.predict(x_test), axis=1)\n logger.debug('ZOO actual: %s', y_pred_adv)\n logger.info('ZOO success rate on MNIST: %.2f', (sum(y_pred != y_pred_adv) / float(len(y_pred))))\n\n def test_failure_feature_vectors(self):\n attack_params = {\"rotation_max\": 22.5, \"scale_min\": 0.1, \"scale_max\": 1.0,\n \"learning_rate\": 5.0, \"number_of_steps\": 5, \"patch_shape\": (1, 28, 28), \"batch_size\": 10}\n classifier = get_iris_classifier_pt()\n data = np.random.rand(10, 4)\n\n # Assert that value error is raised for feature vectors\n with self.assertRaises(ValueError) as context:\n attack = ZooAttack(classifier=classifier)\n attack.set_params(**attack_params)\n attack.generate(data)\n\n self.assertTrue('Feature vectors detected.' in str(context.exception))\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "tensorflow.reset_default_graph", "numpy.argmax", "numpy.swapaxes" ], [ "numpy.random.rand", "numpy.testing.assert_almost_equal", "tensorflow.reset_default_graph", "numpy.swapaxes", "numpy.argmax" ] ]
squirrelo/scikit-bio
[ "f9016283638ef49ffccb3bb5f79e5a421462cfd1", "f9016283638ef49ffccb3bb5f79e5a421462cfd1" ]
[ "skbio/metadata/_testing.py", "skbio/alignment/tests/test_pairwise.py" ]
[ "# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport copy\n\nimport pandas as pd\nimport numpy as np\nimport numpy.testing as npt\n\nfrom skbio.util._testing import assert_data_frame_almost_equal\n\n\nclass MetadataMixinTests:\n def test_constructor_invalid_type(self):\n for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):\n with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):\n self._metadata_constructor_(metadata=md)\n\n def test_constructor_no_metadata(self):\n for md in None, {}:\n obj = self._metadata_constructor_(metadata=md)\n\n self.assertEqual(obj.metadata, {})\n\n def test_constructor_with_metadata(self):\n obj = self._metadata_constructor_(metadata={'foo': 'bar'})\n self.assertEqual(obj.metadata, {'foo': 'bar'})\n\n obj = self._metadata_constructor_(\n metadata={'': '', 123: {'a': 'b', 'c': 'd'}})\n self.assertEqual(obj.metadata, {'': '', 123: {'a': 'b', 'c': 'd'}})\n\n def test_constructor_makes_shallow_copy_of_metadata(self):\n md = {'foo': 'bar', 42: []}\n obj = self._metadata_constructor_(metadata=md)\n\n self.assertEqual(obj.metadata, md)\n self.assertIsNot(obj.metadata, md)\n\n md['foo'] = 'baz'\n self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})\n\n md[42].append(True)\n self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})\n\n def test_eq(self):\n self.assertReallyEqual(\n self._metadata_constructor_(metadata={'foo': 42}),\n self._metadata_constructor_(metadata={'foo': 42}))\n\n self.assertReallyEqual(\n self._metadata_constructor_(metadata={'foo': 42, 123: {}}),\n self._metadata_constructor_(metadata={'foo': 42, 123: {}}))\n\n def test_eq_missing_metadata(self):\n self.assertReallyEqual(self._metadata_constructor_(),\n self._metadata_constructor_())\n self.assertReallyEqual(self._metadata_constructor_(),\n self._metadata_constructor_(metadata={}))\n self.assertReallyEqual(self._metadata_constructor_(metadata={}),\n self._metadata_constructor_(metadata={}))\n\n def test_ne(self):\n # Both have metadata.\n obj1 = self._metadata_constructor_(metadata={'id': 'foo'})\n obj2 = self._metadata_constructor_(metadata={'id': 'bar'})\n self.assertReallyNotEqual(obj1, obj2)\n\n # One has metadata.\n obj1 = self._metadata_constructor_(metadata={'id': 'foo'})\n obj2 = self._metadata_constructor_()\n self.assertReallyNotEqual(obj1, obj2)\n\n def test_copy_metadata_none(self):\n obj = self._metadata_constructor_()\n obj_copy = copy.copy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n self.assertEqual(obj.metadata, {})\n self.assertEqual(obj_copy.metadata, {})\n self.assertIsNot(obj.metadata, obj_copy.metadata)\n\n def test_copy_metadata_empty(self):\n obj = self._metadata_constructor_(metadata={})\n obj_copy = copy.copy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n self.assertEqual(obj.metadata, {})\n self.assertEqual(obj_copy.metadata, {})\n self.assertIsNot(obj.metadata, obj_copy.metadata)\n\n def test_copy_with_metadata(self):\n obj = self._metadata_constructor_(metadata={'foo': [1]})\n obj_copy = copy.copy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n self.assertEqual(obj.metadata, {'foo': [1]})\n self.assertEqual(obj_copy.metadata, 
{'foo': [1]})\n self.assertIsNot(obj.metadata, obj_copy.metadata)\n self.assertIs(obj.metadata['foo'], obj_copy.metadata['foo'])\n\n obj_copy.metadata['foo'].append(2)\n obj_copy.metadata['foo2'] = 42\n\n self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})\n self.assertEqual(obj.metadata, {'foo': [1, 2]})\n\n def test_deepcopy_metadata_none(self):\n obj = self._metadata_constructor_()\n obj_copy = copy.deepcopy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n self.assertEqual(obj.metadata, {})\n self.assertEqual(obj_copy.metadata, {})\n self.assertIsNot(obj.metadata, obj_copy.metadata)\n\n def test_deepcopy_metadata_empty(self):\n obj = self._metadata_constructor_(metadata={})\n obj_copy = copy.deepcopy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n self.assertEqual(obj.metadata, {})\n self.assertEqual(obj_copy.metadata, {})\n self.assertIsNot(obj.metadata, obj_copy.metadata)\n\n def test_deepcopy_with_metadata(self):\n obj = self._metadata_constructor_(metadata={'foo': [1]})\n obj_copy = copy.deepcopy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n self.assertEqual(obj.metadata, {'foo': [1]})\n self.assertEqual(obj_copy.metadata, {'foo': [1]})\n self.assertIsNot(obj.metadata, obj_copy.metadata)\n self.assertIsNot(obj.metadata['foo'], obj_copy.metadata['foo'])\n\n obj_copy.metadata['foo'].append(2)\n obj_copy.metadata['foo2'] = 42\n\n self.assertEqual(obj_copy.metadata, {'foo': [1, 2], 'foo2': 42})\n self.assertEqual(obj.metadata, {'foo': [1]})\n\n def test_deepcopy_memo_is_respected(self):\n # Basic test to ensure deepcopy's memo is passed through to recursive\n # deepcopy calls.\n obj = self._metadata_constructor_(metadata={'foo': 'bar'})\n memo = {}\n copy.deepcopy(obj, memo)\n self.assertGreater(len(memo), 2)\n\n def test_metadata_getter(self):\n obj = self._metadata_constructor_(\n metadata={42: 'foo', ('hello', 'world'): 43})\n\n self.assertIsInstance(obj.metadata, dict)\n self.assertEqual(obj.metadata, {42: 'foo', ('hello', 'world'): 43})\n\n obj.metadata[42] = 'bar'\n self.assertEqual(obj.metadata, {42: 'bar', ('hello', 'world'): 43})\n\n def test_metadata_getter_no_metadata(self):\n obj = self._metadata_constructor_()\n\n self.assertIsInstance(obj.metadata, dict)\n self.assertEqual(obj.metadata, {})\n\n def test_metadata_setter(self):\n obj = self._metadata_constructor_()\n self.assertEqual(obj.metadata, {})\n\n obj.metadata = {'hello': 'world'}\n self.assertEqual(obj.metadata, {'hello': 'world'})\n\n obj.metadata = {}\n self.assertEqual(obj.metadata, {})\n\n def test_metadata_setter_makes_shallow_copy(self):\n obj = self._metadata_constructor_()\n\n md = {'foo': 'bar', 42: []}\n obj.metadata = md\n\n self.assertEqual(obj.metadata, md)\n self.assertIsNot(obj.metadata, md)\n\n md['foo'] = 'baz'\n self.assertEqual(obj.metadata, {'foo': 'bar', 42: []})\n\n md[42].append(True)\n self.assertEqual(obj.metadata, {'foo': 'bar', 42: [True]})\n\n def test_metadata_setter_invalid_type(self):\n obj = self._metadata_constructor_(metadata={123: 456})\n\n for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),\n pd.DataFrame()):\n with self.assertRaisesRegex(TypeError, 'metadata must be a dict'):\n obj.metadata = md\n self.assertEqual(obj.metadata, {123: 456})\n\n def test_metadata_deleter(self):\n obj = self._metadata_constructor_(metadata={'foo': 'bar'})\n\n self.assertEqual(obj.metadata, {'foo': 'bar'})\n\n del obj.metadata\n self.assertEqual(obj.metadata, {})\n\n # Delete again.\n del 
obj.metadata\n self.assertEqual(obj.metadata, {})\n\n obj = self._metadata_constructor_()\n\n self.assertEqual(obj.metadata, {})\n del obj.metadata\n self.assertEqual(obj.metadata, {})\n\n def test_has_metadata(self):\n obj = self._metadata_constructor_()\n\n self.assertFalse(obj.has_metadata())\n\n self.assertFalse(\n self._metadata_constructor_(metadata={}).has_metadata())\n\n self.assertTrue(\n self._metadata_constructor_(metadata={'': ''}).has_metadata())\n self.assertTrue(\n self._metadata_constructor_(\n metadata={'foo': 42}).has_metadata())\n\n\nclass PositionalMetadataMixinTests:\n def test_constructor_invalid_positional_metadata_type(self):\n with self.assertRaisesRegex(TypeError,\n 'Invalid positional metadata. Must be '\n 'consumable by `pd.DataFrame` constructor.'\n ' Original pandas error message: '):\n self._positional_metadata_constructor_(0, positional_metadata=2)\n\n def test_constructor_positional_metadata_len_mismatch(self):\n # Zero elements.\n with self.assertRaisesRegex(ValueError, '\\(0\\).*\\(4\\)'):\n self._positional_metadata_constructor_(4, positional_metadata=[])\n\n # Not enough elements.\n with self.assertRaisesRegex(ValueError, '\\(3\\).*\\(4\\)'):\n self._positional_metadata_constructor_(\n 4, positional_metadata=[2, 3, 4])\n\n # Too many elements.\n with self.assertRaisesRegex(ValueError, '\\(5\\).*\\(4\\)'):\n self._positional_metadata_constructor_(\n 4, positional_metadata=[2, 3, 4, 5, 6])\n\n # Series not enough rows.\n with self.assertRaisesRegex(ValueError, '\\(3\\).*\\(4\\)'):\n self._positional_metadata_constructor_(\n 4, positional_metadata=pd.Series(range(3)))\n\n # Series too many rows.\n with self.assertRaisesRegex(ValueError, '\\(5\\).*\\(4\\)'):\n self._positional_metadata_constructor_(\n 4, positional_metadata=pd.Series(range(5)))\n\n # DataFrame not enough rows.\n with self.assertRaisesRegex(ValueError, '\\(3\\).*\\(4\\)'):\n self._positional_metadata_constructor_(\n 4, positional_metadata=pd.DataFrame({'quality': range(3)}))\n\n # DataFrame too many rows.\n with self.assertRaisesRegex(ValueError, '\\(5\\).*\\(4\\)'):\n self._positional_metadata_constructor_(\n 4, positional_metadata=pd.DataFrame({'quality': range(5)}))\n\n # Empty DataFrame wrong size.\n with self.assertRaisesRegex(ValueError, '\\(2\\).*\\(3\\)'):\n self._positional_metadata_constructor_(\n 3, positional_metadata=pd.DataFrame(index=range(2)))\n\n def test_constructor_no_positional_metadata(self):\n # Length zero with missing/empty positional metadata.\n for empty in None, {}, pd.DataFrame():\n obj = self._positional_metadata_constructor_(\n 0, positional_metadata=empty)\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=np.arange(0)))\n\n # Nonzero length with missing positional metadata.\n obj = self._positional_metadata_constructor_(\n 3, positional_metadata=None)\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=np.arange(3)))\n\n def test_constructor_with_positional_metadata_len_zero(self):\n for data in [], (), np.array([]):\n obj = self._positional_metadata_constructor_(\n 0, positional_metadata={'foo': data})\n\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': data}, index=np.arange(0)))\n\n def test_constructor_with_positional_metadata_len_one(self):\n for data in [2], (2, ), np.array([2]):\n obj = self._positional_metadata_constructor_(\n 1, positional_metadata={'foo': data})\n\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': data}, 
index=np.arange(1)))\n\n def test_constructor_with_positional_metadata_len_greater_than_one(self):\n for data in ([0, 42, 42, 1, 0, 8, 100, 0, 0],\n (0, 42, 42, 1, 0, 8, 100, 0, 0),\n np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):\n obj = self._positional_metadata_constructor_(\n 9, positional_metadata={'foo': data})\n\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': data}, index=np.arange(9)))\n\n def test_constructor_with_positional_metadata_multiple_columns(self):\n obj = self._positional_metadata_constructor_(\n 5, positional_metadata={'foo': np.arange(5),\n 'bar': np.arange(5)[::-1]})\n\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': np.arange(5),\n 'bar': np.arange(5)[::-1]}, index=np.arange(5)))\n\n def test_constructor_with_positional_metadata_custom_index(self):\n df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},\n index=['a', 'b', 'c', 'd', 'e'])\n obj = self._positional_metadata_constructor_(\n 5, positional_metadata=df)\n\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': np.arange(5),\n 'bar': np.arange(5)[::-1]}, index=np.arange(5)))\n\n def test_constructor_makes_shallow_copy_of_positional_metadata(self):\n df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=['a', 'b', 'c'])\n obj = self._positional_metadata_constructor_(\n 3, positional_metadata=df)\n\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=np.arange(3)))\n self.assertIsNot(obj.positional_metadata, df)\n\n # Original df is not mutated.\n orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=['a', 'b', 'c'])\n assert_data_frame_almost_equal(df, orig_df)\n\n # Change values of column (using same dtype).\n df['foo'] = [42, 42, 42]\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=np.arange(3)))\n\n # Change single value of underlying data.\n df.values[0][0] = 10\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=np.arange(3)))\n\n # Mutate list (not a deep copy).\n df['bar'][0].append(42)\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},\n index=np.arange(3)))\n\n def test_eq_basic(self):\n obj1 = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [1, 2, 3]})\n obj2 = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [1, 2, 3]})\n self.assertReallyEqual(obj1, obj2)\n\n def test_eq_from_different_source(self):\n obj1 = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': np.array([1, 2, 3])})\n obj2 = self._positional_metadata_constructor_(\n 3, positional_metadata=pd.DataFrame({'foo': [1, 2, 3]},\n index=['foo', 'bar', 'baz']))\n self.assertReallyEqual(obj1, obj2)\n\n def test_eq_missing_positional_metadata(self):\n for empty in None, {}, pd.DataFrame(), pd.DataFrame(index=[]):\n obj = self._positional_metadata_constructor_(\n 0, positional_metadata=empty)\n\n self.assertReallyEqual(\n obj,\n self._positional_metadata_constructor_(0))\n self.assertReallyEqual(\n obj,\n self._positional_metadata_constructor_(\n 0, positional_metadata=empty))\n\n for empty in None, pd.DataFrame(index=['a', 'b']):\n obj = self._positional_metadata_constructor_(\n 2, positional_metadata=empty)\n\n self.assertReallyEqual(\n 
obj,\n self._positional_metadata_constructor_(2))\n self.assertReallyEqual(\n obj,\n self._positional_metadata_constructor_(\n 2, positional_metadata=empty))\n\n def test_ne_len_zero(self):\n # Both have positional metadata.\n obj1 = self._positional_metadata_constructor_(\n 0, positional_metadata={'foo': []})\n obj2 = self._positional_metadata_constructor_(\n 0, positional_metadata={'foo': [], 'bar': []})\n self.assertReallyNotEqual(obj1, obj2)\n\n # One has positional metadata.\n obj1 = self._positional_metadata_constructor_(\n 0, positional_metadata={'foo': []})\n obj2 = self._positional_metadata_constructor_(0)\n self.assertReallyNotEqual(obj1, obj2)\n\n def test_ne_len_greater_than_zero(self):\n # Both have positional metadata.\n obj1 = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [1, 2, 3]})\n obj2 = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [1, 2, 2]})\n self.assertReallyNotEqual(obj1, obj2)\n\n # One has positional metadata.\n obj1 = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [1, 2, 3]})\n obj2 = self._positional_metadata_constructor_(3)\n self.assertReallyNotEqual(obj1, obj2)\n\n def test_ne_len_mismatch(self):\n obj1 = self._positional_metadata_constructor_(\n 3, positional_metadata=pd.DataFrame(index=range(3)))\n obj2 = self._positional_metadata_constructor_(\n 2, positional_metadata=pd.DataFrame(index=range(2)))\n self.assertReallyNotEqual(obj1, obj2)\n\n def test_copy_positional_metadata_none(self):\n obj = self._positional_metadata_constructor_(3)\n obj_copy = copy.copy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n assert_data_frame_almost_equal(obj_copy.positional_metadata,\n pd.DataFrame(index=range(3)))\n self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)\n\n def test_copy_positional_metadata_empty(self):\n obj = self._positional_metadata_constructor_(\n 3, positional_metadata=pd.DataFrame(index=range(3)))\n obj_copy = copy.copy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n assert_data_frame_almost_equal(obj_copy.positional_metadata,\n pd.DataFrame(index=range(3)))\n self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)\n\n def test_copy_with_positional_metadata(self):\n obj = self._positional_metadata_constructor_(\n 4, positional_metadata={'bar': [[], [], [], []],\n 'baz': [42, 42, 42, 42]})\n obj_copy = copy.copy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'bar': [[], [], [], []],\n 'baz': [42, 42, 42, 42]}, index=range(4)))\n assert_data_frame_almost_equal(\n obj_copy.positional_metadata,\n pd.DataFrame({'bar': [[], [], [], []],\n 'baz': [42, 42, 42, 42]}, index=range(4)))\n\n self.assertIsNot(obj.positional_metadata,\n obj_copy.positional_metadata)\n self.assertIsNot(obj.positional_metadata.values,\n obj_copy.positional_metadata.values)\n self.assertIs(obj.positional_metadata.loc[0, 'bar'],\n obj_copy.positional_metadata.loc[0, 'bar'])\n\n obj_copy.positional_metadata.loc[0, 'bar'].append(1)\n obj_copy.positional_metadata.loc[0, 'baz'] = 43\n\n assert_data_frame_almost_equal(\n obj_copy.positional_metadata,\n pd.DataFrame({'bar': [[1], [], [], []],\n 'baz': [43, 42, 42, 42]}))\n 
assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'bar': [[1], [], [], []],\n 'baz': [42, 42, 42, 42]}))\n\n def test_deepcopy_positional_metadata_none(self):\n obj = self._positional_metadata_constructor_(3)\n obj_copy = copy.deepcopy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n assert_data_frame_almost_equal(obj_copy.positional_metadata,\n pd.DataFrame(index=range(3)))\n self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)\n\n def test_deepcopy_positional_metadata_empty(self):\n obj = self._positional_metadata_constructor_(\n 3, positional_metadata=pd.DataFrame(index=range(3)))\n obj_copy = copy.deepcopy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n assert_data_frame_almost_equal(obj_copy.positional_metadata,\n pd.DataFrame(index=range(3)))\n self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)\n\n def test_deepcopy_with_positional_metadata(self):\n obj = self._positional_metadata_constructor_(\n 4, positional_metadata={'bar': [[], [], [], []],\n 'baz': [42, 42, 42, 42]})\n obj_copy = copy.deepcopy(obj)\n\n self.assertEqual(obj, obj_copy)\n self.assertIsNot(obj, obj_copy)\n\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'bar': [[], [], [], []],\n 'baz': [42, 42, 42, 42]}, index=range(4)))\n assert_data_frame_almost_equal(\n obj_copy.positional_metadata,\n pd.DataFrame({'bar': [[], [], [], []],\n 'baz': [42, 42, 42, 42]}, index=range(4)))\n\n self.assertIsNot(obj.positional_metadata, obj_copy.positional_metadata)\n self.assertIsNot(obj.positional_metadata.values,\n obj_copy.positional_metadata.values)\n self.assertIsNot(obj.positional_metadata.loc[0, 'bar'],\n obj_copy.positional_metadata.loc[0, 'bar'])\n\n obj_copy.positional_metadata.loc[0, 'bar'].append(1)\n obj_copy.positional_metadata.loc[0, 'baz'] = 43\n\n assert_data_frame_almost_equal(\n obj_copy.positional_metadata,\n pd.DataFrame({'bar': [[1], [], [], []],\n 'baz': [43, 42, 42, 42]}))\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'bar': [[], [], [], []],\n 'baz': [42, 42, 42, 42]}))\n\n def test_deepcopy_memo_is_respected(self):\n # Basic test to ensure deepcopy's memo is passed through to recursive\n # deepcopy calls.\n obj = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [1, 2, 3]})\n memo = {}\n copy.deepcopy(obj, memo)\n self.assertGreater(len(memo), 2)\n\n def test_positional_metadata_getter(self):\n obj = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [22, 22, 0]})\n\n self.assertIsInstance(obj.positional_metadata, pd.DataFrame)\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame({'foo': [22, 22, 0]}))\n\n # Update existing column.\n obj.positional_metadata['foo'] = [42, 42, 43]\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame({'foo': [42, 42, 43]}))\n\n # Add new column.\n obj.positional_metadata['foo2'] = [True, False, True]\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': [42, 42, 43],\n 'foo2': [True, False, True]}))\n\n def test_positional_metadata_getter_no_positional_metadata(self):\n obj = self._positional_metadata_constructor_(4)\n\n self.assertIsInstance(obj.positional_metadata, pd.DataFrame)\n 
assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame(index=np.arange(4)))\n\n def test_positional_metadata_getter_set_column_series(self):\n length = 8\n obj = self._positional_metadata_constructor_(\n length, positional_metadata={'foo': range(length)})\n\n obj.positional_metadata['bar'] = pd.Series(range(length-3))\n # pandas.Series will be padded with NaN if too short.\n npt.assert_equal(obj.positional_metadata['bar'],\n np.array(list(range(length-3)) + [np.nan]*3))\n\n obj.positional_metadata['baz'] = pd.Series(range(length+3))\n # pandas.Series will be truncated if too long.\n npt.assert_equal(obj.positional_metadata['baz'],\n np.array(range(length)))\n\n def test_positional_metadata_getter_set_column_array(self):\n length = 8\n obj = self._positional_metadata_constructor_(\n length, positional_metadata={'foo': range(length)})\n\n # array-like objects will fail if wrong size.\n for array_like in (np.array(range(length-1)), range(length-1),\n np.array(range(length+1)), range(length+1)):\n with self.assertRaisesRegex(ValueError,\n \"Length of values does not match \"\n \"length of index\"):\n obj.positional_metadata['bar'] = array_like\n\n def test_positional_metadata_setter_pandas_consumable(self):\n obj = self._positional_metadata_constructor_(3)\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n\n obj.positional_metadata = {'foo': [3, 2, 1]}\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame({'foo': [3, 2, 1]}))\n\n obj.positional_metadata = pd.DataFrame(index=np.arange(3))\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=np.arange(3)))\n\n def test_positional_metadata_setter_data_frame(self):\n obj = self._positional_metadata_constructor_(3)\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n\n obj.positional_metadata = pd.DataFrame({'foo': [3, 2, 1]},\n index=['a', 'b', 'c'])\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame({'foo': [3, 2, 1]}))\n\n obj.positional_metadata = pd.DataFrame(index=np.arange(3))\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=np.arange(3)))\n\n def test_positional_metadata_setter_none(self):\n obj = self._positional_metadata_constructor_(\n 0, positional_metadata={'foo': []})\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame({'foo': []}))\n\n # `None` behavior differs from constructor.\n obj.positional_metadata = None\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=np.arange(0)))\n\n def test_positional_metadata_setter_makes_shallow_copy(self):\n obj = self._positional_metadata_constructor_(3)\n\n df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=['a', 'b', 'c'])\n obj.positional_metadata = df\n\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=np.arange(3)))\n self.assertIsNot(obj.positional_metadata, df)\n\n # Original df is not mutated.\n orig_df = pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=['a', 'b', 'c'])\n assert_data_frame_almost_equal(df, orig_df)\n\n # Change values of column (using same dtype).\n df['foo'] = [42, 42, 42]\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=np.arange(3)))\n\n # Change single value of underlying data.\n df.values[0][0] = 10\n assert_data_frame_almost_equal(\n 
obj.positional_metadata,\n pd.DataFrame({'foo': [22, 22, 0], 'bar': [[], [], []]},\n index=np.arange(3)))\n\n # Mutate list (not a deep copy).\n df['bar'][0].append(42)\n assert_data_frame_almost_equal(\n obj.positional_metadata,\n pd.DataFrame({'foo': [22, 22, 0], 'bar': [[42], [], []]},\n index=np.arange(3)))\n\n def test_positional_metadata_setter_invalid_type(self):\n obj = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [1, 2, 42]})\n\n with self.assertRaisesRegex(TypeError,\n 'Invalid positional metadata. Must be '\n 'consumable by `pd.DataFrame` constructor.'\n ' Original pandas error message: '):\n obj.positional_metadata = 2\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame({'foo': [1, 2, 42]}))\n\n def test_positional_metadata_setter_len_mismatch(self):\n obj = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [1, 2, 42]})\n\n # `None` behavior differs from constructor.\n with self.assertRaisesRegex(ValueError, '\\(0\\).*\\(3\\)'):\n obj.positional_metadata = None\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame({'foo': [1, 2, 42]}))\n\n with self.assertRaisesRegex(ValueError, '\\(4\\).*\\(3\\)'):\n obj.positional_metadata = [1, 2, 3, 4]\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame({'foo': [1, 2, 42]}))\n\n def test_positional_metadata_deleter(self):\n obj = self._positional_metadata_constructor_(\n 3, positional_metadata={'foo': [1, 2, 3]})\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame({'foo': [1, 2, 3]}))\n\n del obj.positional_metadata\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n\n # Delete again.\n del obj.positional_metadata\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n\n obj = self._positional_metadata_constructor_(3)\n\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n del obj.positional_metadata\n assert_data_frame_almost_equal(obj.positional_metadata,\n pd.DataFrame(index=range(3)))\n\n def test_has_positional_metadata(self):\n obj = self._positional_metadata_constructor_(4)\n self.assertFalse(obj.has_positional_metadata())\n\n obj = self._positional_metadata_constructor_(0, positional_metadata={})\n self.assertFalse(obj.has_positional_metadata())\n\n obj = self._positional_metadata_constructor_(\n 4, positional_metadata=pd.DataFrame(index=np.arange(4)))\n self.assertFalse(obj.has_positional_metadata())\n\n obj = self._positional_metadata_constructor_(\n 4, positional_metadata=pd.DataFrame(index=['a', 'b', 'c', 'd']))\n self.assertFalse(obj.has_positional_metadata())\n\n obj = self._positional_metadata_constructor_(\n 0, positional_metadata={'foo': []})\n self.assertTrue(obj.has_positional_metadata())\n\n obj = self._positional_metadata_constructor_(\n 4, positional_metadata={'foo': [1, 2, 3, 4]})\n self.assertTrue(obj.has_positional_metadata())\n\n obj = self._positional_metadata_constructor_(\n 2, positional_metadata={'foo': [1, 2], 'bar': ['abc', 'def']})\n self.assertTrue(obj.has_positional_metadata())\n", "# ----------------------------------------------------------------------------\n# Copyright (c) 2013--, scikit-bio development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n# ----------------------------------------------------------------------------\n\nfrom unittest 
import TestCase, main\nimport warnings\n\nimport numpy as np\n\nfrom skbio import Sequence, Protein, DNA, RNA, TabularMSA\nfrom skbio.alignment import (\n global_pairwise_align_protein, local_pairwise_align_protein,\n global_pairwise_align_nucleotide, local_pairwise_align_nucleotide,\n make_identity_substitution_matrix, local_pairwise_align,\n global_pairwise_align)\nfrom skbio.alignment._pairwise import (\n _init_matrices_sw, _init_matrices_nw,\n _compute_score_and_traceback_matrices, _traceback, _first_largest,\n _compute_substitution_score)\nfrom skbio.sequence import GrammaredSequence\nfrom skbio.util._decorator import classproperty, overrides\n\n\nclass CustomSequence(GrammaredSequence):\n @classproperty\n @overrides(GrammaredSequence)\n def gap_chars(cls):\n return set('^$')\n\n @classproperty\n @overrides(GrammaredSequence)\n def default_gap_char(cls):\n return '^'\n\n @classproperty\n @overrides(GrammaredSequence)\n def nondegenerate_chars(cls):\n return set('WXYZ')\n\n @classproperty\n @overrides(GrammaredSequence)\n def degenerate_map(cls):\n return {}\n\n\nclass PairwiseAlignmentTests(TestCase):\n \"\"\"\n Note: In the high-level tests, the expected results were derived with\n assistance from the EMBOSS web server:\n http://www.ebi.ac.uk/Tools/psa/emboss_needle/\n http://www.ebi.ac.uk/Tools/psa/emboss_water/\n In some cases, placement of non-gap characters surrounded by gap\n characters is slightly different between scikit-bio and the EMBOSS\n server. These differences arise from arbitrary implementation\n differences, and always result in the same score (which tells us that\n the alignments are equivalent). In cases where the expected results\n included here differ from those generated by the EMBOSS server, I note\n the EMBOSS result as a comment below the expected value.\n\n \"\"\"\n\n def setUp(self):\n \"\"\"Ignore warnings during tests.\"\"\"\n warnings.simplefilter(\"ignore\")\n\n def tearDown(self):\n \"\"\"Clear the list of warning filters, so that no filters are active.\"\"\"\n warnings.resetwarnings()\n\n def test_make_identity_substitution_matrix(self):\n expected = {'A': {'A': 1, 'C': -2, 'G': -2, 'T': -2, 'U': -2},\n 'C': {'A': -2, 'C': 1, 'G': -2, 'T': -2, 'U': -2},\n 'G': {'A': -2, 'C': -2, 'G': 1, 'T': -2, 'U': -2},\n 'T': {'A': -2, 'C': -2, 'G': -2, 'T': 1, 'U': -2},\n 'U': {'A': -2, 'C': -2, 'G': -2, 'T': -2, 'U': 1}}\n self.assertEqual(make_identity_substitution_matrix(1, -2), expected)\n\n expected = {'A': {'A': 5, 'C': -4, 'G': -4, 'T': -4, 'U': -4},\n 'C': {'A': -4, 'C': 5, 'G': -4, 'T': -4, 'U': -4},\n 'G': {'A': -4, 'C': -4, 'G': 5, 'T': -4, 'U': -4},\n 'T': {'A': -4, 'C': -4, 'G': -4, 'T': 5, 'U': -4},\n 'U': {'A': -4, 'C': -4, 'G': -4, 'T': -4, 'U': 5}}\n self.assertEqual(make_identity_substitution_matrix(5, -4), expected)\n\n def test_global_pairwise_align_custom_alphabet(self):\n custom_substitution_matrix = make_identity_substitution_matrix(\n 1, -1, alphabet=CustomSequence.nondegenerate_chars)\n\n custom_msa, custom_score, custom_start_end = global_pairwise_align(\n CustomSequence(\"WXYZ\"), CustomSequence(\"WXYYZZ\"),\n 10.0, 5.0, custom_substitution_matrix)\n\n # Expected values computed by running an equivalent alignment using the\n # DNA alphabet with the following mapping:\n #\n # W X Y Z\n # | | | |\n # A C G T\n #\n self.assertEqual(custom_msa, TabularMSA([CustomSequence('WXYZ^^'),\n CustomSequence('WXYYZZ')]))\n self.assertEqual(custom_score, 2.0)\n self.assertEqual(custom_start_end, [(0, 3), (0, 5)])\n\n def 
test_local_pairwise_align_custom_alphabet(self):\n custom_substitution_matrix = make_identity_substitution_matrix(\n 5, -4, alphabet=CustomSequence.nondegenerate_chars)\n\n custom_msa, custom_score, custom_start_end = local_pairwise_align(\n CustomSequence(\"YWXXZZYWXXWYYZWXX\"),\n CustomSequence(\"YWWXZZZYWXYZWWX\"), 5.0, 0.5,\n custom_substitution_matrix)\n\n # Expected values computed by running an equivalent alignment using the\n # DNA alphabet with the following mapping:\n #\n # W X Y Z\n # | | | |\n # A C G T\n #\n self.assertEqual(\n custom_msa,\n TabularMSA([CustomSequence('WXXZZYWXXWYYZWXX'),\n CustomSequence('WXZZZYWX^^^YZWWX')]))\n self.assertEqual(custom_score, 41.0)\n self.assertEqual(custom_start_end, [(1, 16), (2, 14)])\n\n def test_global_pairwise_align_invalid_type(self):\n with self.assertRaisesRegex(TypeError,\n \"GrammaredSequence.*\"\n \"TabularMSA.*'Sequence'\"):\n global_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})\n\n def test_global_pairwise_align_dtype_mismatch(self):\n with self.assertRaisesRegex(TypeError,\n \"same dtype: 'DNA' != 'RNA'\"):\n global_pairwise_align(DNA('ACGT'), TabularMSA([RNA('ACGU')]),\n 1.0, 1.0, {})\n\n with self.assertRaisesRegex(TypeError,\n \"same dtype: 'DNA' != 'RNA'\"):\n global_pairwise_align(TabularMSA([DNA('ACGT')]),\n TabularMSA([RNA('ACGU')]),\n 1.0, 1.0, {})\n\n def test_global_pairwise_align_protein(self):\n obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(\n Protein(\"HEAGAWGHEE\"), Protein(\"PAWHEAE\"), gap_open_penalty=10.,\n gap_extend_penalty=5.)\n\n self.assertEqual(obs_msa, TabularMSA([Protein(\"HEAGAWGHEE-\"),\n Protein(\"---PAW-HEAE\")]))\n self.assertEqual(obs_score, 23.0)\n self.assertEqual(obs_start_end, [(0, 9), (0, 6)])\n\n # EMBOSS result: P---AW-HEAE\n obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(\n Protein(\"HEAGAWGHEE\"), Protein(\"PAWHEAE\"), gap_open_penalty=5.,\n gap_extend_penalty=0.5)\n\n self.assertEqual(obs_msa, TabularMSA([Protein(\"HEAGAWGHE-E\"),\n Protein(\"---PAW-HEAE\")]))\n self.assertEqual(obs_score, 30.0)\n self.assertEqual(obs_start_end, [(0, 9), (0, 6)])\n\n # Protein sequences with metadata\n obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(\n Protein(\"HEAGAWGHEE\", metadata={'id': \"s1\"}),\n Protein(\"PAWHEAE\", metadata={'id': \"s2\"}),\n gap_open_penalty=10., gap_extend_penalty=5.)\n\n self.assertEqual(obs_msa, TabularMSA([Protein(\"HEAGAWGHEE-\"),\n Protein(\"---PAW-HEAE\")]))\n self.assertEqual(obs_score, 23.0)\n self.assertEqual(obs_start_end, [(0, 9), (0, 6)])\n\n # One TabularMSA and one Protein as input\n obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(\n TabularMSA([Protein(\"HEAGAWGHEE\", metadata={'id': \"s1\"})]),\n Protein(\"PAWHEAE\", metadata={'id': \"s2\"}),\n gap_open_penalty=10., gap_extend_penalty=5.)\n\n self.assertEqual(obs_msa, TabularMSA([Protein(\"HEAGAWGHEE-\"),\n Protein(\"---PAW-HEAE\")]))\n self.assertEqual(obs_score, 23.0)\n self.assertEqual(obs_start_end, [(0, 9), (0, 6)])\n\n # One single-sequence alignment as input and one double-sequence\n # alignment as input. 
Score confirmed manually.\n obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(\n TabularMSA([Protein(\"HEAGAWGHEE\", metadata={'id': \"s1\"}),\n Protein(\"HDAGAWGHDE\", metadata={'id': \"s2\"})]),\n TabularMSA([Protein(\"PAWHEAE\", metadata={'id': \"s3\"})]),\n gap_open_penalty=10., gap_extend_penalty=5.)\n\n self.assertEqual(obs_msa, TabularMSA([Protein(\"HEAGAWGHEE-\"),\n Protein(\"HDAGAWGHDE-\"),\n Protein(\"---PAW-HEAE\")]))\n self.assertEqual(obs_score, 21.0)\n self.assertEqual(obs_start_end, [(0, 9), (0, 6)])\n\n # TypeError on invalid input\n self.assertRaises(TypeError, global_pairwise_align_protein,\n 42, Protein(\"HEAGAWGHEE\"))\n self.assertRaises(TypeError, global_pairwise_align_protein,\n Protein(\"HEAGAWGHEE\"), 42)\n\n def test_global_pairwise_align_protein_invalid_dtype(self):\n with self.assertRaisesRegex(TypeError,\n \"TabularMSA with Protein dtype.*dtype \"\n \"'DNA'\"):\n global_pairwise_align_protein(TabularMSA([Protein('PAW')]),\n TabularMSA([DNA('ACGT')]))\n\n def test_global_pairwise_align_protein_penalize_terminal_gaps(self):\n obs_msa, obs_score, obs_start_end = global_pairwise_align_protein(\n Protein(\"HEAGAWGHEE\"), Protein(\"PAWHEAE\"), gap_open_penalty=10.,\n gap_extend_penalty=5., penalize_terminal_gaps=True)\n\n self.assertEqual(obs_msa, TabularMSA([Protein(\"HEAGAWGHEE\"),\n Protein(\"---PAWHEAE\")]))\n self.assertEqual(obs_score, 1.0)\n self.assertEqual(obs_start_end, [(0, 9), (0, 6)])\n\n def test_global_pairwise_align_nucleotide_penalize_terminal_gaps(self):\n # in these tests one sequence is about 3x the length of the other.\n # we toggle penalize_terminal_gaps to confirm that it results in\n # different alignments and alignment scores.\n seq1 = DNA(\"ACCGTGGACCGTTAGGATTGGACCCAAGGTTG\")\n seq2 = DNA(\"T\"*25 + \"ACCGTGGACCGTAGGATTGGACCAAGGTTA\" + \"A\"*25)\n\n obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(\n seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,\n match_score=5, mismatch_score=-4, penalize_terminal_gaps=False)\n\n self.assertEqual(\n obs_msa,\n TabularMSA([DNA(\"-------------------------ACCGTGGACCGTTAGGA\"\n \"TTGGACCCAAGGTTG-------------------------\"),\n DNA(\"TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA\"\n \"TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA\")]))\n self.assertEqual(obs_score, 131.0)\n\n obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(\n seq1, seq2, gap_open_penalty=5., gap_extend_penalty=0.5,\n match_score=5, mismatch_score=-4, penalize_terminal_gaps=True)\n\n self.assertEqual(\n obs_msa,\n TabularMSA([DNA(\"-------------------------ACCGTGGACCGTTAGGA\"\n \"TTGGACCCAAGGTT-------------------------G\"),\n DNA(\"TTTTTTTTTTTTTTTTTTTTTTTTTACCGTGGACCGT-AGGA\"\n \"TTGGACC-AAGGTTAAAAAAAAAAAAAAAAAAAAAAAAAA\")]))\n self.assertEqual(obs_score, 97.0)\n\n def test_local_pairwise_align_protein(self):\n obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(\n Protein(\"HEAGAWGHEE\"), Protein(\"PAWHEAE\"), gap_open_penalty=10.,\n gap_extend_penalty=5.)\n\n self.assertEqual(obs_msa, TabularMSA([Protein(\"AWGHE\"),\n Protein(\"AW-HE\")]))\n self.assertEqual(obs_score, 26.0)\n self.assertEqual(obs_start_end, [(4, 8), (1, 4)])\n\n obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(\n Protein(\"HEAGAWGHEE\"), Protein(\"PAWHEAE\"), gap_open_penalty=5.,\n gap_extend_penalty=0.5)\n\n self.assertEqual(obs_msa, TabularMSA([Protein(\"AWGHE-E\"),\n Protein(\"AW-HEAE\")]))\n self.assertEqual(obs_score, 32.0)\n self.assertEqual(obs_start_end, [(4, 9), (1, 6)])\n\n # 
Protein sequences with metadata\n obs_msa, obs_score, obs_start_end = local_pairwise_align_protein(\n Protein(\"HEAGAWGHEE\", metadata={'id': \"s1\"}),\n Protein(\"PAWHEAE\", metadata={'id': \"s2\"}),\n gap_open_penalty=10., gap_extend_penalty=5.)\n\n self.assertEqual(obs_msa, TabularMSA([Protein(\"AWGHE\"),\n Protein(\"AW-HE\")]))\n self.assertEqual(obs_score, 26.0)\n self.assertEqual(obs_start_end, [(4, 8), (1, 4)])\n\n # Fails when either input is passed as a TabularMSA\n self.assertRaises(TypeError, local_pairwise_align_protein,\n TabularMSA([Protein(\"HEAGAWGHEE\",\n metadata={'id': \"s1\"})]),\n Protein(\"PAWHEAE\", metadata={'id': \"s2\"}),\n gap_open_penalty=10.,\n gap_extend_penalty=5.)\n self.assertRaises(TypeError, local_pairwise_align_protein,\n Protein(\"HEAGAWGHEE\", metadata={'id': \"s1\"}),\n TabularMSA([Protein(\"PAWHEAE\",\n metadata={'id': \"s2\"})]),\n gap_open_penalty=10., gap_extend_penalty=5.)\n\n # TypeError on invalid input\n self.assertRaises(TypeError, local_pairwise_align_protein,\n 42, Protein(\"HEAGAWGHEE\"))\n self.assertRaises(TypeError, local_pairwise_align_protein,\n Protein(\"HEAGAWGHEE\"), 42)\n\n def test_global_pairwise_align_nucleotide(self):\n obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\"), DNA(\"GAACTTTGACGTAAC\"),\n gap_open_penalty=5., gap_extend_penalty=0.5, match_score=5,\n mismatch_score=-4)\n\n self.assertEqual(obs_msa, TabularMSA([DNA(\"G-ACCTTGACCAGGTACC\"),\n DNA(\"GAACTTTGAC---GTAAC\")]))\n self.assertEqual(obs_score, 41.0)\n self.assertEqual(obs_start_end, [(0, 16), (0, 14)])\n\n obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\"), DNA(\"GAACTTTGACGTAAC\"),\n gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,\n mismatch_score=-4)\n\n self.assertEqual(obs_msa, TabularMSA([DNA(\"-GACCTTGACCAGGTACC\"),\n DNA(\"GAACTTTGAC---GTAAC\")]))\n self.assertEqual(obs_score, 32.0)\n self.assertEqual(obs_start_end, [(0, 16), (0, 14)])\n\n # DNA sequences with metadata\n obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\", metadata={'id': \"s1\"}),\n DNA(\"GAACTTTGACGTAAC\", metadata={'id': \"s2\"}),\n gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,\n mismatch_score=-4)\n\n self.assertEqual(obs_msa, TabularMSA([DNA(\"-GACCTTGACCAGGTACC\"),\n DNA(\"GAACTTTGAC---GTAAC\")]))\n self.assertEqual(obs_score, 32.0)\n self.assertEqual(obs_start_end, [(0, 16), (0, 14)])\n\n # Align one DNA sequence and one TabularMSA, score computed manually\n obs_msa, obs_score, obs_start_end = global_pairwise_align_nucleotide(\n TabularMSA([DNA(\"GACCTTGACCAGGTACC\", metadata={'id': \"s1\"}),\n DNA(\"GACCATGACCAGGTACC\", metadata={'id': \"s2\"})]),\n DNA(\"GAACTTTGACGTAAC\", metadata={'id': \"s3\"}),\n gap_open_penalty=10., gap_extend_penalty=0.5, match_score=5,\n mismatch_score=-4)\n\n self.assertEqual(obs_msa, TabularMSA([DNA(\"-GACCTTGACCAGGTACC\"),\n DNA(\"-GACCATGACCAGGTACC\"),\n DNA(\"GAACTTTGAC---GTAAC\")]))\n self.assertEqual(obs_score, 27.5)\n self.assertEqual(obs_start_end, [(0, 16), (0, 14)])\n\n # TypeError on invalid input\n self.assertRaises(TypeError, global_pairwise_align_nucleotide,\n 42, DNA(\"ACGT\"))\n self.assertRaises(TypeError, global_pairwise_align_nucleotide,\n DNA(\"ACGT\"), 42)\n\n def test_global_pairwise_align_nucleotide_invalid_dtype(self):\n with self.assertRaisesRegex(TypeError,\n \"TabularMSA with DNA or RNA dtype.*dtype \"\n \"'Protein'\"):\n 
global_pairwise_align_nucleotide(TabularMSA([DNA('ACGT')]),\n TabularMSA([Protein('PAW')]))\n\n def test_local_pairwise_align_nucleotide(self):\n obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\"), DNA(\"GAACTTTGACGTAAC\"),\n gap_open_penalty=5., gap_extend_penalty=0.5, match_score=5,\n mismatch_score=-4)\n\n self.assertEqual(obs_msa, TabularMSA([DNA(\"ACCTTGACCAGGTACC\"),\n DNA(\"ACTTTGAC---GTAAC\")]))\n self.assertEqual(obs_score, 41.0)\n self.assertEqual(obs_start_end, [(1, 16), (2, 14)])\n\n obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\"), DNA(\"GAACTTTGACGTAAC\"),\n gap_open_penalty=10., gap_extend_penalty=5., match_score=5,\n mismatch_score=-4)\n\n self.assertEqual(obs_msa, TabularMSA([DNA(\"ACCTTGAC\"),\n DNA(\"ACTTTGAC\")]))\n self.assertEqual(obs_score, 31.0)\n self.assertEqual(obs_start_end, [(1, 8), (2, 9)])\n\n # DNA sequences with metadata\n obs_msa, obs_score, obs_start_end = local_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\", metadata={'id': \"s1\"}),\n DNA(\"GAACTTTGACGTAAC\", metadata={'id': \"s2\"}),\n gap_open_penalty=10., gap_extend_penalty=5., match_score=5,\n mismatch_score=-4)\n\n self.assertEqual(obs_msa, TabularMSA([DNA(\"ACCTTGAC\"),\n DNA(\"ACTTTGAC\")]))\n self.assertEqual(obs_score, 31.0)\n self.assertEqual(obs_start_end, [(1, 8), (2, 9)])\n\n # Fails when either input is passed as a TabularMSA\n self.assertRaises(TypeError, local_pairwise_align_nucleotide,\n TabularMSA([DNA(\"GACCTTGACCAGGTACC\",\n metadata={'id': \"s1\"})]),\n DNA(\"GAACTTTGACGTAAC\", metadata={'id': \"s2\"}),\n gap_open_penalty=10., gap_extend_penalty=5.,\n match_score=5, mismatch_score=-4)\n self.assertRaises(TypeError, local_pairwise_align_nucleotide,\n DNA(\"GACCTTGACCAGGTACC\", metadata={'id': \"s1\"}),\n TabularMSA([DNA(\"GAACTTTGACGTAAC\",\n metadata={'id': \"s2\"})]),\n gap_open_penalty=10., gap_extend_penalty=5.,\n match_score=5, mismatch_score=-4)\n\n # TypeError on invalid input\n self.assertRaises(TypeError, local_pairwise_align_nucleotide,\n 42, DNA(\"ACGT\"))\n self.assertRaises(TypeError, local_pairwise_align_nucleotide,\n DNA(\"ACGT\"), 42)\n\n def test_nucleotide_aligners_use_substitution_matrices(self):\n alt_sub = make_identity_substitution_matrix(10, -10)\n # alternate substitution matrix yields different alignment (the\n # aligned sequences and the scores are different) with local alignment\n msa_no_sub, score_no_sub, start_end_no_sub = \\\n local_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\"), DNA(\"GAACTTTGACGTAAC\"),\n gap_open_penalty=10., gap_extend_penalty=5., match_score=5,\n mismatch_score=-4)\n\n msa_alt_sub, score_alt_sub, start_end_alt_sub = \\\n local_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\"), DNA(\"GAACTTTGACGTAAC\"),\n gap_open_penalty=10., gap_extend_penalty=5., match_score=5,\n mismatch_score=-4, substitution_matrix=alt_sub)\n\n self.assertNotEqual(msa_no_sub, msa_alt_sub)\n self.assertNotEqual(score_no_sub, score_alt_sub)\n self.assertNotEqual(start_end_no_sub, start_end_alt_sub)\n\n # alternate substitution matrix yields different alignment (the\n # aligned sequences and the scores are different) with global alignment\n msa_no_sub, score_no_sub, start_end_no_sub = \\\n global_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\"), DNA(\"GAACTTTGACGTAAC\"),\n gap_open_penalty=10., gap_extend_penalty=5., match_score=5,\n mismatch_score=-4)\n\n msa_alt_sub, score_alt_sub, start_end_alt_sub = \\\n 
global_pairwise_align_nucleotide(\n DNA(\"GACCTTGACCAGGTACC\"), DNA(\"GAACTTTGACGTAAC\"),\n gap_open_penalty=10., gap_extend_penalty=5., match_score=5,\n mismatch_score=-4, substitution_matrix=alt_sub)\n\n self.assertNotEqual(msa_no_sub, msa_alt_sub)\n self.assertNotEqual(score_no_sub, score_alt_sub)\n self.assertEqual(start_end_no_sub, start_end_alt_sub)\n\n def test_local_pairwise_align_invalid_type(self):\n with self.assertRaisesRegex(TypeError,\n 'GrammaredSequence.*Sequence'):\n local_pairwise_align(DNA('ACGT'), Sequence('ACGT'), 1.0, 1.0, {})\n\n def test_local_pairwise_align_type_mismatch(self):\n with self.assertRaisesRegex(TypeError,\n \"same type: 'DNA' != 'RNA'\"):\n local_pairwise_align(DNA('ACGT'), RNA('ACGU'), 1.0, 1.0, {})\n\n def test_init_matrices_sw(self):\n expected_score_m = np.zeros((5, 4))\n expected_tback_m = [[0, 0, 0, 0],\n [0, -1, -1, -1],\n [0, -1, -1, -1],\n [0, -1, -1, -1],\n [0, -1, -1, -1]]\n actual_score_m, actual_tback_m = _init_matrices_sw(\n TabularMSA([DNA('AAA', metadata={'id': 'id'})]),\n TabularMSA([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)\n np.testing.assert_array_equal(actual_score_m, expected_score_m)\n np.testing.assert_array_equal(actual_tback_m, expected_tback_m)\n\n def test_init_matrices_nw(self):\n expected_score_m = [[0, -5, -7, -9],\n [-5, 0, 0, 0],\n [-7, 0, 0, 0],\n [-9, 0, 0, 0],\n [-11, 0, 0, 0]]\n expected_tback_m = [[0, 3, 3, 3],\n [2, -1, -1, -1],\n [2, -1, -1, -1],\n [2, -1, -1, -1],\n [2, -1, -1, -1]]\n actual_score_m, actual_tback_m = _init_matrices_nw(\n TabularMSA([DNA('AAA', metadata={'id': 'id'})]),\n TabularMSA([DNA('AAAA', metadata={'id': 'id'})]), 5, 2)\n np.testing.assert_array_equal(actual_score_m, expected_score_m)\n np.testing.assert_array_equal(actual_tback_m, expected_tback_m)\n\n def test_compute_substitution_score(self):\n # these results were computed manually\n subs_m = make_identity_substitution_matrix(5, -4)\n gap_chars = set('-.')\n\n self.assertEqual(\n _compute_substitution_score(['A'], ['A'], subs_m, 0, gap_chars),\n 5.0)\n self.assertEqual(\n _compute_substitution_score(['A', 'A'], ['A'], subs_m, 0,\n gap_chars),\n 5.0)\n self.assertEqual(\n _compute_substitution_score(['A', 'C'], ['A'], subs_m, 0,\n gap_chars),\n 0.5)\n self.assertEqual(\n _compute_substitution_score(['A', 'C'], ['A', 'C'], subs_m, 0,\n gap_chars),\n 0.5)\n self.assertEqual(\n _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0,\n gap_chars),\n 2.5)\n self.assertEqual(\n _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 1,\n gap_chars),\n 3)\n\n # alt subs_m\n subs_m = make_identity_substitution_matrix(1, -2)\n\n self.assertEqual(\n _compute_substitution_score(['A', 'A'], ['A', '-'], subs_m, 0,\n gap_chars),\n 0.5)\n\n def test_compute_score_and_traceback_matrices(self):\n # these results were computed manually\n expected_score_m = [[0, -5, -7, -9],\n [-5, 2, -3, -5],\n [-7, -3, 4, -1],\n [-9, -5, -1, 6],\n [-11, -7, -3, 1]]\n expected_tback_m = [[0, 3, 3, 3],\n [2, 1, 3, 3],\n [2, 2, 1, 3],\n [2, 2, 2, 1],\n [2, 2, 2, 2]]\n m = make_identity_substitution_matrix(2, -1)\n actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(\n TabularMSA([DNA('ACG', metadata={'id': 'id'})]),\n TabularMSA([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)\n np.testing.assert_array_equal(actual_score_m, expected_score_m)\n np.testing.assert_array_equal(actual_tback_m, expected_tback_m)\n\n # different sequences\n # these results were computed manually\n expected_score_m = [[0, -5, -7, -9],\n [-5, 2, -3, -5],\n [-7, -3, 4, 
-1],\n [-9, -5, -1, 3],\n [-11, -7, -3, -2]]\n expected_tback_m = [[0, 3, 3, 3],\n [2, 1, 3, 3],\n [2, 2, 1, 3],\n [2, 2, 2, 1],\n [2, 2, 2, 1]]\n m = make_identity_substitution_matrix(2, -1)\n actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(\n TabularMSA([DNA('ACC', metadata={'id': 'id'})]),\n TabularMSA([DNA('ACGT', metadata={'id': 'id'})]), 5, 2, m)\n np.testing.assert_array_equal(actual_score_m, expected_score_m)\n np.testing.assert_array_equal(actual_tback_m, expected_tback_m)\n\n # four sequences provided in two alignments\n # these results were computed manually\n expected_score_m = [[0, -5, -7, -9],\n [-5, 2, -3, -5],\n [-7, -3, 4, -1],\n [-9, -5, -1, 3],\n [-11, -7, -3, -2]]\n expected_tback_m = [[0, 3, 3, 3],\n [2, 1, 3, 3],\n [2, 2, 1, 3],\n [2, 2, 2, 1],\n [2, 2, 2, 1]]\n m = make_identity_substitution_matrix(2, -1)\n actual_score_m, actual_tback_m = _compute_score_and_traceback_matrices(\n TabularMSA([DNA('ACC', metadata={'id': 's1'}),\n DNA('ACC', metadata={'id': 's2'})]),\n TabularMSA([DNA('ACGT', metadata={'id': 's3'}),\n DNA('ACGT', metadata={'id': 's4'})]), 5, 2, m)\n np.testing.assert_array_equal(actual_score_m, expected_score_m)\n np.testing.assert_array_equal(actual_tback_m, expected_tback_m)\n\n def test_compute_score_and_traceback_matrices_invalid(self):\n # if the sequence contains a character that is not in the\n # substitution matrix, an informative error should be raised\n m = make_identity_substitution_matrix(2, -1)\n self.assertRaises(ValueError, _compute_score_and_traceback_matrices,\n TabularMSA([DNA('AWG', metadata={'id': 'id'})]),\n TabularMSA([DNA('ACGT', metadata={'id': 'id'})]),\n 5, 2, m)\n\n def test_traceback(self):\n score_m = [[0, -5, -7, -9],\n [-5, 2, -3, -5],\n [-7, -3, 4, -1],\n [-9, -5, -1, 6],\n [-11, -7, -3, 1]]\n score_m = np.array(score_m)\n tback_m = [[0, 3, 3, 3],\n [2, 1, 3, 3],\n [2, 2, 1, 3],\n [2, 2, 2, 1],\n [2, 2, 2, 2]]\n tback_m = np.array(tback_m)\n # start at bottom-right\n expected = ([DNA(\"ACG-\")], [DNA(\"ACGT\")], 1, 0, 0)\n actual = _traceback(tback_m, score_m,\n TabularMSA([DNA('ACG', metadata={'id': ''})]),\n TabularMSA([DNA('ACGT', metadata={'id': ''})]),\n 4, 3)\n self.assertEqual(actual, expected)\n\n # four sequences in two alignments\n score_m = [[0, -5, -7, -9],\n [-5, 2, -3, -5],\n [-7, -3, 4, -1],\n [-9, -5, -1, 6],\n [-11, -7, -3, 1]]\n score_m = np.array(score_m)\n tback_m = [[0, 3, 3, 3],\n [2, 1, 3, 3],\n [2, 2, 1, 3],\n [2, 2, 2, 1],\n [2, 2, 2, 2]]\n tback_m = np.array(tback_m)\n # start at bottom-right\n expected = ([DNA(\"ACG-\"),\n DNA(\"ACG-\")],\n [DNA(\"ACGT\"),\n DNA(\"ACGT\")],\n 1, 0, 0)\n actual = _traceback(tback_m, score_m,\n TabularMSA([DNA('ACG', metadata={'id': 's1'}),\n DNA('ACG', metadata={'id': 's2'})]),\n TabularMSA([DNA('ACGT', metadata={'id': 's3'}),\n DNA('ACGT', metadata={'id': 's4'})]),\n 4, 3)\n self.assertEqual(actual, expected)\n\n # start at highest-score\n expected = ([DNA(\"ACG\")],\n [DNA(\"ACG\")], 6, 0, 0)\n actual = _traceback(tback_m, score_m,\n TabularMSA([DNA('ACG', metadata={'id': ''})]),\n TabularMSA([DNA('ACGT', metadata={'id': ''})]),\n 3, 3)\n self.assertEqual(actual, expected)\n\n # terminate traceback before top-right\n tback_m = [[0, 3, 3, 3],\n [2, 1, 3, 3],\n [2, 2, 0, 3],\n [2, 2, 2, 1],\n [2, 2, 2, 2]]\n tback_m = np.array(tback_m)\n expected = ([DNA(\"G\")],\n [DNA(\"G\")], 6, 2, 2)\n actual = _traceback(tback_m, score_m,\n TabularMSA([DNA('ACG', metadata={'id': ''})]),\n TabularMSA([DNA('ACGT', metadata={'id': ''})]),\n 3, 3)\n 
self.assertEqual(actual, expected)\n\n def test_first_largest(self):\n l = [(5, 'a'), (5, 'b'), (5, 'c')]\n self.assertEqual(_first_largest(l), (5, 'a'))\n l = [(5, 'c'), (5, 'b'), (5, 'a')]\n self.assertEqual(_first_largest(l), (5, 'c'))\n l = [(5, 'c'), (6, 'b'), (5, 'a')]\n self.assertEqual(_first_largest(l), (6, 'b'))\n # works for more than three entries\n l = [(5, 'c'), (6, 'b'), (5, 'a'), (7, 'd')]\n self.assertEqual(_first_largest(l), (7, 'd'))\n # Note that max([(5, 'a'), (5, 'c')]) == max([(5, 'c'), (5, 'a')])\n # but for the purposes needed here, we want the max to be the same\n # regardless of what the second item in the tuple is.\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame", "numpy.array", "numpy.arange" ], [ "numpy.array", "numpy.testing.assert_array_equal", "numpy.zeros" ] ]
ISM-Weimar/DeepEnergyMethods
[ "3a51131e0827446bf5986d698aaac396c7fa5037" ]
[ "tf2/Poisson2D_Dirichlet_SinCos.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nPoisson equation example\nSolve the equation -\\Delta u(x) = f(x) for x\\in\\Omega with Dirichlet boundary conditions u(x)=u0 for x\\in\\partial\\Omega\n@author: cosmin\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport time\nfrom utils.tfp_loss import tfp_function_factory\nfrom utils.Geom_examples import Quadrilateral\nfrom utils.Solvers import Poisson2D_coll\nimport tensorflow_probability as tfp\nimport matplotlib.pyplot as plt\n#make figures bigger on HiDPI monitors\nimport matplotlib as mpl\nmpl.rcParams['figure.dpi'] = 200\ntf.random.set_seed(42)\n \nif __name__ == \"__main__\":\n \n #define the RHS function f(x)\n kx = 1\n ky = 1\n def rhs_fun(x,y):\n f = (kx**2+ky**2)*np.pi**2*np.sin(kx*np.pi*x)*np.sin(ky*np.pi*y)\n return f\n \n def exact_sol(x,y):\n u = np.sin(kx*np.pi*x)*np.sin(ky*np.pi*y)\n return u\n \n def deriv_exact_sol(x,y):\n du = kx*np.pi*np.cos(kx*np.pi*x)*np.sin(ky*np.pi*y)\n dv = ky*np.pi*np.sin(kx*np.pi*x)*np.cos(ky*np.pi*y)\n return du, dv\n \n #define the input and output data set\n xmin = 0\n xmax = 1\n ymin = 0\n ymax = 1\n domainCorners = np.array([[xmin,ymin], [xmin,ymax], [xmax,ymin], [xmax,ymax]])\n myQuad = Quadrilateral(domainCorners)\n\n numPtsU = 28\n numPtsV = 28\n xPhys, yPhys = myQuad.getUnifIntPts(numPtsU, numPtsV, [0,0,0,0])\n data_type = \"float32\"\n \n Xint = np.concatenate((xPhys,yPhys),axis=1).astype(data_type)\n Yint = rhs_fun(Xint[:,[0]], Xint[:,[1]])\n \n xPhysBnd, yPhysBnd, _, _s = myQuad.getUnifEdgePts(numPtsU, numPtsV, [1,1,1,1])\n Xbnd = np.concatenate((xPhysBnd, yPhysBnd), axis=1).astype(data_type)\n Ybnd = exact_sol(Xbnd[:,[0]], Xbnd[:,[1]])\n \n #define the model \n tf.keras.backend.set_floatx(data_type)\n l1 = tf.keras.layers.Dense(20, \"tanh\")\n l2 = tf.keras.layers.Dense(20, \"tanh\")\n l3 = tf.keras.layers.Dense(20, \"tanh\")\n l4 = tf.keras.layers.Dense(1, None)\n train_op = tf.keras.optimizers.Adam()\n num_epoch = 5000\n print_epoch = 100\n pred_model = Poisson2D_coll([l1, l2, l3, l4], train_op, num_epoch, print_epoch)\n \n #convert the training data to tensors\n Xint_tf = tf.convert_to_tensor(Xint)\n Yint_tf = tf.convert_to_tensor(Yint)\n Xbnd_tf = tf.convert_to_tensor(Xbnd)\n Ybnd_tf = tf.convert_to_tensor(Ybnd)\n \n #training\n print(\"Training (ADAM)...\")\n t0 = time.time()\n pred_model.network_learn(Xint_tf, Yint_tf, Xbnd_tf, Ybnd_tf)\n t1 = time.time()\n print(\"Time taken (ADAM)\", t1-t0, \"seconds\")\n print(\"Training (LBFGS)...\")\n \n loss_func = tfp_function_factory(pred_model, Xint_tf, Yint_tf, Xbnd_tf, Ybnd_tf)\n #loss_func = scipy_function_factory(pred_model, Xint_tf, Yint_tf, Xbnd_tf, Ybnd_tf)\n # convert initial model parameters to a 1D tf.Tensor\n init_params = tf.dynamic_stitch(loss_func.idx, pred_model.trainable_variables)#.numpy()\n # train the model with L-BFGS solver\n results = tfp.optimizer.lbfgs_minimize(\n value_and_gradients_function=loss_func, initial_position=init_params,\n max_iterations=1500, num_correction_pairs=50, tolerance=1e-14) \n # results = scipy.optimize.minimize(fun=loss_func, x0=init_params, jac=True, method='L-BFGS-B',\n # options={'disp': None, 'maxls': 50, 'iprint': -1, \n # 'gtol': 1e-12, 'eps': 1e-12, 'maxiter': 50000, 'ftol': 1e-12, \n # 'maxcor': 50, 'maxfun': 50000})\n # after training, the final optimized parameters are still in results.position\n # so we have to manually put them back to the model\n loss_func.assign_new_model_parameters(results.position)\n #loss_func.assign_new_model_parameters(results.x)\n 
t2 = time.time()\n    print(\"Time taken (LBFGS)\", t2-t1, \"seconds\")\n    print(\"Time taken (all)\", t2-t0, \"seconds\")\n    print(\"Testing...\")\n    numPtsUTest = 2*numPtsU\n    numPtsVTest = 2*numPtsV\n    xPhysTest, yPhysTest = myQuad.getUnifIntPts(numPtsUTest, numPtsVTest, [1,1,1,1])\n    XTest = np.concatenate((xPhysTest,yPhysTest),axis=1).astype(data_type)\n    XTest_tf = tf.convert_to_tensor(XTest)\n    YTest = pred_model(XTest_tf).numpy()\n    YExact = exact_sol(XTest[:,[0]], XTest[:,[1]])\n\n    xPhysTest2D = np.resize(XTest[:,0], [numPtsUTest, numPtsVTest])\n    yPhysTest2D = np.resize(XTest[:,1], [numPtsUTest, numPtsVTest])\n    YExact2D = np.resize(YExact, [numPtsUTest, numPtsVTest])\n    YTest2D = np.resize(YTest, [numPtsUTest, numPtsVTest])\n    plt.contourf(xPhysTest2D, yPhysTest2D, YExact2D, 255, cmap=plt.cm.jet)\n    plt.colorbar()\n    plt.title(\"Exact solution\")\n    plt.show()\n    plt.contourf(xPhysTest2D, yPhysTest2D, YTest2D, 255, cmap=plt.cm.jet)\n    plt.colorbar()\n    plt.title(\"Computed solution\")\n    plt.show()\n    plt.contourf(xPhysTest2D, yPhysTest2D, YExact2D-YTest2D, 255, cmap=plt.cm.jet)\n    plt.colorbar()\n    plt.title(\"Error: U_exact-U_computed\")\n    plt.show()\n\n    err = YExact - YTest\n    # relative L2 error, normalized by the norm of the exact solution\n    print(\"Relative L2-error norm: {}\".format(np.linalg.norm(err)/np.linalg.norm(YExact)))\n" ]
[ [ "numpy.concatenate", "tensorflow.keras.backend.set_floatx", "numpy.array", "tensorflow.convert_to_tensor", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.contourf", "numpy.sin", "numpy.linalg.norm", "tensorflow.random.set_seed", "matplotlib.pyplot.title", "tensorflow.keras.layers.Dense", "numpy.resize", "numpy.cos", "matplotlib.pyplot.show", "tensorflow.keras.optimizers.Adam", "tensorflow.dynamic_stitch" ] ]
nmallinar/EigenPro-pytorch
[ "43b4b8d92665b490f5773339a4288c92a111a2e1" ]
[ "mnist.py" ]
[ "import keras\nimport numpy as np\n\nfrom keras.datasets.mnist import load_data\n\n\ndef unit_range_normalize(samples):\n\tmin_vals = np.min(samples, axis=0)\n\tmax_vals = np.max(samples, axis=0)\n\tdiff = max_vals - min_vals\n\tdiff[diff <= 0.0] = np.maximum(1.0, min_vals[diff <= 0.0])\n\tnormalized = (samples - min_vals) / diff\n\treturn normalized\n\n\ndef load():\n # input image dimensions\n n_class = 10\n img_rows, img_cols = 28, 28\n\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = load_data()\n\n x_train = x_train.reshape(x_train.shape[0], img_rows * img_cols)\n x_test = x_test.reshape(x_test.shape[0], img_rows * img_cols)\n x_train = x_train.astype('float32') / 255\n x_test = x_test.astype('float32') / 255\n\n x_train = unit_range_normalize(x_train)\n x_test = unit_range_normalize(x_test)\n y_train = keras.utils.np_utils.to_categorical(y_train, n_class)\n y_test = keras.utils.np_utils.to_categorical(y_test, n_class)\n print(\"Load MNIST dataset.\")\n print(x_train.shape[0], 'train samples')\n print(x_test.shape[0], 'test samples')\n\n return (x_train, y_train), (x_test, y_test)\n" ]
[ [ "numpy.max", "numpy.min", "numpy.maximum" ] ]
MIR-MU/sr
[ "fe4c3aff1d1646201ca58cff705032dc7dc0a64b" ]
[ "generate_header_and_model.py" ]
[ "try:\n import tensorflow.compat.v1 as tf\nexcept ImportError:\n import tensorflow as tf\nimport numpy as np\nimport argparse\nimport os\nimport enum\nfrom models.model_espcn import ESPCN\nfrom models.model_srcnn import SRCNN\nfrom models.model_vespcn import VESPCN\nfrom models.model_vsrnet import VSRnet\nfrom collections import OrderedDict\n\n@enum.unique\nclass Padding(enum.Enum):\n Valid = 0\n Same = 1\n Same_clamp_to_edge = 2\n\ndef get_arguments():\n parser = argparse.ArgumentParser(description='generate c header with model weights and binary model file')\n parser.add_argument('--model', type=str, default='srcnn', choices=['srcnn', 'espcn', 'vespcn', 'vsrnet'],\n help='What model to use for generation')\n parser.add_argument('--output_folder', type=str, default='./',\n help='where to put generated files')\n parser.add_argument('--ckpt_path', default=None,\n help='Path to the model checkpoint, from which weights are loaded')\n parser.add_argument('--use_mc', action='store_true',\n help='Whether motion compensation is used in video super resolution model')\n parser.add_argument('--scale_factor', type=int, default=2, choices=[2, 3, 4],\n help='What scale factor was used for chosen model')\n\n return parser.parse_args()\n\n\ndef dump_to_file(file, values, name):\n file.write('\\nstatic const float ' + name + '[] = {\\n')\n\n values_flatten = values.flatten()\n\n max_len = 0\n for value in values_flatten:\n if len(str(value)) > max_len:\n max_len = len(str(value))\n\n counter = 0\n for i in range(len(values_flatten)):\n counter += 1\n if counter == 4:\n file.write(str(values_flatten[i]) + 'f')\n if i != len(values_flatten) - 1:\n file.write(',')\n file.write('\\n')\n counter = 0\n else:\n if counter == 1:\n file.write(' ')\n file.write(str(values_flatten[i]) + 'f')\n if i != len(values_flatten) - 1:\n file.write(',')\n file.write(' ' * (1 + max_len - len(str(values_flatten[i]))))\n if counter != 0:\n file.write('\\n')\n file.write('};\\n')\n\n file.write('\\nstatic const long int ' + name + '_dims[] = {\\n')\n for i in range(len(values.shape)):\n file.write(' ')\n file.write(str(values.shape[i]))\n if i != len(values.shape) - 1:\n file.write(',\\n')\n file.write('\\n};\\n')\n\n\ndef write_conv_layer(kernel, bias, dilation_rate, padding, activation, model_file):\n kernel = np.transpose(kernel, [3, 0, 1, 2])\n np.array([1, dilation_rate, padding.value, activation, kernel.shape[3], kernel.shape[0], kernel.shape[1]], dtype=np.uint32).tofile(model_file)\n kernel.tofile(model_file)\n bias.tofile(model_file)\n\n\ndef write_depth_to_space_layer(block_size, model_file):\n np.array([2, block_size], dtype=np.uint32).tofile(model_file)\n\n\ndef prepare_native_mf_srcnn(weights, model_file):\n np.array([3], dtype=np.uint32).tofile(model_file)\n write_conv_layer(weights['srcnn/conv1/kernel:0'], weights['srcnn/conv1/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n write_conv_layer(weights['srcnn/conv2/kernel:0'], weights['srcnn/conv2/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n write_conv_layer(weights['srcnn/conv3/kernel:0'], weights['srcnn/conv3/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n\n\ndef prepare_native_mf_espcn(weights, model_file, scale_factor):\n np.array([4], dtype=np.uint32).tofile(model_file)\n write_conv_layer(weights['espcn/conv1/kernel:0'], weights['espcn/conv1/bias:0'], 1, Padding.Same_clamp_to_edge, 1, model_file)\n write_conv_layer(weights['espcn/conv2/kernel:0'], weights['espcn/conv2/bias:0'], 1, Padding.Same_clamp_to_edge, 1, model_file)\n 
write_conv_layer(weights['espcn/conv3/kernel:0'], weights['espcn/conv3/bias:0'], 1, Padding.Same_clamp_to_edge, 2, model_file)\n write_depth_to_space_layer(scale_factor, model_file)\n\n\ndef prepare_native_mf_vespcn(weights, model_file, scale_factor):\n np.array([6], dtype=np.uint32).tofile(model_file)\n write_conv_layer(weights['vespcn/conv1/kernel:0'], weights['vespcn/conv1/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n write_conv_layer(weights['vespcn/conv2/kernel:0'], weights['vespcn/conv2/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n write_conv_layer(weights['vespcn/conv3/kernel:0'], weights['vespcn/conv3/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n write_conv_layer(weights['vespcn/conv4/kernel:0'], weights['vespcn/conv4/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n write_conv_layer(weights['vespcn/conv5/kernel:0'], weights['vespcn/conv5/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n write_depth_to_space_layer(scale_factor, model_file)\n\n\ndef prepare_native_mf_vsrnet(weights, model_file):\n np.array([3], dtype=np.uint32).tofile(model_file)\n write_conv_layer(weights['vsrnet/conv1/kernel:0'], weights['vsrnet/conv1/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n write_conv_layer(weights['vsrnet/conv2/kernel:0'], weights['vsrnet/conv2/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n write_conv_layer(weights['vsrnet/conv3/kernel:0'], weights['vsrnet/conv3/bias:0'], 1, Padding.Same_clamp_to_edge, 0, model_file)\n\n\ndef main():\n args = get_arguments()\n\n if not os.path.exists(args.output_folder):\n os.mkdir(args.output_folder)\n\n if args.ckpt_path is None:\n print(\"Path to the checkpoint file was not provided\")\n exit(1)\n\n if args.model == 'srcnn':\n model = SRCNN(args)\n elif args.model == 'espcn':\n model = ESPCN(args)\n elif args.model == 'vespcn':\n model = VESPCN(args)\n elif args.model == 'vsrnet':\n model = VSRnet(args)\n else:\n exit(1)\n\n with tf.Session() as sess:\n input_ph = model.get_placeholder()\n predicted = model.load_model(input_ph)\n\n if args.model == 'vespcn':\n predicted = predicted[2]\n predicted = tf.identity(predicted, name='y')\n\n if os.path.isdir(args.ckpt_path):\n args.ckpt_path = tf.train.latest_checkpoint(args.ckpt_path)\n saver = tf.train.Saver()\n saver.restore(sess, args.ckpt_path)\n\n with open(os.path.join(args.output_folder, args.model + '.model'), 'wb') as native_mf:\n weights = model.get_model_weights(sess)\n if args.model == 'srcnn':\n prepare_native_mf_srcnn(weights, native_mf)\n elif args.model == 'espcn':\n prepare_native_mf_espcn(weights, native_mf, args.scale_factor)\n elif args.model == 'vespcn':\n prepare_native_mf_vespcn(weights, native_mf, args.scale_factor)\n elif args.model == 'vsrnet':\n prepare_native_mf_vsrnet(weights, native_mf)\n\n with open(os.path.join(args.output_folder, 'dnn_' + args.model + '.h'), 'w') as header:\n header.write('/**\\n')\n header.write(' * @file\\n')\n header.write(' * Default cnn weights for x' + str(args.scale_factor) + ' upscaling with ' +\n args.model + ' model.\\n')\n header.write(' */\\n\\n')\n\n header.write('#ifndef AVFILTER_DNN_' + args.model.upper() + '_H\\n')\n header.write('#define AVFILTER_DNN_' + args.model.upper() + '_H\\n')\n\n variables = tf.trainable_variables()\n var_dict = OrderedDict()\n for variable in variables:\n var_name = variable.name.split(':')[0].replace('/', '_')\n value = variable.eval()\n if 'kernel' in var_name:\n value = np.transpose(value, axes=(3, 0, 1, 2))\n var_dict[var_name] = value\n\n 
for name, value in var_dict.items():\n dump_to_file(header, value, name)\n\n header.write('#endif\\n')\n\n output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ['y'])\n tf.train.write_graph(output_graph_def, args.output_folder, args.model + '.pb', as_text=False)\n\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "tensorflow.trainable_variables", "numpy.array", "tensorflow.train.latest_checkpoint", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.graph_util.convert_variables_to_constants", "numpy.transpose", "tensorflow.train.write_graph", "tensorflow.identity" ] ]
ArnovanHilten/NVFlare
[ "bb45e7d606849c6bc8f7542347459c6ba1be00c4" ]
[ "nvflare/private/fed/utils/numproto.py" ]
[ "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"NumPy ndarray to protobuf serialization and deserialization.\"\"\"\n\nfrom io import BytesIO\n\nimport numpy as np\n\nfrom nvflare.private.fed.protos.federated_pb2 import NDArray\n\n\ndef ndarray_to_proto(nda: np.ndarray) -> NDArray:\n \"\"\"Serializes a numpy array into an NDArray protobuf message.\n\n Args:\n nda (np.ndarray): numpy array to serialize.\n\n Returns:\n Returns an NDArray protobuf message.\n \"\"\"\n nda_bytes = BytesIO()\n np.save(nda_bytes, nda, allow_pickle=False)\n\n return NDArray(ndarray=nda_bytes.getvalue())\n\n\ndef proto_to_ndarray(nda_proto: NDArray) -> np.ndarray:\n \"\"\"Deserializes an NDArray protobuf message into a numpy array.\n\n Args:\n nda_proto (NDArray): NDArray protobuf message to deserialize.\n\n Returns:\n Returns a numpy.ndarray.\n \"\"\"\n nda_bytes = BytesIO(nda_proto.ndarray)\n\n return np.load(nda_bytes, allow_pickle=False)\n\n\ndef bytes_to_proto(data: bytes) -> NDArray:\n \"\"\"Serializes a bytes into an NDArray protobuf message.\n\n Args:\n data : bytes data\n\n Returns:\n Returns an NDArray protobuf message.\n \"\"\"\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes but got {}\".format(type(data)))\n return NDArray(ndarray=data)\n\n\ndef proto_to_bytes(nda_proto: NDArray) -> bytes:\n \"\"\"Deserializes an NDArray protobuf message into bytes.\n\n Args:\n nda_proto (NDArray): bytes.\n\n Returns:\n Returns bytes.\n \"\"\"\n nda_bytes = BytesIO(nda_proto.ndarray)\n\n return nda_bytes.read()\n" ]
[ [ "numpy.load", "numpy.save" ] ]