repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
bkm009/captcha-it
|
[
"bf9875d909cf0ae59aab7c1e92988e2d90b79d9a"
] |
[
"Captcha/Captcha/CaptchaGenerator.py"
] |
[
"import numpy as np\nimport cv2\nfrom random import randint, uniform\nimport base64\n\nW = 80\nH = 80\n\nVERTICAL_LAYER = 2\nHORIZONTAL_LAYER = 3\n\nfont = cv2.FONT_ITALIC\n\nINPUT_CHOICES = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',\n 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n\n\nclass Captcha:\n\n def generate_captcha(self):\n try:\n GENERATED_CHOICES = []\n ans = \"\"\n root_layer = np.full((H * VERTICAL_LAYER, W * HORIZONTAL_LAYER, 3), fill_value=[255, 255, 255],\n dtype=np.uint8)\n\n for x in range(VERTICAL_LAYER):\n for y in range(HORIZONTAL_LAYER):\n R, G, B = randint(0, 255), randint(0, 255), randint(0, 255)\n\n generated_index = randint(0, len(INPUT_CHOICES) - 1)\n gi = 0\n while generated_index in GENERATED_CHOICES and gi < len(INPUT_CHOICES):\n generated_index = randint(0, len(INPUT_CHOICES) - 1)\n gi += 1\n\n GENERATED_CHOICES.append(generated_index)\n input_text = INPUT_CHOICES[generated_index]\n ans += input_text\n\n temp_image = np.full((H, W, 3), fill_value=[R, G, B], dtype=np.uint8)\n font_scale = uniform(1.2, 2.0)\n cv2.putText(temp_image, str(input_text), (10, 55), font, font_scale, (255 - R, 255 - G, 255 - B), 4)\n\n if randint(2, 9) % 3 == 0 and (255 - R, 255 - G, 255 - B) != (0, 0, 0):\n kernel = np.ones((2, 2), np.uint8)\n temp_image = cv2.morphologyEx(temp_image, cv2.MORPH_GRADIENT, kernel)\n\n root_layer[x * H:(x + 1) * H, y * W:(y + 1) * W] = temp_image\n\n r, buffer = cv2.imencode('.jpg', root_layer)\n return {\"data\": 'data:image/jpeg;base64, ' + base64.b64encode(buffer).decode(), \"answer\": ans }\n\n\n except Exception as e:\n return \"Exception as {}\".format(e)\n"
] |
[
[
"numpy.ones",
"numpy.full"
]
] |
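The `apis` column lists the NumPy entry points this file exercises. A minimal sketch (illustrative values, not from the repo) of how `numpy.full` and `numpy.ones` are used in `CaptchaGenerator.py` above; assumes `numpy` and `opencv-python` are installed:

```python
import numpy as np
import cv2

# One 80x80 colored tile, built the same way the captcha code uses numpy.full.
tile = np.full((80, 80, 3), fill_value=[30, 120, 200], dtype=np.uint8)

# 2x2 structuring element for cv2.morphologyEx, as in the outline branch above.
kernel = np.ones((2, 2), np.uint8)
outlined = cv2.morphologyEx(tile, cv2.MORPH_GRADIENT, kernel)
print(tile.shape, outlined.dtype)  # (80, 80, 3) uint8
```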
sharmapulkit/few-shot-domain-adaptation-by-causal-mechanism-transfer
|
[
"05b4cab288dbb2ad7e30bbd174c22beb39d5c4cd"
] |
[
"causal_da/components/inn_torch/glow_nonexponential.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n#import inn_torch.layers as layers\n#from inn_torch.wrappers import SequentialFlow\n\nfrom .wrappers import SequentialFlow\nfrom . import layers as layers\n\n# Type hinting\nfrom torch import FloatTensor\n\n\nclass GlowNonExponential(nn.Module):\n \"\"\"A Glow model based on the exponential-activation-free affine coupling layers.\"\"\"\n def __init__(self, depth: int, dim: int, n_hidden: int = 20):\n \"\"\"\n Parameters:\n depth: The number of the (invertible linear, affine coupling) flow layer pairs to stack.\n dim: The dimension of the input data.\n n_hidden: The number of the hidden units of the parametrization of $s$ and $t$ in each affine coupling layer (the number of layers is fixed at one-hidden-layer).\n\n Note:\n Since we employ invertible linear layers, we do not require\n dimension-swap layers in between the affine coupling layers.\n \"\"\"\n super().__init__()\n chain = []\n D, d = dim, dim // 2\n chain.append(layers.ActNorm(dim))\n for _ in range(depth):\n chain.append(layers.InvertibleLinear(dim))\n chain.append(\n layers.NonexponentialAffineCouplingLayer(\n d, _NN(d, n_hidden, D - d)))\n self.net = SequentialFlow(chain)\n\n def forward(self, x: FloatTensor) -> FloatTensor:\n \"\"\"Perform forward propagation.\n\n Parameters:\n x: input tensor.\n \"\"\"\n # x = x.to('cuda')\n return self.net(x)\n\n def inv(self, x: FloatTensor) -> FloatTensor:\n \"\"\"Perform the inverse computation in a batch.\n\n Parameters:\n x: input tensor.\n \"\"\"\n return self.net.inv(x)\n\n def randomize_weights(self):\n \"\"\"Perform random initialization of the trainable parameters.\"\"\"\n for net in self.net.chain:\n if isinstance(net, layers.AffineCouplingLayer):\n net.net.randomize_weights()\n return self\n\n\nclass _NN(nn.Module):\n \"\"\"A utility neural network model (one-hidden-layer network) for affine coupling layers.\"\"\"\n def __init__(self,\n n_input: int,\n n_hidden: int,\n n_out: int,\n scale: bool = False):\n \"\"\"\n Parameters:\n n_input: the input dimension.\n n_hidden: the number of the hidden units.\n n_out: the output dimension.\n scale: whether to train an extra coefficient on the output of $s$.\n \"\"\"\n super().__init__()\n self.fc1 = nn.Linear(n_input, n_hidden)\n self.fc_s = nn.Linear(n_hidden, n_out)\n self.fc_t = nn.Linear(n_hidden, n_out)\n\n nn.init.constant_(self.fc1.weight, 0.0)\n nn.init.constant_(self.fc_s.weight, 0.0)\n nn.init.constant_(self.fc_t.weight, 0.0)\n\n self.scale = scale\n if self.scale:\n self.scaler = nn.Parameter(torch.FloatTensor(1, n_out))\n nn.init.constant_(self.scaler, 1.0)\n\n def forward(self, x: FloatTensor) -> FloatTensor:\n \"\"\"Perform forward propagation.\n\n Parameters:\n x: input tensor.\n \"\"\"\n # print(\"X Device::\", x.device)\n x = x.to('cuda')\n hidden = F.relu(self.fc1(x))\n if self.scale:\n s = self.scaler * torch.tanh(self.fc_s(hidden))\n else:\n s = torch.tanh(self.fc_s(hidden))\n t = self.fc_t(hidden)\n return s, t\n\n def randomize_weights(self):\n \"\"\"Perform random initialization of the trainable parameters.\"\"\"\n nn.init.normal_(self.fc1.weight, 0, 1. / self.fc1.weight.shape[1])\n nn.init.normal_(self.fc_s.weight, 0, 1. / self.fc_s.weight.shape[1])\n nn.init.normal_(self.fc_t.weight, 0, 1. / self.fc_t.weight.shape[1])\n if self.scale:\n nn.init.uniform_(self.scaler)\n"
] |
[
[
"torch.nn.init.uniform_",
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.nn.init.normal_"
]
] |
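The listed `torch.nn.init` calls implement the zero-then-randomize initialization pattern of `_NN` above. A minimal sketch under the same assumptions (hypothetical layer sizes):

```python
import torch
import torch.nn as nn

# Zero-initialize a linear layer, then re-randomize it, mirroring
# _NN.__init__ and _NN.randomize_weights above.
fc = nn.Linear(10, 20)
nn.init.constant_(fc.weight, 0.0)
nn.init.normal_(fc.weight, 0, 1. / fc.weight.shape[1])

# Optional trainable output scale, as created when scale=True.
scaler = nn.Parameter(torch.FloatTensor(1, 20))
nn.init.constant_(scaler, 1.0)
nn.init.uniform_(scaler)
```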
TiagoDaFonseca/lazypredict-docker
|
[
"3574bc6d1a2a442a0914b581f8522607b5cc7baf"
] |
[
"modules/utils.py"
] |
[
"from sklearn.model_selection import train_test_split, cross_val_score\nfrom lazypredict.Supervised import LazyRegressor, LazyClassifier\nfrom scipy.signal import savgol_filter\nimport pandas as pd\nimport numpy as np\nimport logging\nimport os\n\nlogging.basicConfig(level=logging.INFO)\n\npath_to_data = os.path.join(os.path.curdir, \"data\")\n\ndef detect_dataset() -> pd.DataFrame:\n logging.info('Searching for datasets')\n files = os.listdir(path_to_data)\n if len(files) > 1:\n logging.warning('Please put only one dataset in folder.')\n \n # Chooses the first file in list\n filepath = files[0]\n \n dataframe = None\n if filepath.split('.')[1] == 'csv':\n dataframe = pd.read_csv(os.path.join(path_to_data, filepath))\n else:\n try:\n dataframe = pd.read_excel(os.path.join(path_to_data, filepath), engine='openpyxl')\n except Exception as e:\n logging.error(str(e))\n\n return dataframe\n\ndef load_data(data: pd.DataFrame) -> np.ndarray:\n \"\"\" returns the X and y data in numpy format\"\"\"\n logging.info('Splitting data into X and y')\n # Ensures that the trainning runs smoothly. Keep in mind lazypredict is just an exploratory step\n # No need to use all the available data\n if data.shape[0]>50000:\n sample = data.sample(frac=0.2) # Shuffles data automatically\n else:\n sample = data\n \n # Edit code is necessary\n X = sample.drop(['Brix'], axis=1).to_numpy().astype(np.float32)\n y = sample['Brix'].to_numpy().astype(np.float32)\n return X, y\n\ndef preprocess_data(input_data: np.ndarray) -> np.ndarray:\n \"\"\" Put your pre-processing algorithms here \"\"\"\n logging.info('Preprocessing data')\n Xgolay = savgol_filter(input_data, window_length=11, polyorder=2, deriv=1)\n return Xgolay\n \ndef run_models(X : np.ndarray, y: np.ndarray, test_size=0.25):\n \"\"\" Runs all the models using lazypredict module \"\"\"\n logging.info('Training data...')\n # Prepare data for trainning\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n\n # Train for classification\n reg = LazyRegressor(verbose=0,ignore_warnings=False, custom_metric=None)\n models,predictions = reg.fit(X_train, X_test, y_train, y_test)\n\n return models, predictions"
] |
[
[
"sklearn.model_selection.train_test_split",
"scipy.signal.savgol_filter"
]
] |
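A minimal sketch of the two listed APIs as `utils.py` above combines them (random stand-in data; the real file expects a 'Brix' column):

```python
import numpy as np
from sklearn.model_selection import train_test_split
from scipy.signal import savgol_filter

# Hypothetical spectra-like data standing in for the dataset above.
X = np.random.rand(100, 50).astype(np.float32)
y = np.random.rand(100).astype(np.float32)

# First-derivative Savitzky-Golay filter, as in preprocess_data.
Xgolay = savgol_filter(X, window_length=11, polyorder=2, deriv=1)

# 75/25 split with a fixed seed, as in run_models.
X_train, X_test, y_train, y_test = train_test_split(
    Xgolay, y, test_size=0.25, random_state=42)
```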
howardyclo/maskrcnn-benchmark
|
[
"64a98927eee33b1b62223e7782537ceaf8418ea7"
] |
[
"demo/predictor.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport cv2\nimport torch\nfrom torchvision import transforms as T\n\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.structures.image_list import to_image_list\nfrom maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker\nfrom maskrcnn_benchmark import layers as L\nfrom maskrcnn_benchmark.utils import cv2_util\n\n\nclass COCODemo(object):\n # COCO categories for pretty print\n CATEGORIES = [\n \"__background\",\n \"person\",\n \"bicycle\",\n \"car\",\n \"motorcycle\",\n \"airplane\",\n \"bus\",\n \"train\",\n \"truck\",\n \"boat\",\n \"traffic light\",\n \"fire hydrant\",\n \"stop sign\",\n \"parking meter\",\n \"bench\",\n \"bird\",\n \"cat\",\n \"dog\",\n \"horse\",\n \"sheep\",\n \"cow\",\n \"elephant\",\n \"bear\",\n \"zebra\",\n \"giraffe\",\n \"backpack\",\n \"umbrella\",\n \"handbag\",\n \"tie\",\n \"suitcase\",\n \"frisbee\",\n \"skis\",\n \"snowboard\",\n \"sports ball\",\n \"kite\",\n \"baseball bat\",\n \"baseball glove\",\n \"skateboard\",\n \"surfboard\",\n \"tennis racket\",\n \"bottle\",\n \"wine glass\",\n \"cup\",\n \"fork\",\n \"knife\",\n \"spoon\",\n \"bowl\",\n \"banana\",\n \"apple\",\n \"sandwich\",\n \"orange\",\n \"broccoli\",\n \"carrot\",\n \"hot dog\",\n \"pizza\",\n \"donut\",\n \"cake\",\n \"chair\",\n \"couch\",\n \"potted plant\",\n \"bed\",\n \"dining table\",\n \"toilet\",\n \"tv\",\n \"laptop\",\n \"mouse\",\n \"remote\",\n \"keyboard\",\n \"cell phone\",\n \"microwave\",\n \"oven\",\n \"toaster\",\n \"sink\",\n \"refrigerator\",\n \"book\",\n \"clock\",\n \"vase\",\n \"scissors\",\n \"teddy bear\",\n \"hair drier\",\n \"toothbrush\",\n ]\n\n def __init__(\n self,\n cfg,\n confidence_threshold=0.7,\n show_mask_heatmaps=False,\n masks_per_dim=2,\n min_image_size=224,\n ):\n self.cfg = cfg.clone()\n self.model = build_detection_model(cfg)\n self.model.eval()\n self.device = torch.device(cfg.MODEL.DEVICE)\n self.model.to(self.device)\n self.min_image_size = min_image_size\n\n save_dir = cfg.OUTPUT_DIR\n checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)\n _ = checkpointer.load(cfg.MODEL.WEIGHT)\n\n self.transforms = self.build_transform()\n\n mask_threshold = -1 if show_mask_heatmaps else 0.5\n self.masker = Masker(threshold=mask_threshold, padding=1)\n\n # used to make colors for each class\n self.palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n\n self.cpu_device = torch.device(\"cpu\")\n self.confidence_threshold = confidence_threshold\n self.show_mask_heatmaps = show_mask_heatmaps\n self.masks_per_dim = masks_per_dim\n\n def build_transform(self):\n \"\"\"\n Creates a basic transformation that was used to train the models\n \"\"\"\n cfg = self.cfg\n\n # we are loading images with OpenCV, so we don't need to convert them\n # to BGR, they are already! 
So all we need to do is to normalize\n # by 255 if we want to convert to BGR255 format, or flip the channels\n # if we want it to be in RGB in [0-1] range.\n if cfg.INPUT.TO_BGR255:\n to_bgr_transform = T.Lambda(lambda x: x * 255)\n else:\n to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])\n\n normalize_transform = T.Normalize(\n mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD\n )\n\n transform = T.Compose(\n [\n T.ToPILImage(),\n T.Resize(self.min_image_size),\n T.ToTensor(),\n to_bgr_transform,\n normalize_transform,\n ]\n )\n return transform\n\n def run_on_opencv_image(self, image, return_predictions=False):\n \"\"\"\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n\n Returns:\n prediction (BoxList): the detected objects. Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n predictions = self.compute_prediction(image)\n top_predictions = self.select_top_predictions(predictions)\n\n result = image.copy()\n if self.show_mask_heatmaps:\n return self.create_mask_montage(result, top_predictions)\n result = self.overlay_boxes(result, top_predictions)\n if self.cfg.MODEL.MASK_ON:\n result = self.overlay_mask(result, top_predictions)\n if self.cfg.MODEL.KEYPOINT_ON:\n result = self.overlay_keypoints(result, top_predictions)\n result = self.overlay_class_names(result, top_predictions)\n\n if return_predictions:\n return result, top_predictions\n else:\n return result\n \n def compute_prediction(self, original_image):\n \"\"\"\n Arguments:\n original_image (np.ndarray): an image as returned by OpenCV\n\n Returns:\n prediction (BoxList): the detected objects. Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n # apply pre-processing to image\n image = self.transforms(original_image)\n # convert to an ImageList, padded so that it is divisible by\n # cfg.DATALOADER.SIZE_DIVISIBILITY\n image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)\n image_list = image_list.to(self.device)\n # compute predictions\n with torch.no_grad():\n predictions = self.model(image_list)\n predictions = [o.to(self.cpu_device) for o in predictions]\n\n # always single image is passed at a time\n prediction = predictions[0]\n\n # reshape prediction (a BoxList) into the original image size\n height, width = original_image.shape[:-1]\n prediction = prediction.resize((width, height))\n\n if prediction.has_field(\"mask\"):\n # if we have masks, paste the masks in the right position\n # in the image, as defined by the bounding boxes\n masks = prediction.get_field(\"mask\")\n # always single image is passed at a time\n masks = self.masker([masks], [prediction])[0]\n prediction.add_field(\"mask\", masks)\n return prediction\n\n def select_top_predictions(self, predictions):\n \"\"\"\n Select only predictions which have a `score` > self.confidence_threshold,\n and returns the predictions in descending order of score\n\n Arguments:\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `scores`.\n\n Returns:\n prediction (BoxList): the detected objects. 
Additional information\n of the detection properties can be found in the fields of\n the BoxList via `prediction.fields()`\n \"\"\"\n scores = predictions.get_field(\"scores\")\n keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)\n predictions = predictions[keep]\n scores = predictions.get_field(\"scores\")\n _, idx = scores.sort(0, descending=True)\n return predictions[idx]\n\n def compute_colors_for_labels(self, labels):\n \"\"\"\n Simple function that adds fixed colors depending on the class\n \"\"\"\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors\n\n def overlay_boxes(self, image, predictions):\n \"\"\"\n Adds the predicted boxes on top of the image\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `labels`.\n \"\"\"\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 1\n )\n\n return image\n\n def overlay_mask(self, image, predictions):\n \"\"\"\n Adds the instances contours for each predicted object.\n Each label has a different color.\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `mask` and `labels`.\n \"\"\"\n masks = predictions.get_field(\"mask\").numpy()\n labels = predictions.get_field(\"labels\")\n\n colors = self.compute_colors_for_labels(labels).tolist()\n\n for mask, color in zip(masks, colors):\n thresh = mask[0, :, :, None]\n contours, hierarchy = cv2_util.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n image = cv2.drawContours(image, contours, -1, color, 3)\n\n composite = image\n\n return composite\n\n def overlay_keypoints(self, image, predictions):\n keypoints = predictions.get_field(\"keypoints\")\n kps = keypoints.keypoints\n scores = keypoints.get_field(\"logits\")\n kps = torch.cat((kps[:, :, 0:2], scores[:, :, None]), dim=2).numpy()\n for region in kps:\n image = vis_keypoints(image, region.transpose((1, 0)))\n return image\n\n def create_mask_montage(self, image, predictions):\n \"\"\"\n Create a montage showing the probability heatmaps for each one one of the\n detected objects\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `mask`.\n \"\"\"\n masks = predictions.get_field(\"mask\")\n masks_per_dim = self.masks_per_dim\n masks = L.interpolate(\n masks.float(), scale_factor=1 / masks_per_dim\n ).byte()\n height, width = masks.shape[-2:]\n max_masks = masks_per_dim ** 2\n masks = masks[:max_masks]\n # handle case where we have less detections than max_masks\n if len(masks) < max_masks:\n masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)\n masks_padded[: len(masks)] = masks\n masks = masks_padded\n masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)\n result = torch.zeros(\n (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8\n )\n for y in range(masks_per_dim):\n start_y = y * height\n end_y = (y + 1) * height\n for x in range(masks_per_dim):\n start_x = x * width\n 
end_x = (x + 1) * width\n result[start_y:end_y, start_x:end_x] = masks[y, x]\n return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)\n\n def overlay_class_names(self, image, predictions):\n \"\"\"\n Adds detected class names and scores in the positions defined by the\n top-left corner of the predicted bounding box\n\n Arguments:\n image (np.ndarray): an image as returned by OpenCV\n predictions (BoxList): the result of the computation by the model.\n It should contain the field `scores` and `labels`.\n \"\"\"\n scores = predictions.get_field(\"scores\").tolist()\n labels = predictions.get_field(\"labels\").tolist()\n labels = [self.CATEGORIES[i] for i in labels]\n boxes = predictions.bbox\n\n template = \"{}: {:.2f}\"\n for box, score, label in zip(boxes, scores, labels):\n x, y = box[:2]\n s = template.format(label, score)\n cv2.putText(\n image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1\n )\n\n return image\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom maskrcnn_benchmark.structures.keypoint import PersonKeypoints\n\ndef vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):\n \"\"\"Visualizes keypoints (adapted from vis_one_image).\n kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).\n \"\"\"\n dataset_keypoints = PersonKeypoints.NAMES\n kp_lines = PersonKeypoints.CONNECTIONS\n\n # Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.\n cmap = plt.get_cmap('rainbow')\n colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]\n colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]\n\n # Perform the drawing on a copy of the image, to allow for blending.\n kp_mask = np.copy(img)\n\n # Draw mid shoulder / mid hip first for better visualization.\n mid_shoulder = (\n kps[:2, dataset_keypoints.index('right_shoulder')] +\n kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0\n sc_mid_shoulder = np.minimum(\n kps[2, dataset_keypoints.index('right_shoulder')],\n kps[2, dataset_keypoints.index('left_shoulder')])\n mid_hip = (\n kps[:2, dataset_keypoints.index('right_hip')] +\n kps[:2, dataset_keypoints.index('left_hip')]) / 2.0\n sc_mid_hip = np.minimum(\n kps[2, dataset_keypoints.index('right_hip')],\n kps[2, dataset_keypoints.index('left_hip')])\n nose_idx = dataset_keypoints.index('nose')\n if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),\n color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)\n if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:\n cv2.line(\n kp_mask, tuple(mid_shoulder), tuple(mid_hip),\n color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)\n\n # Draw the keypoints.\n for l in range(len(kp_lines)):\n i1 = kp_lines[l][0]\n i2 = kp_lines[l][1]\n p1 = kps[0, i1], kps[1, i1]\n p2 = kps[0, i2], kps[1, i2]\n if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:\n cv2.line(\n kp_mask, p1, p2,\n color=colors[l], thickness=2, lineType=cv2.LINE_AA)\n if kps[2, i1] > kp_thresh:\n cv2.circle(\n kp_mask, p1,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n if kps[2, i2] > kp_thresh:\n cv2.circle(\n kp_mask, p2,\n radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)\n\n # Blend the keypoints.\n return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0)\n"
] |
[
[
"torch.zeros",
"torch.cat",
"matplotlib.pyplot.get_cmap",
"torch.tensor",
"numpy.copy",
"torch.no_grad",
"torch.nonzero",
"torch.device"
]
] |
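Most of the listed torch calls appear in the post-processing path of `predictor.py` above. A minimal sketch of the thresholding and padding idioms (toy scores, hypothetical shapes):

```python
import torch

# Score thresholding, as in select_top_predictions above.
scores = torch.tensor([0.9, 0.3, 0.8])
keep = torch.nonzero(scores > 0.7).squeeze(1)

# Gradient-free inference and uint8 mask padding, as in
# compute_prediction / create_mask_montage above.
with torch.no_grad():
    padded = torch.zeros(4, 1, 8, 8, dtype=torch.uint8)
print(keep.tolist(), padded.shape)  # [0, 2] torch.Size([4, 1, 8, 8])
```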
nicholas512/seabird
|
[
"23073b2b9a550b86ec155cbe43be9b50e50b8310"
] |
[
"seabird/utils.py"
] |
[
"import os\nimport re\nimport logging\nimport pkg_resources\nimport json\n\n# import codecs\n\nfrom seabird.exceptions import CNVError\n\nmodule_logger = logging.getLogger('seabird.utils')\n\n\ndef make_file_list(inputdir, inputpattern=\".*\\.cnv\"):\n \"\"\" Search inputdir recursively for inputpattern\n \"\"\"\n inputfiles = []\n for dirpath, dirnames, filenames in os.walk(inputdir):\n for filename in filenames:\n if re.match(inputpattern, filename):\n inputfiles.append(os.path.join(dirpath, filename))\n inputfiles.sort()\n return inputfiles\n\n\ndef basic_logger(logger=None):\n if logger is not None:\n assert type(logger) is logging.Logger\n else:\n # create logger\n logger = logging.getLogger('CNV logger')\n logger.setLevel(logging.DEBUG)\n\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # create formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # add formatter to ch\n ch.setFormatter(formatter)\n\n # add ch to logger\n logger.addHandler(ch)\n\n return logger\n\n\ndef press2depth(press, latitude):\n \"\"\" calculate depth from pressure\n http://www.seabird.com/application_notes/AN69.htm\n\n ATENTION, move it to fluid.\n \"\"\"\n import numpy as np\n x = np.sin((np.pi/180) * latitude / 57.29578)**2\n g = 9.780318 * (1.0 + (5.2788e-3 + 2.36e-5 * x) * x) + 1.092e-6 * press\n depth = -((((-1.82e-15 * press + 2.279e-10) * press - 2.2512e-5) *\n press + 9.72659) * press) / g\n return depth\n\n\ndef load_rule(raw_text):\n \"\"\" Load the adequate rules to parse the data\n\n It should try all available rules, one by one, and use the one\n which fits.\n \"\"\"\n rules_dir = 'rules'\n rule_files = pkg_resources.resource_listdir(__name__, rules_dir)\n rule_files = [f for f in rule_files if re.match('^cnv.*\\.json$', f)]\n for rule_file in rule_files:\n text = pkg_resources.resource_string(\n __name__, os.path.join(rules_dir, rule_file))\n rule = json.loads(text.decode('utf-8'), encoding=\"utf-8\")\n # Should I load using codec, for UTF8?? Do I need it?\n # f = codecs.open(rule_file, 'r', 'utf-8')\n # rule = yaml.load(f.read())\n\n # Transitioning for the new rules concept for regexp.\n if 'sep' in rule:\n r = rule['header'] + rule['sep'] + rule['data']\n else:\n r = \"(?P<header> \" + rule['header'] + \")\" + \\\n \"(?P<data> (?:\" + rule['data'] + \")+)\"\n content_re = re.compile(r, re.VERBOSE)\n if re.search(r, raw_text, re.VERBOSE):\n #logging.debug(\"Using rules from: %s\" % rule_file)\n #self.rule = rule\n parsed = content_re.search(raw_text).groupdict()\n return rule, parsed\n\n # If haven't returned a rule by this point, raise an exception.\n #logging.error(\"No rules able to parse it\")\n raise CNVError(tag='noparsingrule')\n\n\ndef seabird_dir(subdir=None):\n \"\"\"Return the local support/config directory\n\n Returns a local directory used to store testing data. The default path\n (~/.config/seabird) can be overwritten by the environment variable\n SEABIRD_DIR.\n \"\"\"\n spath = os.getenv('SEABIRD_DIR', '~/.config/seabird')\n return os.path.expanduser(spath).replace('/', os.path.sep)\n\n\ndef sampledata():\n try:\n import supportdata\n except:\n print(\"Missing package supportdata. 
Try:\\npip install supportdata\")\n\n data_path = os.path.join(seabird_dir(), 'data')\n if not os.path.isdir(data_path):\n os.makedirs(data_path)\n\n src = 'https://raw.githubusercontent.com/castelao/seabird/dev/sampledata'\n files = [\n ['CTD', 'PIRA001.cnv', '5ded777144300b63c8775b1d7f033f92'],\n ['CTD', 'dPIRX003.cnv', '4b941b902a3aea7d99e1cf4c78c51877'],\n ['CTD', 'Hotin.cnv', '814dc769c0775327bbe5b0f489dfb571'],\n ['CTD', 'sta0860.cnv', '1c788c4d9b82b527ebf0c2fb9200600e'],\n ['CTD', 'missing_whitespace.cnv',\n 'c1f00cebb5f00f6aaebc316bac3fd86a'],\n #['CTD', 'laurynas.cnv', '6f188d53ac2d7aaaf4ce69c0e5c514ec'],\n ['TSG', 'TSG_PIR_001.cnv', '2950ccb9f77e0802557b011c63d2e39b'],\n ['TSG', 'TSG_PIR_010.cnv', 'd87cea33bfe37e22dc8e563f77cbf307'],\n ['btl', 'MI18MHDR.btl', '775f2a6c6585f1cffb0038111580e5a1'],\n ]\n\n for f in files:\n supportdata.download_file(\n os.path.join(data_path, f[0]),\n os.path.join(src, f[0], f[1]),\n f[1], f[2])\n"
] |
[
[
"numpy.sin"
]
] |
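The single listed API, `numpy.sin`, carries the AN69 pressure-to-depth conversion in `press2depth` above. The same arithmetic as a standalone sketch (sample inputs are illustrative; the latitude scaling and the leading minus sign are copied verbatim from the file):

```python
import numpy as np

press, latitude = 1000.0, 45.0  # dbar, degrees (illustrative values)
x = np.sin((np.pi / 180) * latitude / 57.29578) ** 2
g = 9.780318 * (1.0 + (5.2788e-3 + 2.36e-5 * x) * x) + 1.092e-6 * press
depth = -((((-1.82e-15 * press + 2.279e-10) * press
            - 2.2512e-5) * press + 9.72659) * press) / g
print(depth)
```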
CeadeS/PyTorchH5Dataset
|
[
"9ee6e49f2a780345abd708abf2e0c47bb5475e0a"
] |
[
"pytorch_h5dataset/dataset/imageDataset.py"
] |
[
"from .metaDataset import H5MetaDataset\nfrom torch import device, version, cat, jit, nn, stack, as_tensor\nfrom torchvision.transforms import Resize\nimport warnings\nfrom ..fn import image\nfrom functools import partial\nfrom pytorch_h5dataset.fn.transforms import Transform\n\n\n\nclass ImageDataset(H5MetaDataset):\n\n\n def __getitem__(self, sub_batch_idx):\n sample_reference, meta = super(ImageDataset, self).__getitem__(sub_batch_idx=sub_batch_idx)\n sample = self.image_transforms(sample_reference[()]) ## [read sample from disk] and transform\n \n if self.tensor_transforms is not None:\n sample = self.tensor_transforms(sample)\n\n return sample, meta\n\n @staticmethod\n def convert_samples_to_dataset(dataset_dataframe,\n dataset_destination_h5_file='./data/test_dataset.h5',\n sub_batch_size=50, max_n_group= 10):\n\n ImageDataset._convert_samples_to_dataset(dataset_dataframe=dataset_dataframe,\n dataset_destination_h5_file=dataset_destination_h5_file,\n sub_batch_size=sub_batch_size, data_mode='image',\n max_n_group=max_n_group)\n\n def __init__(self,\n dataset_name='dataset_name',\n dataset_root='/path/to/dataset',\n split_mode = 'full',\n split_ratio = 1.0,\n split_number = 0,\n tr_crop_strategy = None,\n tr_crop_size = None,\n tr_crop_area_ratio_range = None,\n tr_output_size = None,\n tr_random_rotation_angles = None,\n tr_random_flip = None,\n tr_random_scale_range = None,\n decode = None, ## None, cpu, cuda\n output_device: device = device('cpu'), #cpu or cuda\n tensor_transforms = None,\n quality=83\n ):\n decode = None if decode is None else device(decode)\n assert decode is not None or all(a is None for a in [tr_crop_strategy,\n tr_crop_size,\n tr_crop_area_ratio_range,\n tr_output_size,\n tr_random_rotation_angles,\n tr_random_flip,\n tr_random_scale_range]), \"If image preprocessing is activated, a decode device must be specified\"\n\n assert tr_crop_strategy in ['random', 'center', None]\n\n output_device = device(output_device)\n\n super(ImageDataset, self).__init__(dataset_name, dataset_root, split_mode, split_ratio, split_number)\n assert self.data_dtype == str(bytes)\n\n if float(version.cuda) < 11.6 and decode is not None and decode.type == 'cuda':\n s = str(f\"Function not available for cuda version {version.cuda} is < 11.6, using cpu instead\")\n warnings.warn(s)\n decode = device('cpu')\n\n #JPEG Transforms\n\n transforms = []\n if tr_random_scale_range is not None:\n assert isinstance(tr_random_scale_range, tuple) and len(tr_random_scale_range) < 3 \\\n or isinstance(tr_random_scale_range, float)\n transforms.append(partial(image.ImageInterface.random_scale, scale_range= tr_random_scale_range, quality=quality))\n\n if tr_crop_strategy is not None:\n assert tr_crop_strategy.lower() in ['random', 'center']\n crop_function = image.ImageInterface.get_random_crop_function(\n random_location=tr_crop_strategy.lower() == 'random',\n crop_size =tr_crop_size,\n crop_area_ratio_range = tr_crop_area_ratio_range)\n transforms.append(crop_function)\n\n if tr_output_size is not None:\n transforms.append(partial(image.ImageInterface.scale, heights = tr_output_size[0], widths = tr_output_size[1]))\n\n if tr_random_rotation_angles is not None:\n assert isinstance(tr_random_rotation_angles, tuple) and all((a in [-90,0,90,180,270]) for a in tr_random_rotation_angles)\n transforms.append(partial(image.ImageInterface.random_rotation, angles = tr_random_rotation_angles))\n\n if tr_random_flip is not None:\n if 'v' in tr_random_flip.lower():\n 
transforms.append(image.ImageInterface.random_v_flip)\n if 'h' in tr_random_flip.lower():\n transforms.append(image.ImageInterface.random_h_flip)\n\n if decode is not None:\n if 'cuda' == decode.type:\n transforms.append(partial(image.ImageInterface.sub_batch_as_tensor, device=device(decode)))\n transforms.append(partial(image.ImageInterface.sub_batch_decode, device=device(decode)))\n\n\n\n if tr_output_size is not None and decode is not None:\n transforms.append(partial(image.ImageInterface.scale_torch, heights = tr_output_size[0], widths = tr_output_size[1]))\n transforms.append(stack)\n transforms.append(partial(as_tensor, device=device(output_device)))\n\n elif output_device.type == 'cuda' or decode is not None or tensor_transforms is not None:\n transforms.append(partial(image.ImageInterface.sub_batch_as_tensor, device=device(output_device)))\n\n\n\n\n\n\n\n self.image_transforms = Transform(transforms=transforms)\n self.tensor_transforms = tensor_transforms\n\n"
] |
[
[
"torch.device"
]
] |
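`torch.device` is the only listed API; `ImageDataset.__init__` above uses it to normalize device arguments. A minimal sketch of that pattern:

```python
from torch import device

# Strings or torch.device values are both accepted; None disables decoding,
# mirroring the decode/output_device handling in ImageDataset.__init__ above.
decode = 'cpu'  # could also be None or 'cuda'
decode = None if decode is None else device(decode)
output_device = device('cpu')
print(decode.type, output_device.type)  # cpu cpu
```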
anandmoghan/natural-language-query
|
[
"0ebdf61e76ad0cfd5789c23b98f94e6ce5893a58"
] |
[
"src/models/embedding.py"
] |
[
"import numpy as np\nimport torch\nimport torch.nn as nn\n\nimport constants.main_constants as const\n\n\nclass EmbeddingLayer(nn.Module):\n def forward(self, *input):\n pass\n\n def __init__(self, emb_size=const.EMBEDDING_SIZE, gpu=False, token_to_index=None, token_weights=None):\n super(EmbeddingLayer, self).__init__()\n self.gpu = gpu\n self.emb_size = emb_size\n self.token_to_index = token_to_index\n\n self.total_unique_tokens = len(token_to_index)\n self.embedding = nn.Embedding(self.total_unique_tokens, emb_size)\n self.embedding.weight.data.copy_(torch.from_numpy(token_weights.astype(np.float32)))\n\n def get_embedding(self, input):\n return self.embedding(input)\n\n# TODO: Make embedding batch functions\n"
] |
[
[
"torch.nn.Embedding"
]
] |
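A minimal sketch of the listed `torch.nn.Embedding` usage, mirroring how `EmbeddingLayer` above copies pre-trained weights (vocabulary and embedding sizes are hypothetical):

```python
import numpy as np
import torch
import torch.nn as nn

# Pre-trained weights copied into nn.Embedding, as in EmbeddingLayer.__init__.
token_weights = np.random.rand(1000, 300)
embedding = nn.Embedding(1000, 300)
embedding.weight.data.copy_(torch.from_numpy(token_weights.astype(np.float32)))
vectors = embedding(torch.tensor([1, 5, 42]))  # shape (3, 300)
```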
JYWa/federated
|
[
"8073bfa355f56db073349eefb0bcbafbf4e73733"
] |
[
"differential_privacy/emnist/run_federated.py"
] |
[
"# Copyright 2019, Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Trains and evaluates an EMNIST classification model with DP-FedAvg.\"\"\"\n\nimport functools\nimport os.path\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom optimization.shared import optimizer_utils\nfrom utils import training_loop\nfrom utils import training_utils\nfrom utils import utils_impl\nfrom utils.datasets import emnist_dataset\nfrom utils.models import emnist_models\n\nwith utils_impl.record_hparam_flags():\n # Experiment hyperparameters\n flags.DEFINE_enum(\n 'model', 'cnn', ['cnn', '2nn'], 'Which model to use. This '\n 'can be a convolutional model (cnn) or a two hidden-layer '\n 'densely connected network (2nn).')\n flags.DEFINE_integer('client_batch_size', 20,\n 'Batch size used on the client.')\n flags.DEFINE_integer('clients_per_round', 10,\n 'How many clients to sample per round.')\n flags.DEFINE_integer(\n 'client_epochs_per_round', 1,\n 'Number of client (inner optimizer) epochs per federated round.')\n flags.DEFINE_boolean(\n 'uniform_weighting', False,\n 'Whether to weigh clients uniformly. If false, clients '\n 'are weighted by the number of samples.')\n\n # Optimizer configuration (this defines one or more flags per optimizer).\n utils_impl.define_optimizer_flags('server')\n utils_impl.define_optimizer_flags('client')\n\n # Differential privacy flags\n flags.DEFINE_float('clip', 0.05, 'Initial clip.')\n flags.DEFINE_float('noise_multiplier', None,\n 'Noise multiplier. If None, no DP is used.')\n flags.DEFINE_float('adaptive_clip_learning_rate', 0,\n 'Adaptive clip learning rate.')\n flags.DEFINE_float('target_unclipped_quantile', 0.5,\n 'Target unclipped quantile.')\n\nwith utils_impl.record_new_flags() as training_loop_flags:\n flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')\n flags.DEFINE_string(\n 'experiment_name', None, 'The name of this experiment. 
Will be append to '\n '--root_output_dir to separate experiment results.')\n flags.DEFINE_string('root_output_dir', '/tmp/differential_privacy/',\n 'Root directory for writing experiment output.')\n flags.DEFINE_integer(\n 'rounds_per_eval', 1,\n 'How often to evaluate the global model on the validation dataset.')\n flags.DEFINE_integer('rounds_per_checkpoint', 50,\n 'How often to checkpoint the global model.')\n\nFLAGS = flags.FLAGS\n\n# End of hyperparameter flags.\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Expected no command-line arguments, '\n 'got: {}'.format(argv))\n\n emnist_train, _ = emnist_dataset.get_federated_datasets(\n train_client_batch_size=FLAGS.client_batch_size,\n train_client_epochs_per_round=FLAGS.client_epochs_per_round,\n only_digits=False)\n\n _, emnist_test = emnist_dataset.get_centralized_datasets()\n\n if FLAGS.model == 'cnn':\n model_builder = functools.partial(\n emnist_models.create_conv_dropout_model, only_digits=False)\n elif FLAGS.model == '2nn':\n model_builder = functools.partial(\n emnist_models.create_two_hidden_layer_model, only_digits=False)\n else:\n raise ValueError('Cannot handle model flag [{!s}].'.format(FLAGS.model))\n\n loss_builder = tf.keras.losses.SparseCategoricalCrossentropy\n metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]\n\n if FLAGS.uniform_weighting:\n client_weighting = tff.learning.ClientWeighting.UNIFORM\n else:\n client_weighting = tff.learning.ClientWeighting.NUM_EXAMPLES\n\n def model_fn():\n return tff.learning.from_keras_model(\n model_builder(),\n loss_builder(),\n input_spec=emnist_test.element_spec,\n metrics=metrics_builder())\n\n if FLAGS.noise_multiplier is not None:\n if not FLAGS.uniform_weighting:\n raise ValueError(\n 'Differential privacy is only implemented for uniform weighting.')\n if FLAGS.noise_multiplier <= 0:\n raise ValueError('noise_multiplier must be positive if DP is enabled.')\n if FLAGS.clip is None or FLAGS.clip <= 0:\n raise ValueError('clip must be positive if DP is enabled.')\n\n if not FLAGS.adaptive_clip_learning_rate:\n aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_fixed(\n noise_multiplier=FLAGS.noise_multiplier,\n clients_per_round=FLAGS.clients_per_round,\n clip=FLAGS.clip)\n else:\n if FLAGS.adaptive_clip_learning_rate <= 0:\n raise ValueError('adaptive_clip_learning_rate must be positive if '\n 'adaptive clipping is enabled.')\n aggregation_factory = tff.aggregators.DifferentiallyPrivateFactory.gaussian_adaptive(\n noise_multiplier=FLAGS.noise_multiplier,\n clients_per_round=FLAGS.clients_per_round,\n initial_l2_norm_clip=FLAGS.clip,\n target_unclipped_quantile=FLAGS.target_unclipped_quantile,\n learning_rate=FLAGS.adaptive_clip_learning_rate)\n else:\n if FLAGS.uniform_weighting:\n aggregation_factory = tff.aggregators.UnweightedMeanFactory()\n else:\n aggregation_factory = tff.aggregators.MeanFactory()\n\n server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')\n client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')\n iterative_process = tff.learning.build_federated_averaging_process(\n model_fn=model_fn,\n server_optimizer_fn=server_optimizer_fn,\n client_weighting=client_weighting,\n client_optimizer_fn=client_optimizer_fn,\n model_update_aggregation_factory=aggregation_factory)\n\n client_datasets_fn = training_utils.build_client_datasets_fn(\n emnist_train, FLAGS.clients_per_round)\n\n evaluate_fn = training_utils.build_centralized_evaluate_fn(\n 
eval_dataset=emnist_test,\n model_builder=model_builder,\n loss_builder=loss_builder,\n metrics_builder=metrics_builder)\n validation_fn = lambda state, round_num: evaluate_fn(state.model)\n\n logging.info('Training model:')\n logging.info(model_builder().summary())\n\n # Log hyperparameters to CSV\n hparam_dict = utils_impl.lookup_flag_values(utils_impl.get_hparam_flags())\n results_dir = os.path.join(FLAGS.root_output_dir, 'results',\n FLAGS.experiment_name)\n utils_impl.create_directory_if_not_exists(results_dir)\n hparam_file = os.path.join(results_dir, 'hparams.csv')\n utils_impl.atomic_write_series_to_csv(hparam_dict, hparam_file)\n\n training_loop.run(\n iterative_process=iterative_process,\n client_datasets_fn=client_datasets_fn,\n validation_fn=validation_fn,\n total_rounds=FLAGS.total_rounds,\n experiment_name=FLAGS.experiment_name,\n root_output_dir=FLAGS.root_output_dir,\n rounds_per_eval=FLAGS.rounds_per_eval,\n rounds_per_checkpoint=FLAGS.rounds_per_checkpoint)\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] |
[
[
"tensorflow.keras.metrics.SparseCategoricalAccuracy"
]
] |
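The listed metric is created through the `metrics_builder` lambda in `run_federated.py` above, so each call yields a fresh metric instance. A minimal standalone sketch (toy labels and predictions):

```python
import tensorflow as tf

# A fresh SparseCategoricalAccuracy per call, as the lambda above produces.
metrics_builder = lambda: [tf.keras.metrics.SparseCategoricalAccuracy()]
metric = metrics_builder()[0]
metric.update_state(y_true=[1, 2], y_pred=[[0.1, 0.8, 0.1], [0.2, 0.1, 0.7]])
print(float(metric.result()))  # 1.0
```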
neale/alf
|
[
"33c0ba212b1ca4f7dbb0986be695676c309d5549"
] |
[
"alf/algorithms/algorithm.py"
] |
[
"# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Algorithm base class.\"\"\"\n\nfrom absl import logging\nimport copy\nfrom collections import OrderedDict\nimport itertools\nimport json\nimport numpy as np\nimport os\nimport psutil\nimport torch\nimport torch.nn as nn\nfrom torch.nn.modules.module import _IncompatibleKeys, _addindent\n\nimport alf\nfrom alf.data_structures import AlgStep, LossInfo, StepType, TimeStep, experience_to_time_step\nfrom alf.experience_replayers.experience_replay import (\n OnetimeExperienceReplayer, SyncExperienceReplayer)\nfrom alf.utils.checkpoint_utils import is_checkpoint_enabled\nfrom alf.utils import common, dist_utils, spec_utils, summary_utils\nfrom alf.utils.summary_utils import record_time\nfrom alf.utils.math_ops import add_ignore_empty\nfrom .algorithm_interface import AlgorithmInterface\nfrom .config import TrainerConfig\nfrom .data_transformer import IdentityDataTransformer\n\n\ndef _get_optimizer_params(optimizer: torch.optim.Optimizer):\n return sum([g['params'] for g in optimizer.param_groups], [])\n\n\ndef _flatten_module(module):\n if isinstance(module, nn.ModuleList):\n return sum(map(_flatten_module, module), [])\n elif isinstance(module, nn.ModuleDict):\n return sum(map(_flatten_module, module.values()), [])\n elif isinstance(module, nn.ParameterList):\n return list(module)\n elif isinstance(module, nn.ParameterDict):\n return list(module.values())\n else:\n # `module` is an nn.Module or nn.Parameter\n return [module]\n\n\nclass Algorithm(AlgorithmInterface):\n \"\"\"Base implementation for AlgorithmInterface.\"\"\"\n\n def __init__(self,\n train_state_spec=(),\n rollout_state_spec=None,\n predict_state_spec=None,\n is_on_policy=None,\n optimizer=None,\n config: TrainerConfig = None,\n debug_summaries=False,\n name=\"Algorithm\"):\n \"\"\"Each algorithm can have a default optimimzer. By default, the parameters\n and/or modules under an algorithm are optimized by the default\n optimizer. One can also specify an optimizer for a set of parameters\n and/or modules using add_optimizer. You can find out which parameter is\n handled by which optimizer using ``get_optimizer_info()``.\n\n A requirement for this optimizer structure to work is that there is no\n algorithm which is a submodule of a non-algorithm module. Currently,\n this is not checked by the framework. It's up to the user to make sure\n this is true.\n\n Args:\n train_state_spec (nested TensorSpec): for the network state of\n ``train_step()``.\n rollout_state_spec (nested TensorSpec): for the network state of\n ``predict_step()``. If None, it's assumed to be the same as\n ``train_state_spec``.\n predict_state_spec (nested TensorSpec): for the network state of\n ``predict_step()``. If None, it's assume to be same as\n ``rollout_state_spec``.\n is_on_policy (None|bool):\n optimizer (None|Optimizer): The default optimizer for\n training. See comments above for detail.\n config (TrainerConfig): config for training. 
``config`` only needs to\n be provided to the algorithm which performs a training iteration\n by itself.\n debug_summaries (bool): True if debug summaries should be created.\n name (str): name of this algorithm.\n \"\"\"\n super(Algorithm, self).__init__()\n\n self._name = name\n self._config = config\n self._proc = psutil.Process(os.getpid())\n\n self._train_state_spec = train_state_spec\n if rollout_state_spec is not None:\n self._rollout_state_spec = rollout_state_spec\n else:\n self._rollout_state_spec = self._train_state_spec\n if predict_state_spec is not None:\n self._predict_state_spec = predict_state_spec\n else:\n self._predict_state_spec = self._rollout_state_spec\n\n self._initial_train_states = {}\n self._initial_rollout_states = {}\n self._initial_predict_states = {}\n self._initial_transform_states = {}\n\n self._experience_spec = None\n self._train_info_spec = None\n self._processed_experience_spec = None\n\n if config and config.data_transformer:\n self._data_transformer = config.data_transformer\n else:\n self._data_transformer = IdentityDataTransformer()\n self._num_earliest_frames_ignored = self._data_transformer.stack_size - 1\n self._transform_state_spec = self._data_transformer.state_spec\n\n self._observers = []\n self._metrics = []\n self._exp_replayer = None\n self._exp_replayer_type = None\n\n self._use_rollout_state = False\n if config:\n self.use_rollout_state = config.use_rollout_state\n if config.temporally_independent_train_step is None:\n config.temporally_independent_train_step = (len(\n alf.nest.flatten(self.train_state_spec)) == 0)\n\n self._is_rnn = len(alf.nest.flatten(train_state_spec)) > 0\n\n self._debug_summaries = debug_summaries\n self._default_optimizer = optimizer\n self._optimizers = []\n self._opt_keys = []\n self._module_to_optimizer = {}\n self._path = ''\n if optimizer:\n self._optimizers.append(optimizer)\n self._is_on_policy = is_on_policy\n\n def forward(self, *input):\n raise RuntimeError(\"forward() should not be called\")\n\n @property\n def path(self):\n return self._path\n\n def set_path(self, path):\n self._path = path\n\n @property\n def on_policy(self):\n return self._is_on_policy\n\n def set_on_policy(self, is_on_policy):\n if self.on_policy is not None:\n assert self.on_policy == is_on_policy, (\n \"set_on_policy() can\"\n \"only be called to change is_on_policy if is_on_policy is None.\"\n )\n self._is_on_policy = is_on_policy\n\n def is_rl(self):\n \"\"\"Always returns False for non-RL algorithms.\"\"\"\n return False\n\n @property\n def name(self):\n \"\"\"The name of this algorithm.\"\"\"\n return self._name\n\n def _set_children_property(self, property_name, value):\n \"\"\"Set the property named ``property_name`` in child Algorithm to\n ``value``.\n \"\"\"\n for child in self._get_children():\n if isinstance(child, Algorithm):\n child.__setattr__(property_name, value)\n\n def need_full_rollout_state(self):\n \"\"\"Whether ``AlgStep.state`` from ``rollout_step`` should be full.\n\n If True, it means that ``rollout_step()`` should return the complete state\n for ``train_step()``.\n \"\"\"\n return self._is_rnn and self._use_rollout_state\n\n @property\n def use_rollout_state(self):\n \"\"\"If True, when off-policy training, the RNN states will be taken\n from the replay buffer; otherwise they will be set to 0.\n\n In the case of True, the ``train_state_spec`` of an algorithm should always\n be a subset of the ``rollout_state_spec``.\n \"\"\"\n return self._use_rollout_state\n\n @use_rollout_state.setter\n def 
use_rollout_state(self, flag):\n self._use_rollout_state = flag\n self._set_children_property('use_rollout_state', flag)\n\n def set_exp_replayer(self,\n exp_replayer: str,\n num_envs,\n max_length: int,\n prioritized_sampling=False):\n \"\"\"Set experience replayer.\n\n Args:\n exp_replayer (str): type of experience replayer. One of (\"one_time\",\n \"uniform\")\n num_envs (int): the total number of environments from all batched\n environments.\n max_length (int): the maximum number of steps the replay\n buffer store for each environment.\n prioritized_sampling (bool): Use prioritized sampling if this is True.\n \"\"\"\n assert exp_replayer in (\"one_time\", \"uniform\"), (\n \"Unsupported exp_replayer: %s\" % exp_replayer)\n self._exp_replayer_type = exp_replayer\n self._exp_replayer_num_envs = num_envs\n self._exp_replayer_length = max_length\n self._prioritized_sampling = prioritized_sampling\n\n def _set_exp_replayer(self, sample_exp):\n \"\"\"Initialize the experience replayer for the very first time given a\n sample experience which is used to infer the specs for the buffer\n initialization.\n\n Args:\n sample_exp (nested Tensor):\n \"\"\"\n self._experience_spec = dist_utils.extract_spec(sample_exp, from_dim=1)\n self._exp_contains_step_type = ('step_type' in dict(\n alf.nest.extract_fields_from_nest(sample_exp)))\n\n if self._exp_replayer_type == \"one_time\":\n self._exp_replayer = OnetimeExperienceReplayer()\n elif self._exp_replayer_type == \"uniform\":\n exp_spec = dist_utils.to_distribution_param_spec(\n self._experience_spec)\n self._exp_replayer = SyncExperienceReplayer(\n exp_spec,\n self._exp_replayer_num_envs,\n self._exp_replayer_length,\n prioritized_sampling=self._prioritized_sampling,\n num_earliest_frames_ignored=self._num_earliest_frames_ignored,\n name=\"exp_replayer\")\n else:\n raise ValueError(\"invalid experience replayer name\")\n self._observers.append(self._exp_replayer.observe)\n\n def observe_for_replay(self, exp):\n r\"\"\"Record an experience in a replay buffer.\n\n Args:\n exp (nested Tensor): exp (nested Tensor): The shape is\n :math:`[B, \\ldots]`, where :math:`B` is the batch size of the\n batched environment.\n \"\"\"\n if not self._use_rollout_state:\n exp = exp._replace(state=())\n elif id(self.rollout_state_spec) != id(self.train_state_spec):\n # Prune exp's state (rollout_state) according to the train state spec\n exp = exp._replace(\n state=alf.nest.prune_nest_like(\n exp.state, self.train_state_spec, value_to_match=()))\n\n if self._exp_replayer is None and self._exp_replayer_type:\n self._set_exp_replayer(exp)\n\n exp = dist_utils.distributions_to_params(exp)\n for observer in self._observers:\n observer(exp)\n\n def observe_for_metrics(self, time_step):\n r\"\"\"Observe a time step for recording environment metrics.\n\n Args:\n time_step (TimeStep): the current time step during ``unroll()``.\n \"\"\"\n for metric in self._metrics:\n metric(time_step)\n\n def transform_timestep(self, time_step, state):\n \"\"\"Transform time_step.\n\n ``transform_timestep`` is called for all raw time_step got from\n the environment before passing to ``predict_step`` and ``rollout_step``. For\n off-policy algorithms, the replay buffer stores raw time_step. So when\n experiences are retrieved from the replay buffer, they are tranformed by\n ``transform_timestep`` in ``OffPolicyAlgorithm`` before passing to\n ``_update()``.\n\n The transformation should be stateless. 
By default, only observation\n is transformed.\n\n Args:\n time_step (TimeStep or Experience): time step\n state (nested Tensor): state of the transformer(s)\n Returns:\n TimeStep or Experience: transformed time step\n \"\"\"\n return self._data_transformer.transform_timestep(time_step, state)\n\n def transform_experience(self, experience):\n \"\"\"Transform an Experience structure.\n\n This is used on the experience data retrieved from replay buffer.\n\n Args:\n experience (Experience): the experience retrieved from replay buffer.\n Note that ``experience.batch_info``, ``experience.replay_buffer``\n need to be set.\n Returns:\n Experience: transformed experience\n \"\"\"\n return self._data_transformer.transform_experience(experience)\n\n def summarize_train(self, experience, train_info, loss_info, params):\n \"\"\"Generate summaries for training & loss info after each gradient update.\n The default implementation of this function only summarizes params\n (with grads) and the loss. An algorithm can override this for additional\n summaries. See ``RLAlgorithm.summarize_train()`` for an example.\n\n Args:\n experience (nested Tensor): samples used for the most recent\n ``update_with_gradient()``. By default it's not summarized.\n train_info (nested Tensor): ``AlgStep.info`` returned by either\n ``rollout_step()`` (on-policy training) or ``train_step()``\n (off-policy training). By default it's not summarized.\n loss_info (LossInfo): loss\n params (list[Parameter]): list of parameters with gradients\n \"\"\"\n if self._config.summarize_grads_and_vars:\n summary_utils.summarize_variables(params)\n summary_utils.summarize_gradients(params)\n if self._debug_summaries:\n summary_utils.summarize_loss(loss_info)\n obs = alf.nest.find_field(experience, \"observation\")\n if len(obs) == 1:\n summary_utils.summarize_nest(\"observation\", obs[0])\n\n mem = self._proc.memory_info().rss // 1e6\n alf.summary.scalar(name='memory/cpu', data=mem)\n if torch.cuda.is_available():\n mem = torch.cuda.memory_allocated() // 1e6\n alf.summary.scalar(name='memory/gpu_allocated', data=mem)\n mem = torch.cuda.memory_reserved() // 1e6\n alf.summary.scalar(name='memory/gpu_reserved', data=mem)\n mem = torch.cuda.max_memory_allocated() // 1e6\n alf.summary.scalar(name='memory/max_gpu_allocated', data=mem)\n mem = torch.cuda.max_memory_reserved() // 1e6\n alf.summary.scalar(name='memory/max_gpu_reserved', data=mem)\n torch.cuda.reset_max_memory_allocated()\n # TODO: consider using torch.cuda.empty_cache() to save memory.\n\n def add_optimizer(self, optimizer: torch.optim.Optimizer,\n modules_and_params):\n \"\"\"Add an optimizer.\n\n Note that the modules and params contained in ``modules_and_params``\n should still be the attributes of the algorithm (i.e., they can be\n retrieved in ``self.children()`` or ``self.parameters()``).\n\n Args:\n optimizer (Optimizer): optimizer\n modules_and_params (list of Module or Parameter): The modules and\n parameters to be optimized by ``optimizer``.\n \"\"\"\n assert optimizer is not None, \"You shouldn't add a None optimizer!\"\n for module in modules_and_params:\n for m in _flatten_module(module):\n self._module_to_optimizer[m] = optimizer\n self._optimizers.append(optimizer)\n\n def _trainable_attributes_to_ignore(self):\n \"\"\"Algorithms can overwrite this function to provide which class\n member names should be ignored when getting trainable parameters, to\n avoid being assigned to the default optimizer.\n\n For example, if in your algorithm you've created a member 
``self._vars``\n pointing to the parameters of a module for some purpose, you can avoid\n assigning an optimizer to ``self._vars`` (because the module will be assigned\n with one) by doing:\n\n .. code-block:: python\n\n def _trainable_attributes_to_ignore(self):\n return [\"_vars\"]\n\n Returns:\n list[str]: a list of attribute names to ignore.\n \"\"\"\n return []\n\n def _get_children(self):\n children = []\n for name, module in self.named_children():\n if name in self._trainable_attributes_to_ignore():\n continue\n children.extend(_flatten_module(module))\n\n for name, param in self.named_parameters(recurse=False):\n if name in self._trainable_attributes_to_ignore():\n continue\n children.append(param)\n\n return children\n\n @property\n def default_optimizer(self):\n \"\"\"Get the default optimizer for this algorithm.\"\"\"\n return self._default_optimizer\n\n def _assert_no_cycle_or_duplicate(self):\n visited = set()\n to_be_visited = [self]\n while to_be_visited:\n node = to_be_visited.pop(0)\n visited.add(node)\n for child in node._get_children():\n assert child not in visited, (\n \"There is a cycle or duplicate in the \"\n \"algorithm tree caused by '%s'\" % child.name)\n if isinstance(child, Algorithm):\n to_be_visited.append(child)\n\n def get_param_name(self, param):\n \"\"\"Get the name of the parameter.\n\n Returns:\n string: the name if the parameter can be found; otherwise ``None``.\n \"\"\"\n return self._param_to_name.get(param)\n\n def _setup_optimizers(self):\n \"\"\"Setup the param groups for optimizers.\n\n Returns:\n list: a list of parameters not handled by any optimizers under this\n algorithm.\n \"\"\"\n self._assert_no_cycle_or_duplicate()\n self._param_to_name = {}\n\n for name, param in self.named_parameters():\n self._param_to_name[param] = name\n\n return self._setup_optimizers_()[0]\n\n def _setup_optimizers_(self):\n \"\"\"Setup the param groups for optimizers.\n\n Returns:\n tuple:\n\n - list of parameters not handled by any optimizers under this algorithm\n - list of parameters handled under this algorithm\n \"\"\"\n default_optimizer = self.default_optimizer\n # The reason of using dict instead of set to hold the parameters is that\n # dict is guaranteed to preserve the insertion order so that we can get\n # deterministic ordering of the parameters.\n new_params = dict()\n handled = dict()\n duplicate_error = \"Parameter %s is handled by multiple optimizers.\"\n\n def _add_params_to_optimizer(params, opt):\n existing_params = set(_get_optimizer_params(opt))\n params = list(filter(lambda p: p not in existing_params, params))\n if params:\n opt.add_param_group({'params': params})\n return params\n\n for child in self._get_children():\n if child in handled:\n continue\n assert id(child) != id(self), \"Child should not be self\"\n if isinstance(child, Algorithm):\n params, child_handled = child._setup_optimizers_()\n for m in child_handled:\n assert m not in handled, duplicate_error % self.get_param_name(\n m)\n handled[m] = 1\n elif isinstance(child, nn.Module):\n params = list(child.parameters())\n elif isinstance(child, nn.Parameter):\n params = [child]\n optimizer = self._module_to_optimizer.get(child, None)\n if optimizer is None:\n new_params.update((p, 1) for p in params)\n if default_optimizer is not None:\n self._module_to_optimizer[child] = default_optimizer\n else:\n for m in params:\n assert m not in handled, duplicate_error % self.get_param_name(\n m)\n params = _add_params_to_optimizer(params, optimizer)\n handled.update((p, 1) for p in 
params)\n\n for p in handled:\n if p in new_params:\n del new_params[p]\n if default_optimizer is not None:\n new_params = _add_params_to_optimizer(new_params.keys(),\n default_optimizer)\n return [], list(handled.keys())\n else:\n return list(new_params.keys()), list(handled.keys())\n\n def optimizers(self, recurse=True, include_ignored_attributes=False):\n \"\"\"Get all the optimizers used by this algorithm.\n\n Args:\n recurse (bool): If True, including all the sub-algorithms\n include_ignored_attributes (bool): If True, still include all child\n attributes without ignoring any.\n Returns:\n list: list of ``Optimizer``s.\n \"\"\"\n opts = copy.copy(self._optimizers)\n if recurse:\n if include_ignored_attributes:\n children = self.children()\n else:\n children = self._get_children()\n for module in children:\n if isinstance(module, Algorithm):\n opts.extend(\n module.optimizers(recurse, include_ignored_attributes))\n return opts\n\n def get_optimizer_info(self):\n \"\"\"Return the optimizer info for all the modules in a string.\n\n TODO: for a subalgorithm that's an ignored attribute, its optimizer info\n won't be obtained.\n \"\"\"\n unhandled = self._setup_optimizers()\n\n optimizer_info = []\n if unhandled:\n optimizer_info.append(\n dict(\n optimizer=\"None\",\n parameters=[self._param_to_name[p] for p in unhandled]))\n\n for optimizer in self.optimizers(include_ignored_attributes=True):\n parameters = _get_optimizer_params(optimizer)\n optimizer_info.append(\n dict(\n optimizer=optimizer.__class__.__name__,\n hypers=optimizer.defaults,\n parameters=sorted(\n [self._param_to_name[p] for p in parameters])))\n json_pretty_str_info = json.dumps(obj=optimizer_info, indent=2)\n\n return json_pretty_str_info\n\n def get_unoptimized_parameter_info(self):\n \"\"\"Return the information about the parameters not being optimized.\n\n Note: the difference of this with the parameters contained in the optimizer\n 'None' from ``get_optimizer_info()`` is that ``get_optimizer_info()`` does not\n traverse all the parameters (e.g., parameters in list, tuple, dict, or set).\n\n Returns:\n str: path of all parameters not being optimized\n \"\"\"\n self._setup_optimizers()\n optimized_parameters = []\n for optimizer in self.optimizers(include_ignored_attributes=True):\n optimized_parameters.extend(_get_optimizer_params(optimizer))\n optimized_parameters = set(optimized_parameters)\n all_parameters = common.get_all_parameters(self)\n unoptimized_parameters = []\n for name, p in all_parameters:\n if p not in optimized_parameters:\n unoptimized_parameters.append(name)\n return json.dumps(obj=sorted(unoptimized_parameters), indent=2)\n\n @property\n def predict_state_spec(self):\n \"\"\"Returns the RNN state spec for ``predict_step()``.\"\"\"\n return self._predict_state_spec\n\n @property\n def rollout_state_spec(self):\n \"\"\"Returns the RNN state spec for ``rollout_step()``.\"\"\"\n return self._rollout_state_spec\n\n @property\n def train_state_spec(self):\n \"\"\"Returns the RNN state spec for ``train_step()``.\"\"\"\n return self._train_state_spec\n\n @property\n def train_info_spec(self):\n \"\"\"The spec for the ``AlgStep.info`` returned from ``train_step()``.\"\"\"\n assert self._train_info_spec is not None, (\n \"train_step() has not been called. train_info_spec is not available.\"\n )\n return self._train_info_spec\n\n @property\n def experience_spec(self):\n \"\"\"Spec for experience.\"\"\"\n assert self._experience_spec is not None, (\n \"observe() has not been called. 
experience_spec is not available.\")\n return self._experience_spec\n\n @property\n def processed_experience_spec(self):\n \"\"\"Spec for processed experience.\n\n Returns:\n TensorSpec: Spec for the experience returned by ``preprocess_experience()``.\n \"\"\"\n assert self._processed_experience_spec is not None, (\n \"preprocess_experience() has not been called. processed_experience_spec \"\n \"is not available\")\n return self._processed_experience_spec\n\n def convert_train_state_to_predict_state(self, state):\n \"\"\"Convert RNN state for ``train_step()`` to RNN state for\n ``predict_step()``.\"\"\"\n alf.nest.assert_same_structure(self._train_state_spec,\n self._predict_state_spec)\n return state\n\n def get_initial_transform_state(self, batch_size):\n r = self._initial_transform_states.get(batch_size)\n if r is None:\n r = spec_utils.zeros_from_spec(self._transform_state_spec,\n batch_size)\n self._initial_transform_states[batch_size] = r\n return r\n\n def get_initial_predict_state(self, batch_size):\n r = self._initial_predict_states.get(batch_size)\n if r is None:\n r = spec_utils.zeros_from_spec(self._predict_state_spec,\n batch_size)\n self._initial_predict_states[batch_size] = r\n return r\n\n def get_initial_rollout_state(self, batch_size):\n r = self._initial_rollout_states.get(batch_size)\n if r is None:\n r = spec_utils.zeros_from_spec(self._rollout_state_spec,\n batch_size)\n self._initial_rollout_states[batch_size] = r\n return r\n\n def get_initial_train_state(self, batch_size):\n r = self._initial_train_states.get(batch_size)\n if r is None:\n r = spec_utils.zeros_from_spec(self._train_state_spec, batch_size)\n self._initial_train_states[batch_size] = r\n return r\n\n @common.add_method(nn.Module)\n def state_dict(self, destination=None, prefix='', visited=None):\n \"\"\"Get state dictionary recursively, including both model state\n and optimizers' state (if any). 
It can handle a number of special cases:\n\n - graph with cycle: save all the states and avoid infinite loop\n - parameter sharing: save only one copy of the shared module/param\n - optimizers: save the optimizers for all the (sub-)algorithms\n\n Args:\n destination (OrderedDict): the destination for storing the state.\n prefix (str): a string to be added before the name of the items\n (modules, params, algorithms etc) as the key used in the\n state dictionary.\n visited (set): a set keeping track of the visited objects.\n\n Returns:\n OrderedDict: the dictionary including both model state and optimizers'\n state (if any).\n \"\"\"\n\n if destination is None:\n destination = OrderedDict()\n destination._metadata = OrderedDict()\n destination._metadata[prefix[:-1]] = local_metadata = dict(\n version=self._version)\n\n if visited is None:\n visited = {self}\n\n if not is_checkpoint_enabled(self):\n return destination\n\n self._save_to_state_dict(destination, prefix, visited)\n opts_dict = OrderedDict()\n for name, child in self._modules.items():\n if child is not None and child not in visited:\n visited.add(child)\n child.state_dict(\n destination, prefix + name + '.', visited=visited)\n if isinstance(self, Algorithm):\n self._setup_optimizers()\n for i, opt in enumerate(self._optimizers):\n new_key = prefix + '_optimizers.%d' % i\n if new_key not in self._opt_keys:\n self._opt_keys.append(new_key)\n opts_dict[self._opt_keys[i]] = opt.state_dict()\n\n destination.update(opts_dict)\n\n return destination\n\n @common.add_method(nn.Module)\n def load_state_dict(self, state_dict, strict=True):\n \"\"\"Load state dictionary for the algorithm.\n\n Args:\n state_dict (dict): a dict containing parameters and persistent buffers.\n strict (bool, optional): whether to strictly enforce that the keys\n in ``state_dict`` match the keys returned by this module's\n ``torch.nn.Module.state_dict`` function. If ``strict=True``, will\n keep lists of missing and unexpected keys; if ``strict=False``,\n missing/unexpected keys will be omitted. (Default: ``True``)\n\n Returns:\n namedtuple:\n - missing_keys: a list of str containing the missing keys.\n - unexpected_keys: a list of str containing the unexpected keys.\n \"\"\"\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, '_metadata', None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def _load(module, prefix='', visited=None):\n if visited is None:\n visited = {self}\n if not is_checkpoint_enabled(module):\n return\n if isinstance(module, Algorithm):\n module._setup_optimizers()\n for i, opt in enumerate(module._optimizers):\n opt_key = prefix + '_optimizers.%d' % i\n if opt_key in state_dict:\n opt.load_state_dict(state_dict[opt_key])\n del state_dict[opt_key]\n elif strict:\n missing_keys.append(opt_key)\n\n for name, child in module._modules.items():\n if child is not None and child not in visited:\n visited.add(child)\n _load(child, prefix + name + '.', visited=visited)\n\n local_metadata = {} if metadata is None else metadata.get(\n prefix[:-1], {})\n if type(module)._load_from_state_dict in (\n Algorithm._load_from_state_dict,\n nn.Module._load_from_state_dict):\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys,\n unexpected_keys, error_msgs, visited)\n else:\n # Some pytorch modules (e.g. 
BatchNorm layers) override\n # _load_from_state_dict, which uses the original\n # Module._load_from_state_dict. So we have to handle them\n # differently. Not using `visited` should not cause a problem\n # because those modules are not implemented by ALF and will not\n # have cycle through them.\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys,\n unexpected_keys, error_msgs)\n\n _load(self)\n\n if len(error_msgs) > 0:\n raise RuntimeError(\n 'Error(s) in loading state_dict for {}:\\n\\t{}'.format(\n self.__class__.__name__, \"\\n\\t\".join(error_msgs)))\n return _IncompatibleKeys(missing_keys, unexpected_keys)\n\n @common.add_method(nn.Module)\n def _save_to_state_dict(self, destination, prefix, visited=None):\n r\"\"\"Saves module state to ``destination`` dictionary, containing a state\n of the module, but not its descendants. This is called on every\n submodule in ``torch.nn.Module.state_dict``. In rare cases, subclasses\n can achieve class-specific behavior by overriding this method with custom\n logic.\n\n Args:\n destination (dict): a dict where state will be stored.\n prefix (str): the prefix for parameters and buffers used in this\n module.\n visited (set): a set keeping track of the visited objects.\n \"\"\"\n if visited is None:\n visited = set()\n\n for name, param in self._parameters.items():\n if param is not None and param not in visited:\n visited.add(param)\n destination[prefix + name] = param.detach()\n for name, buf in self._buffers.items():\n if buf is not None and buf not in visited:\n visited.add(buf)\n destination[prefix + name] = buf.detach()\n\n @common.add_method(nn.Module)\n def _load_from_state_dict(self,\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n visited=None):\n \"\"\"Copies parameters and buffers from ``state_dict`` into only\n this module, but not its descendants. This is called on every submodule\n in ``torch.nn.Module.load_state_dict``. Metadata saved for this\n module in input ``state_dict`` is provided as ``local_metadata``.\n For state dicts without metadata, ``local_metadata`` is empty.\n Subclasses can achieve class-specific backward compatible loading using\n the version number at ``local_metadata.get(\"version\", None)``.\n\n .. note::\n\n ``state_dict`` is not the same object as the input ``state_dict`` to\n ``torch.nn.Module.load_state_dict``. 
So it can be modified.\n\n Args:\n state_dict (dict): a dict containing parameters and\n persistent buffers.\n prefix (str): the prefix for parameters and buffers used in this\n module.\n local_metadata (dict): a dict containing the metadata for this module.\n strict (bool): whether to strictly enforce that the keys in\n ``state_dict`` with ``prefix`` match the names of\n parameters and buffers in this module; if ``strict=True``,\n will keep a list of missing and unexpected keys.\n missing_keys (list of str): if ``strict=True``, add missing keys to\n this list.\n unexpected_keys (list of str): if ``strict=True``, add unexpected\n keys to this list.\n error_msgs (list of str): error messages should be added to this\n list, and will be reported together in\n ``torch.nn.Module.load_state_dict``.\n visited (set): a set keeping track of the visited objects.\n \"\"\"\n if visited is None:\n visited = set()\n\n for hook in self._load_state_dict_pre_hooks.values():\n hook(state_dict, prefix, local_metadata, strict, missing_keys,\n unexpected_keys, error_msgs)\n\n local_name_params = itertools.chain(self._parameters.items(),\n self._buffers.items())\n local_state = {k: v for k, v in local_name_params if v is not None}\n\n for name, param in local_state.items():\n if param is not None and param not in visited:\n visited.add(param)\n else:\n continue\n key = prefix + name\n if key in state_dict:\n input_param = state_dict[key]\n\n # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+\n if len(param.shape) == 0 and len(input_param.shape) == 1:\n input_param = input_param[0]\n\n if input_param.shape != param.shape:\n # local shape should match the one in checkpoint\n error_msgs.append(\n 'size mismatch for {}: copying a param with shape {} from checkpoint, '\n 'the shape in current model is {}.'.format(\n key, input_param.shape, param.shape))\n continue\n\n try:\n with torch.no_grad():\n param.copy_(input_param)\n except Exception as ex:\n error_msgs.append(\n 'While copying the parameter named \"{}\", '\n 'whose dimensions in the model are {} and '\n 'whose dimensions in the checkpoint are {}, '\n 'an exception occurred : {}.'.format(\n key, param.size(), input_param.size(), ex.args))\n elif strict:\n missing_keys.append(key)\n\n if strict:\n for key in state_dict.keys():\n if key.startswith(prefix):\n input_name = key[len(prefix):]\n input_name = input_name.split(\n '.', 1)[0] # get the name of param/buffer/child\n if input_name not in self._modules and input_name not in local_state:\n unexpected_keys.append(key)\n\n @common.add_method(nn.Module)\n def __repr__(self):\n return self._repr()\n\n @common.add_method(nn.Module)\n def _repr(self, visited=None):\n \"\"\"Adapted from __repr__() in torch/nn/modules/module.py. 
to handle cycles.\"\"\"\n\n if visited is None:\n visited = [self]\n\n # We treat the extra repr like the sub-module, one item per line\n extra_lines = []\n extra_repr = self.extra_repr()\n # empty string will be split into list ['']\n if extra_repr:\n extra_lines = extra_repr.split('\\n')\n child_lines = []\n for key, module in self._modules.items():\n if module in visited:\n continue\n visited.append(module)\n if isinstance(module, nn.Module):\n mod_str = module._repr(visited)\n else:\n mod_str = repr(module)\n mod_str = _addindent(mod_str, 2)\n child_lines.append('(' + key + '): ' + mod_str)\n lines = extra_lines + child_lines\n\n main_str = self._get_name() + '('\n if lines:\n # simple one-liner info, which most builtin Modules will use\n if len(extra_lines) == 1 and not child_lines:\n main_str += extra_lines[0]\n else:\n main_str += '\\n ' + '\\n '.join(lines) + '\\n'\n\n main_str += ')'\n return main_str\n\n # Subclass may override update_with_gradient() to allow customized training\n def update_with_gradient(self,\n loss_info,\n valid_masks=None,\n weight=1.0,\n batch_info=None):\n \"\"\"Complete one iteration of training.\n\n Update parameters using the gradient with respect to ``loss_info``.\n\n Args:\n loss_info (LossInfo): loss with shape :math:`(T, B)` (except for\n ``loss_info.scalar_loss``)\n valid_masks (Tensor): masks indicating which samples are valid.\n (``shape=(T, B), dtype=torch.float32``)\n weight (float): weight for this batch. Loss will be multiplied with\n this weight before calculating gradient.\n batch_info (BatchInfo): information about this batch returned by\n ``ReplayBuffer.get_batch()``\n Returns:\n tuple:\n - loss_info (LossInfo): loss information.\n - params (list[(name, Parameter)]): list of parameters being updated.\n \"\"\"\n masks = None\n if (batch_info is not None and batch_info.importance_weights is not ()\n and self._config.priority_replay_beta != 0):\n masks = batch_info.importance_weights.pow(\n -self._config.priority_replay_beta).unsqueeze(0)\n\n if valid_masks is not None:\n if masks is not None:\n masks = masks * valid_masks\n else:\n masks = valid_masks\n\n if masks is not None:\n loss_info = alf.nest.map_structure(\n lambda l: torch.mean(l * masks) if len(l.shape) == 2 else l,\n loss_info)\n else:\n loss_info = alf.nest.map_structure(lambda l: torch.mean(l),\n loss_info)\n if isinstance(loss_info.scalar_loss, torch.Tensor):\n assert len(loss_info.scalar_loss.shape) == 0\n loss_info = loss_info._replace(\n loss=add_ignore_empty(loss_info.loss, loss_info.scalar_loss))\n\n unhandled = self._setup_optimizers()\n unhandled = [self._param_to_name[p] for p in unhandled]\n assert not unhandled, (\"'%s' has some modules/parameters that do not \"\n \"have an optimizer: %s\" % (self.name, unhandled))\n optimizers = self.optimizers()\n for optimizer in optimizers:\n optimizer.zero_grad(set_to_none=True)\n\n if isinstance(loss_info.loss, torch.Tensor):\n loss = weight * loss_info.loss\n loss.backward()\n\n all_params = []\n for optimizer in optimizers:\n params = []\n for param_group in optimizer.param_groups:\n params.extend(param_group['params'])\n assert params, (\n \"The recorded optimizer '\" + optimizer.name +\n \"' hasn't been used for learning any parameters! 
Please check.\"\n )\n all_params.extend(params)\n optimizer.step()\n\n all_params = [(self._param_to_name[p], p) for p in all_params]\n return loss_info, all_params\n\n # Subclass may override calc_loss() to allow more sophisticated loss\n def calc_loss(self, info):\n \"\"\"Calculate the loss at each step for each sample.\n\n Args:\n info (nest): information collected for training. It is batched\n from each ``AlgStep.info`` returned by ``rollout_step()``\n (on-policy training) or ``train_step()`` (off-policy training).\n Returns:\n LossInfo: loss at each time step for each sample in the\n batch. The shapes of the tensors in loss info should be\n :math:`(T, B)`.\n \"\"\"\n assert isinstance(info, LossInfo), (\n \"info returned by\"\n \" train_step() should be LossInfo. Otherwise you need to override\"\n \" calc_loss() to generate LossInfo from info\")\n return info\n\n def train_from_unroll(self, experience, train_info):\n \"\"\"Train given the info collected from ``unroll()``. This function can\n be called by any child algorithm that doesn't have the unroll logic but\n has a different training logic from its parent (e.g., off-policy).\n\n Args:\n experience (Experience): collected during ``unroll()``.\n train_info (nest): ``AlgStep.info`` returned by ``rollout_step()``.\n\n Returns:\n int: number of steps that have been trained\n \"\"\"\n if self.is_rl():\n valid_masks = (experience.step_type != StepType.LAST).to(\n torch.float32)\n else:\n valid_masks = None\n experience = experience._replace(rollout_info_field='rollout_info')\n loss_info = self.calc_loss(train_info)\n loss_info, params = self.update_with_gradient(loss_info, valid_masks)\n time_step = experience_to_time_step(experience)\n self.after_update(time_step, train_info)\n self.summarize_train(experience, train_info, loss_info, params)\n return torch.tensor(alf.nest.get_nest_shape(experience)).prod()\n\n @common.mark_replay\n def train_from_replay_buffer(self, update_global_counter=False):\n \"\"\"This function can be called by any algorithm that has its own\n replay buffer configured. There are several parameters specified in\n ``self._config`` that will affect how the training is performed:\n\n - ``initial_collect_steps``: only start replaying and training after so\n many time steps have been stored in the replay buffer\n - ``mini_batch_size``: the batch size of a minibatch\n - ``mini_batch_length``: the temporal extension of a minibatch. An\n algorithm can sample temporally correlated experiences for training\n stateful models by setting this value greater than 1.\n - ``num_updates_per_train_iter``: how many updates to perform in each\n training iteration. Its behavior might be different depending on the\n value of ``config.whole_replay_buffer_training``:\n\n - If ``True``, each update will scan over the entire buffer to get\n chopped minibatches and a random experience shuffling is performed\n before each update;\n - If ``False``, each update will sample a new minibatch from the replay\n buffer.\n\n - ``whole_replay_buffer_training``: a very special case where all data in\n the replay buffer will be used for training (e.g., PPO). In this case,\n for every update in ``num_updates_per_train_iter``, the data will\n be shuffled and divided into\n ``buffer_size//(mini_batch_size * mini_batch_length)`` \"mini-updates\".\n If ``mini_batch_length`` is None, then ``unroll_length`` will be used\n for this calculation.\n\n Args:\n update_global_counter (bool): controls whether this function changes\n the global counter for summary. 
If there are multiple\n algorithms, then only the parent algorithm should change this\n quantity and child algorithms should disable the flag. When it's\n ``True``, it will affect the counter only if\n ``config.update_counter_every_mini_batch=True``.\n \"\"\"\n config: TrainerConfig = self._config\n\n if self._exp_replayer.total_size < config.initial_collect_steps:\n # returns 0 if haven't started training yet; throughput will be 0\n return 0\n\n # TODO: If this function can be called asynchronously, and using\n # prioritized replay, then make sure replay and train below is atomic.\n with record_time(\"time/replay\"):\n mini_batch_size = config.mini_batch_size\n if mini_batch_size is None:\n mini_batch_size = self._exp_replayer.batch_size\n if config.whole_replay_buffer_training:\n experience = self._exp_replayer.replay_all()\n if config.clear_replay_buffer:\n self._exp_replayer.clear()\n num_updates = config.num_updates_per_train_iter\n batch_info = None\n else:\n assert config.mini_batch_length is not None, (\n \"No mini_batch_length is specified for off-policy training\"\n )\n experience, batch_info = self._exp_replayer.replay(\n sample_batch_size=(\n mini_batch_size * config.num_updates_per_train_iter),\n mini_batch_length=config.mini_batch_length)\n num_updates = 1\n\n with record_time(\"time/train\"):\n return self._train_experience(\n experience, batch_info, num_updates, mini_batch_size,\n config.mini_batch_length,\n (config.update_counter_every_mini_batch\n and update_global_counter))\n\n def _train_experience(self, experience, batch_info, num_updates,\n mini_batch_size, mini_batch_length,\n update_counter_every_mini_batch):\n \"\"\"Train using experience.\"\"\"\n experience = dist_utils.params_to_distributions(\n experience, self.experience_spec)\n if self._exp_replayer_type != \"one_time\":\n # The experience put in one_time replayer is already transformed\n # in unroll().\n experience = alf.data_structures.add_batch_info(\n experience, batch_info, self._exp_replayer.replay_buffer)\n experience = self.transform_experience(experience)\n # allow data_transformers to change batch_info\n if experience.batch_info != ():\n batch_info = experience.batch_info\n experience = alf.data_structures.clear_batch_info(experience)\n time_step = experience_to_time_step(experience)\n time_step, rollout_info = self.preprocess_experience(\n time_step, experience.rollout_info, batch_info)\n experience = experience._replace(\n step_type=time_step.step_type,\n reward=time_step.reward,\n discount=time_step.discount,\n observation=time_step.observation,\n prev_action=time_step.prev_action,\n env_id=time_step.env_id,\n rollout_info=rollout_info)\n if self._processed_experience_spec is None:\n self._processed_experience_spec = dist_utils.extract_spec(\n experience, from_dim=2)\n experience = dist_utils.distributions_to_params(experience)\n\n length = alf.nest.get_nest_size(experience, dim=1)\n mini_batch_length = (mini_batch_length or length)\n if batch_info is not None:\n assert mini_batch_length == length, (\n \"mini_batch_length (%s) is \"\n \"different from length (%s). 
Not supported.\" %\n (mini_batch_length, length))\n\n if mini_batch_length > length:\n common.warning_once(\n \"mini_batch_length=%s is set to a smaller length=%s\" %\n (mini_batch_length, length))\n mini_batch_length = length\n elif length % mini_batch_length:\n common.warning_once(\n \"length=%s not a multiple of mini_batch_length=%s\" %\n (length, mini_batch_length))\n length = length // mini_batch_length * mini_batch_length\n experience = alf.nest.map_structure(lambda x: x[:, :length, ...],\n experience)\n common.warning_once(\n \"Experience length has been cut to %s\" % length)\n\n if len(alf.nest.flatten(self.train_state_spec)) > 0:\n if not self._use_rollout_state:\n # If not using rollout states, then we will assume zero initial\n # training states. To have a proper state warm up,\n # mini_batch_length should be greater than 1. Otherwise the states\n # are always 0s.\n if mini_batch_length == 1:\n logging.fatal(\n \"Should use TrainerConfig.use_rollout_state=True \"\n \"for training from a replay buffer when minibatch_length==1, \"\n \"otherwise the initial states are always zeros!\")\n else:\n # In this case, a state warm up is recommended. For example,\n # having mini_batch_length>1 and discarding first several\n # steps when computing losses. For a warm up, make sure to\n # leave a mini_batch_length > 1 if any recurrent model is to\n # be trained.\n common.warning_once(\n \"Consider using TrainerConfig.use_rollout_state=True \"\n \"for training from a replay buffer.\")\n elif mini_batch_length == 1:\n # If using rollout states and mini_batch_length=1, there will be\n # no gradient flowing in any recurrent matrix. Only the output\n # layers on top of the recurrent output will be trained.\n common.warning_once(\n \"Using rollout states but with mini_batch_length=1. In \"\n \"this case, any recurrent model can't be properly trained!\"\n )\n else:\n # If using rollout states with mini_batch_length>1. In theory,\n # any recurrent model can be properly trained. 
With a greater\n # mini_batch_length, the temporal correlation can be better\n # captured.\n pass\n\n experience = alf.nest.map_structure(\n lambda x: x.reshape(-1, mini_batch_length, *x.shape[2:]),\n experience)\n\n batch_size = alf.nest.get_nest_batch_size(experience)\n\n def _make_time_major(nest):\n \"\"\"Put the time dim to axis=0.\"\"\"\n return alf.nest.map_structure(lambda x: x.transpose(0, 1), nest)\n\n for u in range(num_updates):\n if mini_batch_size < batch_size:\n # here we use the cpu version of torch.randperm(n) to generate\n # the permuted indices, as the cuda version of torch.randperm(n)\n # seems to have a bug when n is a large number, generating\n # negative or very large values that cause out of bound kernel\n # error: https://github.com/pytorch/pytorch/issues/59756\n indices = alf.nest.utils.convert_device(\n torch.randperm(batch_size, device='cpu'))\n experience = alf.nest.map_structure(lambda x: x[indices],\n experience)\n if batch_info is not None:\n batch_info = alf.nest.map_structure(\n lambda x: x[indices]\n if isinstance(x, torch.Tensor) else x, batch_info)\n for b in range(0, batch_size, mini_batch_size):\n if update_counter_every_mini_batch:\n alf.summary.increment_global_counter()\n is_last_mini_batch = (u == num_updates - 1\n and b + mini_batch_size >= batch_size)\n do_summary = (is_last_mini_batch\n or update_counter_every_mini_batch)\n alf.summary.enable_summary(do_summary)\n batch = alf.nest.map_structure(\n lambda x: x[b:min(batch_size, b + mini_batch_size)],\n experience)\n if batch_info:\n binfo = alf.nest.map_structure(\n lambda x: x[b:min(batch_size, b + mini_batch_size)]\n if isinstance(x, torch.Tensor) else x, batch_info)\n else:\n binfo = None\n batch = _make_time_major(batch)\n exp, train_info, loss_info, params = self._update(\n batch,\n binfo,\n weight=alf.nest.get_nest_size(batch, 1) / mini_batch_size)\n if do_summary:\n self.summarize_train(exp, train_info, loss_info, params)\n\n train_steps = batch_size * mini_batch_length * num_updates\n return train_steps\n\n def _collect_train_info_sequentially(self, experience):\n batch_size = alf.nest.get_nest_size(experience, dim=1)\n initial_train_state = self.get_initial_train_state(batch_size)\n if self._use_rollout_state:\n policy_state = alf.nest.map_structure(lambda state: state[0, ...],\n experience.state)\n else:\n policy_state = initial_train_state\n\n num_steps = alf.nest.get_nest_size(experience, dim=0)\n info_list = []\n for counter in range(num_steps):\n exp = alf.nest.map_structure(lambda ta: ta[counter], experience)\n exp = dist_utils.params_to_distributions(\n exp, self.processed_experience_spec)\n if self._exp_contains_step_type:\n policy_state = common.reset_state_if_necessary(\n policy_state, initial_train_state,\n exp.step_type == StepType.FIRST)\n elif policy_state is not ():\n common.warning_once(\n \"Policy state is non-empty but the experience doesn't \"\n \"contain the 'step_type' field. 
No way to reinitialize \"\n \"the state but will simply keep updating it.\")\n time_step = experience_to_time_step(exp)\n policy_step = self.train_step(time_step, policy_state,\n exp.rollout_info)\n if self._train_info_spec is None:\n self._train_info_spec = dist_utils.extract_spec(\n policy_step.info)\n info_list.append(\n dist_utils.distributions_to_params(policy_step.info))\n policy_state = policy_step.state\n\n info = alf.nest.utils.stack_nests(info_list)\n info = dist_utils.params_to_distributions(info, self.train_info_spec)\n return info\n\n def _collect_train_info_parallelly(self, experience):\n shape = alf.nest.get_nest_shape(experience)\n length, batch_size = shape[:2]\n\n exp = alf.nest.map_structure(lambda x: x.reshape(-1, *x.shape[2:]),\n experience)\n\n if self._use_rollout_state:\n policy_state = exp.state\n else:\n size = alf.nest.get_nest_size(exp, dim=0)\n policy_state = self.get_initial_train_state(size)\n\n exp = dist_utils.params_to_distributions(\n exp, self.processed_experience_spec)\n time_step = experience_to_time_step(exp)\n policy_step = self.train_step(time_step, policy_state,\n exp.rollout_info)\n\n if self._train_info_spec is None:\n self._train_info_spec = dist_utils.extract_spec(policy_step.info)\n info = dist_utils.distributions_to_params(policy_step.info)\n info = alf.nest.map_structure(\n lambda x: x.reshape(length, batch_size, *x.shape[1:]), info)\n info = dist_utils.params_to_distributions(info, self.train_info_spec)\n return info\n\n def _update(self, experience, batch_info, weight):\n length = alf.nest.get_nest_size(experience, dim=0)\n if self._config.temporally_independent_train_step or length == 1:\n train_info = self._collect_train_info_parallelly(experience)\n else:\n train_info = self._collect_train_info_sequentially(experience)\n\n experience = dist_utils.params_to_distributions(\n experience, self.processed_experience_spec)\n\n loss_info = self.calc_loss(train_info)\n if loss_info.priority is not ():\n priority = (loss_info.priority**self._config.priority_replay_alpha\n + self._config.priority_replay_eps)\n self._exp_replayer.update_priority(batch_info.env_ids,\n batch_info.positions, priority)\n if self._debug_summaries and alf.summary.should_record_summaries():\n with alf.summary.scope(\"PriorityReplay\"):\n summary_utils.add_mean_hist_summary(\n \"new_priority\", priority)\n summary_utils.add_mean_hist_summary(\n \"old_importance_weight\", batch_info.importance_weights)\n else:\n assert batch_info is None or batch_info.importance_weights is (), (\n \"Priority replay is enabled. 
But priority is not calculated.\")\n\n if self.is_rl():\n valid_masks = (experience.step_type != StepType.LAST).to(\n torch.float32)\n else:\n valid_masks = None\n loss_info, params = self.update_with_gradient(loss_info, valid_masks,\n weight, batch_info)\n time_step = experience_to_time_step(experience)\n self.after_update(time_step, train_info)\n\n return experience, train_info, loss_info, params\n\n\nclass Loss(Algorithm):\n \"\"\"Algorithm that uses its input as loss.\n\n It can be subclassed to customize calc_loss().\n \"\"\"\n\n def __init__(self, loss_weight=1.0, name=\"LossAlg\"):\n super().__init__(name=name)\n self._loss_weight = loss_weight\n\n def predict_step(self, inputs, state=None):\n return AlgStep()\n\n def rollout_step(self, inputs, state=None):\n if self.on_policy:\n return AlgStep(info=inputs)\n else:\n return AlgStep()\n\n def train_step(self, inputs, state=None, rollout_info=None):\n return AlgStep(info=inputs)\n\n def calc_loss(self, info):\n return LossInfo(loss=self._loss_weight * info, extra=info)\n"
] |
[
[
"torch.nn.modules.module._addindent",
"torch.cuda.memory_reserved",
"torch.mean",
"torch.randperm",
"torch.cuda.max_memory_reserved",
"torch.cuda.max_memory_allocated",
"torch.no_grad",
"torch.nn.modules.module._IncompatibleKeys",
"torch.cuda.reset_max_memory_allocated",
"torch.cuda.is_available",
"torch.cuda.memory_allocated"
]
] |
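The optimizer bookkeeping in the algorithm.py snippet above rests on one small step: before calling add_param_group, filter out parameters the optimizer already tracks, so a shared module never registers a parameter twice. A minimal runnable sketch of that step; the standalone name add_params_to_optimizer and the toy network are invented for illustration:

import torch
import torch.nn as nn

def add_params_to_optimizer(params, opt):
    # mirrors the filtering in _add_params_to_optimizer above: skip parameters
    # the optimizer already owns, then register the remainder as a new group
    existing = {p for group in opt.param_groups for p in group['params']}
    new = [p for p in params if p not in existing]
    if new:
        opt.add_param_group({'params': new})
    return new

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
opt = torch.optim.SGD(net[0].parameters(), lr=0.1)  # first layer already handled
added = add_params_to_optimizer(list(net.parameters()), opt)
print(len(added))  # 2: only the second Linear's weight and bias are new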
tvlenin/blueoil
|
[
"810680df75e2640f67d515c377ba2b4531b9e584"
] |
[
"lmnet/lmnet/datasets/base.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright 2018 The Blueoil Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nimport os\n\nimport numpy as np\nimport PIL\n\nfrom lmnet import environment\n\n\nclass Base(metaclass=ABCMeta):\n \"\"\"Dataset base class\"\"\"\n\n def __init__(\n self,\n subset=\"train\",\n batch_size=10,\n augmentor=None,\n pre_processor=None,\n data_format='NHWC',\n seed=None,\n **kwargs\n ):\n assert subset in self.available_subsets, self.available_subsets\n self.subset = subset\n self.batch_size = batch_size\n self.augmentor = augmentor\n self.pre_processor = pre_processor\n self.data_format = data_format\n self.seed = seed or 0\n\n @property\n def data_dir(self):\n extend_dir = self.__class__.extend_dir\n\n if extend_dir is None:\n data_dir = environment.DATA_DIR\n else:\n data_dir = os.path.join(environment.DATA_DIR, extend_dir)\n return data_dir\n\n @property\n @staticmethod\n @abstractmethod\n def classes():\n \"\"\"Return the classes list in the data set.\"\"\"\n pass\n\n @property\n @staticmethod\n @abstractmethod\n def num_classes():\n \"\"\"Return the number of classes in the data set.\"\"\"\n pass\n\n @property\n @staticmethod\n @abstractmethod\n def extend_dir():\n \"\"\"Return the extend dir path of the data set.\"\"\"\n pass\n\n @property\n @staticmethod\n @abstractmethod\n def available_subsets():\n \"\"\"Returns the list of available subsets.\"\"\"\n return ['train', 'train_validation_saving', 'validation']\n\n @property\n @abstractmethod\n def num_per_epoch(self):\n \"\"\"Returns the number of datas in the data subset.\"\"\"\n pass\n\n @property\n @abstractmethod\n def __getitem__(self, i, type=None):\n \"\"\"Returns the i-th item of the dataset.\"\"\"\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def __len__(self):\n \"\"\"returns the number of items in the dataset.\"\"\"\n raise NotImplementedError()\n\n\nclass SegmentationBase(Base, metaclass=ABCMeta):\n\n def __init__(self, *args, label_colors=None, **kwargs):\n super(SegmentationBase, self).__init__(*args, **kwargs)\n self._label_colors = label_colors\n\n @property\n def label_colors(self):\n if self._label_colors:\n return self._label_colors\n random_state = np.random.RandomState(seed=self.seed)\n self._label_colors = random_state.choice(256, (self.num_classes, 3))\n return self._label_colors\n\n\nclass ObjectDetectionBase(Base, metaclass=ABCMeta):\n\n @classmethod\n @abstractmethod\n def count_max_boxes(cls):\n \"\"\"Count max boxes size over all subsets.\"\"\"\n pass\n\n @property\n @abstractmethod\n def num_max_boxes(self):\n \"\"\"Returns conunt max box size of available subsets.\"\"\"\n pass\n\n def _fill_dummy_boxes(self, gt_boxes):\n dummy_gt_box = [0, 0, 0, 0, -1]\n if len(gt_boxes) == 0:\n gt_boxes = np.array(dummy_gt_box * self.num_max_boxes)\n return gt_boxes.reshape([self.num_max_boxes, 5])\n elif len(gt_boxes) < 
self.num_max_boxes:\n diff = self.num_max_boxes - len(gt_boxes)\n gt_boxes = np.append(gt_boxes, [dummy_gt_box] * diff, axis=0)\n return gt_boxes\n return gt_boxes\n\n def _change_gt_boxes_shape(self, gt_boxes_list):\n \"\"\"Change gt boxes list shape from [batch_size, num_boxes, 5] to [batch_size, num_max_boxes, 5].\n\n fill dummy box when num boxes < num max boxes.\n\n Args:\n gt_boxes_list: python list of gt_boxes(np.ndarray). gt_boxes's shape is [batch_size, num_boxes, 5]\n\n Return:\n gt_boxes_list: numpy ndarray [batch_size, num_max_boxes, 5].\n \"\"\"\n results = []\n\n for gt_boxes in gt_boxes_list:\n gt_boxes = self._fill_dummy_boxes(gt_boxes)\n results.append(gt_boxes)\n\n return np.array(results)\n\n def _get_image(self, target_file):\n image = PIL.Image.open(target_file)\n image = image.convert(\"RGB\")\n image = np.array(image)\n return image\n\n\nclass DistributionInterface(metaclass=ABCMeta):\n\n @abstractmethod\n def update_dataset(self, indices):\n \"\"\"Update own dataset by indices.\"\"\"\n pass\n\n @abstractmethod\n def get_shuffle_index(self):\n \"\"\"Return list of shuffled index.\"\"\"\n pass\n\n\nclass StoragePathCustomizable():\n \"\"\"Make it possible to specify train and validation paths.\n\n class.extend_dir: specify train path.\n class.validation_extend_dir: specify validation path.\n\n When validation_extend_dir isn't set, generate validation data from the train set.\n You should implement the validation subset split from train data with `validation_size` in a subclass.\n \"\"\"\n\n available_subsets = ['train', 'validation']\n\n def __init__(\n self,\n validation_size=0.1,\n *args,\n **kwargs\n ):\n # validation subset size\n self.validation_size = validation_size\n if hasattr(self.__class__, \"validation_extend_dir\"):\n self.validation_size = 0\n\n super().__init__(*args, **kwargs)\n\n @property\n def _train_data_dir(self):\n extend_dir = self.__class__.extend_dir\n if extend_dir is None:\n data_dir = environment.DATA_DIR\n else:\n data_dir = os.path.join(environment.DATA_DIR, extend_dir)\n return data_dir\n\n @property\n def _validation_data_dir(self):\n extend_dir = self.__class__.extend_dir\n if hasattr(self.__class__, \"validation_extend_dir\"):\n extend_dir = self.__class__.validation_extend_dir\n\n if extend_dir is None:\n data_dir = environment.DATA_DIR\n else:\n data_dir = os.path.join(environment.DATA_DIR, extend_dir)\n return data_dir\n\n @property\n def data_dir(self):\n if self.subset == \"train\":\n return self._train_data_dir\n\n if self.subset == \"validation\":\n return self._validation_data_dir\n"
] |
[
[
"numpy.array",
"numpy.random.RandomState",
"numpy.append"
]
] |
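The padding scheme in ObjectDetectionBase._fill_dummy_boxes above keeps every sample at a fixed num_max_boxes rows, with class id -1 marking a dummy box. A standalone sketch of the same logic, rewritten as a free function purely for illustration:

import numpy as np

def fill_dummy_boxes(gt_boxes, num_max_boxes):
    # a dummy row is 4 zero coordinates plus class id -1 ("no object")
    dummy_gt_box = [0, 0, 0, 0, -1]
    if len(gt_boxes) == 0:
        return np.array(dummy_gt_box * num_max_boxes).reshape([num_max_boxes, 5])
    if len(gt_boxes) < num_max_boxes:
        diff = num_max_boxes - len(gt_boxes)
        return np.append(gt_boxes, [dummy_gt_box] * diff, axis=0)
    return gt_boxes

print(fill_dummy_boxes(np.array([[10, 20, 30, 40, 2]]), 3))
# [[10 20 30 40  2]
#  [ 0  0  0  0 -1]
#  [ 0  0  0  0 -1]]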
dalexa10/EngineeringDesignOptimization
|
[
"eb5b5e4edd773aef629f59aea8a9771af41bd224"
] |
[
"main.py"
] |
[
"import numpy as np\nimport pandas as pd\n\nX = np.array([1, 2, 3, 4])\n\nprint(X)"
] |
[
[
"numpy.array"
]
] |
gjy3035/WSAL_released
|
[
"fad6490d032eb37dfb62bc0a1eb30fc062dc27a4"
] |
[
"datasets/SYN/SYN.py"
] |
[
"import os\r\nimport sys\r\n\r\nimport numpy as np\r\nimport pdb\r\nfrom PIL import Image\r\nfrom torch.utils import data\r\n\r\nfrom .config_SYN import root, raw_img_path, raw_mask_path\r\nfrom ..encoder import DataEncoder\r\nfrom ..shuffleData import getShuffleIdx\r\nfrom config import cfg\r\nfrom utils.timer import Timer\r\n\r\nimport torch\r\n\r\n\r\n\r\ndef default_loader(path):\r\n return Image.open(path)\r\n\r\n\r\n\r\n\r\nclass SYN(data.Dataset):\r\n def __init__(self, mode, list_filename, simul_transform=None, transform=None, target_transform=None):\r\n \r\n self.img_root = raw_img_path\r\n self.mask_root = raw_mask_path\r\n list_file = root + '/' + list_filename\r\n\r\n self.simul_transform = simul_transform\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n\r\n self.data_encoder = DataEncoder()\r\n\r\n self.fnames = []\r\n self.boxes = []\r\n self.labels = []\r\n # self.ori_boxes = []\r\n # self.ori_labels = []\r\n\r\n\r\n \r\n with open(list_file) as f:\r\n lines = f.readlines()\r\n self.num_samples = len(lines)\r\n\r\n for line in lines:\r\n splited = line.strip().split()\r\n self.fnames.append(splited[0])\r\n\r\n num_objs = int(splited[1])\r\n box = []\r\n label = []\r\n for i in range(num_objs):\r\n xmin = splited[2+5*i]\r\n ymin = splited[3+5*i]\r\n xmax = splited[4+5*i]\r\n ymax = splited[5+5*i]\r\n c = splited[6+5*i]\r\n box.append([float(xmin),float(ymin),float(xmax),float(ymax)])\r\n label.append(int(c))\r\n self.boxes.append(torch.Tensor(box))\r\n # self.ori_boxes.append(torch.Tensor(box))\r\n self.labels.append(torch.LongTensor(label))\r\n self.img_loader = default_loader\r\n\r\n def __getitem__(self, idx):\r\n\r\n fname = self.fnames[idx]\r\n # _t = {'trans':Timer(), 'load' : Timer(), 'compute':Timer()}\r\n # _t['load'].tic()\r\n img = self.img_loader(os.path.join(self.img_root,fname))\r\n mask = self.img_loader(os.path.join(self.mask_root,fname))\r\n \r\n # _t['load'].toc(average=False)\r\n boxes = self.boxes[idx].clone()\r\n labels = self.labels[idx]\r\n ori_labels = self.labels[idx].clone()\r\n \r\n # _t['trans'].tic()\r\n # flip and rescale\r\n if self.simul_transform is not None:\r\n img, mask, boxes = self.simul_transform(img, mask, boxes)\r\n # _t['trans'].toc(average=False)\r\n\r\n # _t['compute'].tic()\r\n ori_boxes = boxes.clone()\r\n # Scale bbox locaitons to [0,1]\r\n w,h = img.size\r\n boxes = boxes/torch.Tensor([w,h,w,h]).expand_as(boxes)\r\n \r\n\r\n # Encode bbx & objects labels.\r\n boxes, labels = self.data_encoder.encode(boxes, labels)\r\n\r\n # _t['compute'].toc(average=False)\r\n # print '{:.3f}s {:.3f}s {:.3f}s'.format(_t['trans'].average_time,_t['load'].average_time,_t['compute'].average_time)\r\n # gen roi data for roipooling \r\n shuffle_idx = getShuffleIdx(ori_boxes.size()[0])\r\n\r\n shuffle_idx = torch.from_numpy(shuffle_idx.astype(np.int64))\r\n ori_boxes = torch.index_select(ori_boxes, 0, shuffle_idx)\r\n\r\n ori_labels = torch.index_select(ori_labels, 0, shuffle_idx)\r\n # Normalize\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n # change the seg labels 255->19 \r\n if self.target_transform is not None:\r\n mask = self.target_transform(mask)\r\n # pdb.set_trace()\r\n \r\n return img, mask, boxes, labels, ori_boxes, ori_labels\r\n\r\n def __len__(self):\r\n return self.num_samples\r\n"
] |
[
[
"torch.LongTensor",
"torch.index_select",
"torch.Tensor"
]
] |
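Two tensor idioms in SYN.__getitem__ above can be checked in isolation: normalizing pixel boxes to [0, 1] by broadcasting the image size, and permuting rows with torch.index_select. A self-contained sketch with made-up values:

import torch

boxes = torch.Tensor([[32., 16., 96., 64.]])        # [xmin, ymin, xmax, ymax]
w, h = 128, 128
norm = boxes / torch.Tensor([w, h, w, h]).expand_as(boxes)
print(norm)  # tensor([[0.2500, 0.1250, 0.7500, 0.5000]])

shuffle_idx = torch.tensor([1, 0])                  # stand-in for getShuffleIdx output
rows = torch.Tensor([[1., 2.], [3., 4.]])
print(torch.index_select(rows, 0, shuffle_idx))     # rows swapped: [[3., 4.], [1., 2.]]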
Pavelrst/denoising_diffusion
|
[
"71a2ca62f612c9c5af432558e8954ed522f10f53"
] |
[
"lib/dataset.py"
] |
[
"import torch\r\nimport torchvision.transforms as T\r\nimport torchvision.datasets\r\nimport numpy as np\r\nfrom torch.utils.data import Subset\r\n\r\nclass ReshapeTransform:\r\n def __init__(self, new_size):\r\n self.new_size = new_size\r\n\r\n def __call__(self, img):\r\n return torch.reshape(img, self.new_size)\r\n\r\n\r\nclass CropTransform:\r\n def __init__(self, bbox):\r\n self.bbox = bbox\r\n\r\n def __call__(self, img):\r\n return img.crop(self.bbox)\r\n\r\n\r\ndef get_train_data(conf):\r\n if conf.dataset.name == 'cifar10':\r\n transform = T.Compose(\r\n [\r\n T.RandomHorizontalFlip(),\r\n T.ToTensor(),\r\n T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\r\n ]\r\n )\r\n transform_test = T.Compose(\r\n [\r\n T.ToTensor(),\r\n T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\r\n ]\r\n )\r\n\r\n train_set = torchvision.datasets.CIFAR10(conf.dataset.path,\r\n train=True,\r\n transform=transform,\r\n download=True)\r\n valid_set = torchvision.datasets.CIFAR10(conf.dataset.path,\r\n train=True,\r\n transform=transform_test,\r\n download=True)\r\n\r\n num_train = len(train_set)\r\n indices = torch.randperm(num_train).tolist()\r\n valid_size = int(np.floor(0.05 * num_train))\r\n\r\n train_idx, valid_idx = indices[valid_size:], indices[:valid_size]\r\n\r\n train_set = Subset(train_set, train_idx)\r\n valid_set = Subset(valid_set, valid_idx)\r\n\r\n elif conf.dataset.name == 'svhn':\r\n transform = T.Compose(\r\n [\r\n T.ToTensor(),\r\n T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\r\n ]\r\n )\r\n transform_test = T.Compose(\r\n [\r\n T.ToTensor(),\r\n T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\r\n ]\r\n )\r\n\r\n train_set = torchvision.datasets.SVHN(conf.dataset.path,\r\n split='train',\r\n transform=transform,\r\n download=True)\r\n valid_set = torchvision.datasets.SVHN(conf.dataset.path,\r\n split='train',\r\n transform=transform_test,\r\n download=True)\r\n\r\n num_train = len(train_set)\r\n indices = torch.randperm(num_train).tolist()\r\n valid_size = int(np.floor(0.05 * num_train))\r\n\r\n train_idx, valid_idx = indices[valid_size:], indices[:valid_size]\r\n\r\n train_set = Subset(train_set, train_idx)\r\n valid_set = Subset(valid_set, valid_idx)\r\n\r\n elif conf.dataset.name == 'celeba':\r\n transform = T.Compose(\r\n [\r\n CropTransform((25, 50, 25 + 128, 50 + 128)),\r\n T.Resize(128),\r\n T.RandomHorizontalFlip(),\r\n T.ToTensor(),\r\n T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\r\n ]\r\n )\r\n transform_test = T.Compose(\r\n [\r\n CropTransform((25, 50, 25 + 128, 50 + 128)),\r\n T.Resize(128),\r\n T.ToTensor(),\r\n T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),\r\n ]\r\n )\r\n\r\n train_set = torchvision.datasets.CelebA(conf.dataset.path,\r\n split='train',\r\n transform=transform,\r\n download=True)\r\n valid_set = torchvision.datasets.CelebA(conf.dataset.path,\r\n split='train',\r\n transform=transform_test,\r\n download=True)\r\n\r\n num_train = len(train_set)\r\n indices = torch.randperm(num_train).tolist()\r\n valid_size = int(np.floor(0.05 * num_train))\r\n\r\n train_idx, valid_idx = indices[valid_size:], indices[:valid_size]\r\n\r\n train_set = Subset(train_set, train_idx)\r\n valid_set = Subset(valid_set, valid_idx)\r\n\r\n else:\r\n raise FileNotFoundError\r\n\r\n return train_set, valid_set"
] |
[
[
"torch.utils.data.Subset",
"torch.reshape",
"torch.randperm",
"numpy.floor"
]
] |
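Each dataset branch in get_train_data above repeats the same split recipe: one torch.randperm over the training set, with the first 5% of indices carved out for validation via Subset (two dataset objects are passed in because train and validation use different transforms). A minimal sketch of that shared step; the helper name split_train_valid and the toy TensorDataset are invented for illustration:

import torch
import numpy as np
from torch.utils.data import Subset, TensorDataset

def split_train_valid(train_set, valid_set, valid_frac=0.05):
    # one permutation over the indices; the first valid_frac becomes validation
    num_train = len(train_set)
    indices = torch.randperm(num_train).tolist()
    valid_size = int(np.floor(valid_frac * num_train))
    train_idx, valid_idx = indices[valid_size:], indices[:valid_size]
    return Subset(train_set, train_idx), Subset(valid_set, valid_idx)

data = TensorDataset(torch.arange(100).float())
train_set, valid_set = split_train_valid(data, data)
print(len(train_set), len(valid_set))  # 95 5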
noapadan/WCT2
|
[
"56c819bebb9f023e9eb8603f1f56a37650231730"
] |
[
"generate_blur_images.py"
] |
[
"import os\nimport numpy as np\nimport cv2\nfrom PIL import Image\nif __name__ == '__main__':\n imgs_dir = 'examples/content'\n fnames = set(os.listdir(imgs_dir))\n\n for fname in fnames:\n img_path = os.path.join(imgs_dir, fname)\n img_path_out = os.path.join('examples/content_blur_imgs', fname)\n\n # read the image with OpenCV\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0\n img = img + np.random.normal(loc=0.0, scale=0.08, size=img.shape)\n img *= 255.0\n img = np.clip(img, 0.0, 255.0)\n img = img.astype(np.uint8)\n img = Image.fromarray(img, 'RGB')\n img.save(img_path_out)\n #img.show()"
] |
[
[
"numpy.random.normal",
"numpy.clip"
]
] |
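Despite the script's name, the corruption above is additive Gaussian noise rather than blur: scale to [0, 1], add noise with scale 0.08, rescale, clip, and cast back to uint8. The same arithmetic on a synthetic image, so this sketch runs without any input files:

import numpy as np
from PIL import Image

img = np.full((8, 8, 3), 128, dtype=np.uint8)  # flat gray stand-in image
noisy = img / 255.0 + np.random.normal(loc=0.0, scale=0.08, size=img.shape)
noisy = np.clip(noisy * 255.0, 0.0, 255.0).astype(np.uint8)
Image.fromarray(noisy, 'RGB').save('noisy_example.png')
print(noisy.min(), noisy.max())  # values stay within [0, 255]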
flozi00/transformers
|
[
"f80775df2b418716acce76d56826ed59183869b6"
] |
[
"src/transformers/models/wavlm/modeling_wavlm.py"
] |
[
"# coding=utf-8\n# Copyright 2021 The Fairseq Authors, Microsoft Research, and The HuggingFace Inc. team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch WavLM model.\"\"\"\n\nimport math\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom ...activations import ACT2FN\nfrom ...deepspeed import is_deepspeed_zero3_enabled\nfrom ...file_utils import (\n ModelOutput,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_model_forward,\n)\nfrom ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput\nfrom ...modeling_utils import PreTrainedModel\nfrom ...utils import logging\nfrom .configuration_wavlm import WavLMConfig\n\n\nlogger = logging.get_logger(__name__)\n\n_CONFIG_FOR_DOC = \"WavLMConfig\"\n_PROCESSOR_FOR_DOC = \"Wav2Vec2Processor\"\n_CHECKPOINT_FOR_DOC = \"patrickvonplaten/wavlm-libri-clean-100h-base-plus\"\n\n_SEQ_CLASS_CHECKPOINT = \"microsoft/wavlm-base\"\n_FEAT_EXTRACTOR_FOR_DOC = \"Wav2Vec2FeatureExtractor\"\n\n_SEQ_CLASS_CHECKPOINT = \"microsoft/wavlm-base-plus\"\n_FRAME_CLASS_CHECKPOINT = \"microsoft/wavlm-base-plus-sd\"\n_XVECTOR_CHECKPOINT = \"microsoft/wavlm-base-plus-sv\"\n\n_HIDDEN_STATES_START_POSITION = 2\n\nWAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"microsoft/wavlm-base\",\n \"microsoft/wavlm-base-plus\",\n \"microsoft/wavlm-large\",\n # See all WavLM models at https://huggingface.co/models?filter=wavlm\n]\n\n\n@dataclass\nclass WavLMBaseModelOutput(ModelOutput):\n \"\"\"\n Output type of [`WavLMBaseModelOutput`], with potential hidden states and attentions.\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n extract_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, conv_dim[-1])`):\n Sequence of extracted feature vectors of the last convolutional layer of the model.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor = None\n 
extract_features: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n@dataclass\nclass XVectorOutput(ModelOutput):\n \"\"\"\n Output type of [`Wav2Vec2ForXVector`].\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`):\n Classification hidden states before AMSoftmax.\n embeddings (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`):\n Utterance embeddings used for vector similarity-based retrieval.\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of\n shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n embeddings: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices\ndef _compute_mask_indices(\n shape: Tuple[int, int],\n mask_prob: float,\n mask_length: int,\n attention_mask: Optional[torch.LongTensor] = None,\n min_masks: int = 0,\n) -> np.ndarray:\n \"\"\"\n Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for\n ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on\n CPU as part of the preprocessing during training.\n\n Args:\n shape: The shape for which to compute masks. This should be of a tuple of size 2 where\n the first element is the batch size and the second element is the length of the axis to span.\n mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of\n independently generated mask spans of length `mask_length` is computed by\n `mask_prob*shape[1]/mask_length`. 
Note that due to overlaps, `mask_prob` is an upper bound and the\n actual percentage will be smaller.\n mask_length: size of the mask\n min_masks: minimum number of masked spans\n attention_mask: A (right-padded) attention mask which independently shortens the feature axis of\n each batch dimension.\n \"\"\"\n batch_size, sequence_length = shape\n\n if mask_length < 1:\n raise ValueError(\"`mask_length` has to be bigger than 0.\")\n\n if mask_length > sequence_length:\n raise ValueError(\n f\"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}\"\n f\" and `sequence_length`: {sequence_length}`\"\n )\n\n # epsilon is used for probabilistic rounding\n epsilon = np.random.rand(1).item()\n\n def compute_num_masked_span(input_length):\n \"\"\"Given input length, compute how many spans should be masked\"\"\"\n num_masked_span = int(mask_prob * input_length / mask_length + epsilon)\n num_masked_span = max(num_masked_span, min_masks)\n\n # make sure num masked indices <= sequence_length\n if num_masked_span * mask_length > sequence_length:\n num_masked_span = sequence_length // mask_length\n\n return num_masked_span\n\n # compute number of masked spans in batch\n input_lengths = (\n attention_mask.sum(-1).detach().tolist()\n if attention_mask is not None\n else [sequence_length for _ in range(batch_size)]\n )\n\n # SpecAugment mask to fill\n spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)\n spec_aug_mask_idxs = []\n\n max_num_masked_span = compute_num_masked_span(sequence_length)\n\n if max_num_masked_span == 0:\n return spec_aug_mask\n\n for input_length in input_lengths:\n # compute num of masked spans for this input\n num_masked_span = compute_num_masked_span(input_length)\n\n # get random indices to mask\n spec_aug_mask_idx = np.random.choice(\n np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False\n )\n\n # pick first sampled index that will serve as a dummy index to pad vector\n # to ensure same dimension for all batches due to probabilistic rounding\n # Picking first sample just pads those vectors twice.\n dummy_mask_idx = spec_aug_mask_idx[0]\n\n spec_aug_mask_idx = np.concatenate(\n [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]\n )\n spec_aug_mask_idxs.append(spec_aug_mask_idx)\n\n spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)\n\n # expand masked indices to masked spans\n spec_aug_mask_idxs = np.broadcast_to(\n spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)\n\n # add offset to the starting indexes so that that indexes now create a span\n offsets = np.arange(mask_length)[None, None, :]\n offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(\n batch_size, max_num_masked_span * mask_length\n )\n spec_aug_mask_idxs = spec_aug_mask_idxs + offsets\n\n # scatter indices to mask\n np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)\n\n return spec_aug_mask\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->WavLM\nclass WavLMNoLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n 
kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->WavLM\nclass WavLMLayerNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n\n hidden_states = hidden_states.transpose(-2, -1)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = hidden_states.transpose(-2, -1)\n\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->WavLM\nclass WavLMGroupNormConvLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1\n self.out_conv_dim = config.conv_dim[layer_id]\n\n self.conv = nn.Conv1d(\n self.in_conv_dim,\n self.out_conv_dim,\n kernel_size=config.conv_kernel[layer_id],\n stride=config.conv_stride[layer_id],\n bias=config.conv_bias,\n )\n self.activation = ACT2FN[config.feat_extract_activation]\n\n self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->WavLM\nclass WavLMPositionalConvEmbedding(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.hidden_size,\n config.hidden_size,\n kernel_size=config.num_conv_pos_embeddings,\n padding=config.num_conv_pos_embeddings // 2,\n groups=config.num_conv_pos_embedding_groups,\n )\n\n if is_deepspeed_zero3_enabled():\n import deepspeed\n\n with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n deepspeed.zero.register_external_parameter(self, self.conv.weight_v)\n deepspeed.zero.register_external_parameter(self, self.conv.weight_g)\n else:\n self.conv = nn.utils.weight_norm(self.conv, name=\"weight\", dim=2)\n\n self.padding = WavLMSamePadLayer(config.num_conv_pos_embeddings)\n self.activation = ACT2FN[config.feat_extract_activation]\n\n def forward(self, hidden_states):\n hidden_states = hidden_states.transpose(1, 2)\n\n hidden_states = self.conv(hidden_states)\n hidden_states = self.padding(hidden_states)\n hidden_states = self.activation(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with 
Wav2Vec2->WavLM\nclass WavLMSamePadLayer(nn.Module):\n def __init__(self, num_conv_pos_embeddings):\n super().__init__()\n self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0\n\n def forward(self, hidden_states):\n if self.num_pad_remove > 0:\n hidden_states = hidden_states[:, :, : -self.num_pad_remove]\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureExtractor with Wav2Vec2->WavLM\nclass WavLMFeatureExtractor(nn.Module):\n \"\"\"Construct the features from raw audio waveform\"\"\"\n\n def __init__(self, config):\n super().__init__()\n\n if config.feat_extract_norm == \"group\":\n conv_layers = [WavLMGroupNormConvLayer(config, layer_id=0)] + [\n WavLMNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1)\n ]\n elif config.feat_extract_norm == \"layer\":\n conv_layers = [WavLMLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)]\n else:\n raise ValueError(\n f\"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']\"\n )\n self.conv_layers = nn.ModuleList(conv_layers)\n self.gradient_checkpointing = False\n self._requires_grad = True\n\n def _freeze_parameters(self):\n for param in self.parameters():\n param.requires_grad = False\n self._requires_grad = False\n\n def forward(self, input_values):\n hidden_states = input_values[:, None]\n\n # make sure hidden_states require grad for gradient_checkpointing\n if self._requires_grad and self.training:\n hidden_states.requires_grad = True\n\n for conv_layer in self.conv_layers:\n if self._requires_grad and self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs)\n\n return custom_forward\n\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(conv_layer),\n hidden_states,\n )\n else:\n hidden_states = conv_layer(hidden_states)\n\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->WavLM\nclass WavLMFeatureProjection(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)\n self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)\n self.dropout = nn.Dropout(config.feat_proj_dropout)\n\n def forward(self, hidden_states):\n # non-projected hidden states are needed for quantization\n norm_hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.projection(norm_hidden_states)\n hidden_states = self.dropout(hidden_states)\n return hidden_states, norm_hidden_states\n\n\nclass WavLMAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n num_heads: int,\n dropout: float = 0.0,\n num_buckets: int = 320,\n max_distance: int = 800,\n has_relative_position_bias: bool = True,\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.head_dim = embed_dim // num_heads\n\n if (self.head_dim * num_heads) != self.embed_dim:\n raise ValueError(\n f\"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}\"\n f\" and `num_heads`: {num_heads}).\"\n )\n self.scaling = self.head_dim ** -0.5\n\n self.k_proj = nn.Linear(embed_dim, embed_dim)\n self.v_proj = nn.Linear(embed_dim, embed_dim)\n self.q_proj = nn.Linear(embed_dim, 
embed_dim)\n self.out_proj = nn.Linear(embed_dim, embed_dim)\n\n self.num_buckets = num_buckets\n self.max_distance = max_distance\n\n self.gru_rel_pos_const = nn.Parameter(torch.ones(1, self.num_heads, 1, 1))\n self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8)\n\n if has_relative_position_bias:\n self.rel_attn_embed = nn.Embedding(self.num_buckets, self.num_heads)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_bias: Optional[torch.Tensor] = None,\n output_attentions: bool = False,\n index=0,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n \"\"\"Attention layer with relative attention\"\"\"\n bsz, tgt_len, _ = hidden_states.size()\n\n # first pass of attention layer creates position bias\n if position_bias is None:\n position_bias = self.compute_bias(tgt_len, tgt_len)\n position_bias = (\n position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, tgt_len)\n )\n\n # Compute relative position bias:\n # 1) get reshape hidden_states\n gated_hidden_states = hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, -1))\n gated_hidden_states = gated_hidden_states.permute(0, 2, 1, 3)\n\n # 2) project hidden states\n relative_position_proj = self.gru_rel_pos_linear(gated_hidden_states)\n relative_position_proj = relative_position_proj.view(gated_hidden_states.shape[:-1] + (2, 4)).sum(-1)\n\n # 3) compute gate for position bias from projected hidden states\n gate_a, gate_b = torch.sigmoid(relative_position_proj).chunk(2, dim=-1)\n gate_output = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0\n\n # 4) apply gate to position bias to compute gated position_bias\n gated_position_bias = gate_output.view(bsz * self.num_heads, -1, 1) * position_bias\n gated_position_bias = gated_position_bias.view((-1, tgt_len, tgt_len))\n\n attn_output, attn_weights = self.torch_multi_head_self_attention(\n hidden_states, attention_mask, gated_position_bias, output_attentions\n )\n\n return attn_output, attn_weights, position_bias\n\n def torch_multi_head_self_attention(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Union[torch.LongTensor, torch.BoolTensor],\n gated_position_bias: torch.FloatTensor,\n output_attentions: bool,\n ) -> (torch.FloatTensor, torch.FloatTensor):\n \"\"\"simple wrapper around torch's multi_head_attention_forward function\"\"\"\n # self-attention assumes q = k = v\n query = key = value = hidden_states.transpose(0, 1)\n key_padding_mask = attention_mask.ne(1) if attention_mask is not None else None\n\n # disable bias and add_zero_attn\n bias_k = bias_v = None\n add_zero_attn = False\n\n # PyTorch 1.3.0 has F.multi_head_attention_forward defined\n # so no problem with backwards compatibility\n attn_output, attn_weights = F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),\n bias_k,\n bias_v,\n add_zero_attn,\n self.dropout,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training,\n key_padding_mask,\n output_attentions,\n gated_position_bias,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n )\n\n # [Seq_Len, Batch Size, ...] 
-> [Batch Size, Seq_Len, ...]\n attn_output = attn_output.transpose(0, 1)\n\n if attn_weights is not None:\n # IMPORTANT: Attention weights are averaged weights\n # here which should not be the case. This is an open issue\n # on PyTorch: https://github.com/pytorch/pytorch/issues/32590\n attn_weights = attn_weights[:, None].broadcast_to(\n attn_weights.shape[:1] + (self.num_heads,) + attn_weights.shape[1:]\n )\n\n return attn_output, attn_weights\n\n def compute_bias(self, query_length: int, key_length: int) -> torch.FloatTensor:\n context_position = torch.arange(query_length, dtype=torch.long)[:, None]\n memory_position = torch.arange(key_length, dtype=torch.long)[None, :]\n relative_position = memory_position - context_position\n relative_position_bucket = self._relative_positions_bucket(relative_position)\n relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device)\n values = self.rel_attn_embed(relative_position_bucket)\n values = values.permute([2, 0, 1])\n return values\n\n def _relative_positions_bucket(self, relative_positions: torch.FloatTensor) -> torch.FloatTensor:\n num_buckets = self.num_buckets // 2\n\n relative_buckets = (relative_positions > 0).to(torch.long) * num_buckets\n relative_positions = torch.abs(relative_positions)\n\n max_exact = num_buckets // 2\n is_small = relative_positions < max_exact\n\n relative_positions_if_large = torch.log(relative_positions.float() / max_exact)\n relative_positions_if_large = relative_positions_if_large / math.log(self.max_distance / max_exact)\n relative_positions_if_large = relative_positions_if_large * (num_buckets - max_exact)\n relative_postion_if_large = (max_exact + relative_positions_if_large).to(torch.long)\n relative_postion_if_large = torch.min(\n relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)\n )\n\n relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large)\n return relative_buckets\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->WavLM\nclass WavLMFeedForward(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.intermediate_dropout = nn.Dropout(config.activation_dropout)\n\n self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)\n if isinstance(config.hidden_act, str):\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n else:\n self.intermediate_act_fn = config.hidden_act\n\n self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.output_dropout = nn.Dropout(config.hidden_dropout)\n\n def forward(self, hidden_states):\n hidden_states = self.intermediate_dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n hidden_states = self.intermediate_dropout(hidden_states)\n\n hidden_states = self.output_dense(hidden_states)\n hidden_states = self.output_dropout(hidden_states)\n return hidden_states\n\n\nclass WavLMEncoderLayer(nn.Module):\n def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True):\n super().__init__()\n self.attention = WavLMAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n num_buckets=config.num_buckets,\n max_distance=config.max_bucket_distance,\n has_relative_position_bias=has_relative_position_bias,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = 
WavLMFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, index=0):\n attn_residual = hidden_states\n hidden_states, attn_weights, position_bias = self.attention(\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n output_attentions=output_attentions,\n index=index,\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n\n hidden_states = self.layer_norm(hidden_states)\n\n hidden_states = hidden_states + self.feed_forward(hidden_states)\n hidden_states = self.final_layer_norm(hidden_states)\n\n outputs = (hidden_states, position_bias)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass WavLMEncoderLayerStableLayerNorm(nn.Module):\n def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True):\n super().__init__()\n self.attention = WavLMAttention(\n embed_dim=config.hidden_size,\n num_heads=config.num_attention_heads,\n dropout=config.attention_dropout,\n num_buckets=config.num_buckets,\n max_distance=config.max_bucket_distance,\n has_relative_position_bias=has_relative_position_bias,\n )\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.feed_forward = WavLMFeedForward(config)\n self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False):\n attn_residual = hidden_states\n hidden_states = self.layer_norm(hidden_states)\n hidden_states, attn_weights, position_bias = self.attention(\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n output_attentions=output_attentions,\n )\n hidden_states = self.dropout(hidden_states)\n hidden_states = attn_residual + hidden_states\n hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))\n\n outputs = (hidden_states, position_bias)\n\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n\nclass WavLMEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = WavLMPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList(\n [WavLMEncoderLayer(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)]\n )\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens output 0\n hidden_states[~attention_mask] = 0.0\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n position_bias = None\n\n for i, layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see 
https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n position_bias,\n )\n else:\n layer_outputs = layer(\n hidden_states,\n attention_mask=attention_mask,\n position_bias=position_bias,\n output_attentions=output_attentions,\n index=i,\n )\n\n hidden_states, position_bias = layer_outputs[:2]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[2],)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\nclass WavLMEncoderStableLayerNorm(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.pos_conv_embed = WavLMPositionalConvEmbedding(config)\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout)\n self.layers = nn.ModuleList(\n [\n WavLMEncoderLayerStableLayerNorm(config, has_relative_position_bias=(i == 0))\n for i in range(config.num_hidden_layers)\n ]\n )\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n ):\n all_hidden_states = () if output_hidden_states else None\n all_self_attentions = () if output_attentions else None\n\n if attention_mask is not None:\n # make sure padded tokens are not attended to\n hidden_states[~attention_mask] = 0\n\n position_embeddings = self.pos_conv_embed(hidden_states)\n hidden_states = hidden_states + position_embeddings\n hidden_states = self.dropout(hidden_states)\n\n deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()\n position_bias = None\n\n for i, layer in enumerate(self.layers):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)\n dropout_probability = np.random.uniform(0, 1)\n\n skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop)\n if not skip_the_layer or deepspeed_zero3_is_enabled:\n # under deepspeed zero3 all gpus must run in sync\n # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication\n if self.gradient_checkpointing and self.training:\n # create gradient checkpointing function\n def create_custom_forward(module):\n def custom_forward(*inputs):\n return module(*inputs, output_attentions)\n\n return custom_forward\n\n layer_outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(layer),\n hidden_states,\n attention_mask,\n position_bias,\n )\n else:\n layer_outputs = layer(\n hidden_states,\n 
attention_mask=attention_mask,\n output_attentions=output_attentions,\n position_bias=position_bias,\n )\n hidden_states, position_bias = layer_outputs[:2]\n\n if skip_the_layer:\n layer_outputs = (None, None)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (layer_outputs[2],)\n\n hidden_states = self.layer_norm(hidden_states)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)\n return BaseModelOutput(\n last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions\n )\n\n\nclass WavLMGumbelVectorQuantizer(nn.Module):\n \"\"\"\n Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH\n GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.num_groups = config.num_codevector_groups\n self.num_vars = config.num_codevectors_per_group\n\n if config.codevector_dim % self.num_groups != 0:\n raise ValueError(\n f\"`config.codevector_dim {config.codevector_dim} must be divisible\"\n f\" by `config.num_codevector_groups` {self.num_groups} \"\n \"for concatenation.\"\n )\n\n # storage for codebook variables (codewords)\n self.codevectors = nn.Parameter(\n torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)\n )\n self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)\n\n # can be decayed for training\n self.temperature = 2\n\n @staticmethod\n def _compute_perplexity(probs):\n marginal_probs = probs.mean(dim=0)\n perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()\n return perplexity\n\n def forward(self, hidden_states):\n batch_size, sequence_length, hidden_size = hidden_states.shape\n\n # project to codevector dim\n hidden_states = self.weight_proj(hidden_states)\n hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)\n\n if self.training:\n # sample code vector probs via gumbel in differentiateable way\n codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True)\n codevector_probs = codevector_probs.type_as(hidden_states)\n\n # compute perplexity\n codevector_soft_dist = torch.softmax(\n hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1\n )\n perplexity = self._compute_perplexity(codevector_soft_dist)\n else:\n # take argmax in non-differentiable way\n # comptute hard codevector distribution (one hot)\n codevector_idx = hidden_states.argmax(dim=-1)\n codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(\n -1, codevector_idx.view(-1, 1), 1.0\n )\n codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)\n\n perplexity = self._compute_perplexity(codevector_probs)\n\n codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)\n # use probs to retrieve codevectors\n codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors\n codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)\n codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)\n\n return codevectors, perplexity\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->WavLM\nclass 
WavLMAdapter(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n # feature dim might need to be down-projected\n if config.output_hidden_size != config.hidden_size:\n self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)\n self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size)\n else:\n self.proj = self.proj_layer_norm = None\n\n self.layers = nn.ModuleList(WavLMAdapterLayer(config) for _ in range(config.num_adapter_layers))\n self.layerdrop = config.layerdrop\n\n def forward(self, hidden_states):\n # down project hidden_states if necessary\n if self.proj is not None and self.proj_layer_norm is not None:\n hidden_states = self.proj(hidden_states)\n hidden_states = self.proj_layer_norm(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n\n for layer in self.layers:\n layerdrop_prob = np.random.random()\n if not self.training or (layerdrop_prob > self.layerdrop):\n hidden_states = layer(hidden_states)\n\n hidden_states = hidden_states.transpose(1, 2)\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->WavLM\nclass WavLMAdapterLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.conv = nn.Conv1d(\n config.output_hidden_size,\n 2 * config.output_hidden_size,\n config.adapter_kernel_size,\n stride=config.adapter_stride,\n padding=1,\n )\n\n def forward(self, hidden_states):\n hidden_states = self.conv(hidden_states)\n hidden_states = nn.functional.glu(hidden_states, dim=1)\n\n return hidden_states\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PreTrainedModel with Wav2Vec2->WavLM, wav2vec2->wavlm\nclass WavLMPreTrainedModel(PreTrainedModel):\n \"\"\"\n An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n models.\n \"\"\"\n\n config_class = WavLMConfig\n base_model_prefix = \"wavlm\"\n main_input_name = \"input_values\"\n _keys_to_ignore_on_load_missing = [r\"position_ids\"]\n supports_gradient_checkpointing = True\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n # gumbel softmax requires special init\n if isinstance(module, WavLMGumbelVectorQuantizer):\n module.weight_proj.weight.data.normal_(mean=0.0, std=1)\n module.weight_proj.bias.data.zero_()\n nn.init.uniform_(module.codevectors)\n elif isinstance(module, WavLMPositionalConvEmbedding):\n nn.init.normal_(\n module.conv.weight,\n mean=0,\n std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),\n )\n nn.init.constant_(module.conv.bias, 0)\n elif isinstance(module, WavLMFeatureProjection):\n k = math.sqrt(1 / module.projection.in_features)\n nn.init.uniform_(module.projection.weight, a=-k, b=k)\n nn.init.uniform_(module.projection.bias, a=-k, b=k)\n elif isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n elif isinstance(module, nn.Conv1d):\n nn.init.kaiming_normal_(module.weight)\n\n if module.bias is not None:\n k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))\n nn.init.uniform_(module.bias, a=-k, b=k)\n\n def _get_feat_extract_output_lengths(\n self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None\n ):\n \"\"\"\n Computes the output length of the convolutional layers\n 
\"\"\"\n\n add_adapter = self.config.add_adapter if add_adapter is None else add_adapter\n\n def _conv_out_length(input_length, kernel_size, stride):\n # 1D convolutional layer output length formula taken\n # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n return (input_length - kernel_size) // stride + 1\n\n for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):\n input_lengths = _conv_out_length(input_lengths, kernel_size, stride)\n\n if add_adapter:\n for _ in range(self.config.num_adapter_layers):\n input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride)\n\n return input_lengths\n\n def _get_feature_vector_attention_mask(\n self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None\n ):\n # Effectively attention_mask.sum(-1), but not inplace to be able to run\n # on inference mode.\n non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]\n\n output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)\n output_lengths = output_lengths.to(torch.long)\n\n batch_size = attention_mask.shape[0]\n\n attention_mask = torch.zeros(\n (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device\n )\n # these two operations makes sure that all values before the output lengths idxs are attended to\n attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1\n attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()\n return attention_mask\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (WavLMEncoder, WavLMEncoderStableLayerNorm, WavLMFeatureExtractor)):\n module.gradient_checkpointing = value\n\n\nWAVLM_START_DOCSTRING = r\"\"\"\n WavLM was proposed in [WavLM: Unified Speech Representation Learning with Labeled and Unlabeled\n Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei,\n Michael Zeng, Xuedong Huang.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving etc.).\n\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`WavLMConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\n\nWAVLM_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file\n into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install\n soundfile*). To prepare the array into *input_values*, the [`WavLMProcessor`] should be used for padding\n and conversion into a tensor of type *torch.FloatTensor*. See [`WavLMProcessor.__call__`] for details.\n attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. 
Mask values selected in `[0,\n 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip warning={true}>\n\n `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==\n True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should\n **not** be passed to avoid degraded performance when doing batched inference. For such models\n `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these\n models also yield slightly different results depending on whether `input_values` is padded or not.\n\n </Tip>\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMModel(WavLMPreTrainedModel):\n def __init__(self, config: WavLMConfig):\n super().__init__(config)\n self.config = config\n self.feature_extractor = WavLMFeatureExtractor(config)\n self.feature_projection = WavLMFeatureProjection(config)\n\n # model only needs masking vector if mask prob is > 0.0\n if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:\n self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())\n\n if config.do_stable_layer_norm:\n self.encoder = WavLMEncoderStableLayerNorm(config)\n else:\n self.encoder = WavLMEncoder(config)\n\n self.adapter = WavLMAdapter(config) if config.add_adapter else None\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature extractor so that its parameters\n will not be updated during training.\n \"\"\"\n self.feature_extractor._freeze_parameters()\n\n def _mask_hidden_states(\n self,\n hidden_states: torch.FloatTensor,\n mask_time_indices: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n ):\n \"\"\"\n Masks extracted features along time axis and/or along feature axis according to\n [SpecAugment](https://arxiv.org/abs/1904.08779).\n \"\"\"\n\n # `config.apply_spec_augment` can set masking to False\n if not getattr(self.config, \"apply_spec_augment\", True):\n return hidden_states\n\n # generate indices & apply SpecAugment along time axis\n batch_size, sequence_length, hidden_size = hidden_states.size()\n\n if mask_time_indices is not None:\n # apply SpecAugment along time axis with given mask_time_indices\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n elif self.config.mask_time_prob > 0 and self.training:\n mask_time_indices = _compute_mask_indices(\n (batch_size, sequence_length),\n mask_prob=self.config.mask_time_prob,\n mask_length=self.config.mask_time_length,\n attention_mask=attention_mask,\n 
min_masks=self.config.mask_time_min_masks,\n )\n mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)\n hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)\n\n if self.config.mask_feature_prob > 0 and self.training:\n # generate indices & apply SpecAugment along feature axis\n mask_feature_indices = _compute_mask_indices(\n (batch_size, hidden_size),\n mask_prob=self.config.mask_feature_prob,\n mask_length=self.config.mask_feature_length,\n min_masks=self.config.mask_feature_min_masks,\n )\n mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)\n mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)\n hidden_states[mask_feature_indices] = 0\n\n return hidden_states\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=WavLMBaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n mask_time_indices=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n extract_features = self.feature_extractor(input_values)\n extract_features = extract_features.transpose(1, 2)\n\n if attention_mask is not None:\n # compute reduced attention_mask corresponding to feature vectors\n attention_mask = self._get_feature_vector_attention_mask(\n extract_features.shape[1], attention_mask, add_adapter=False\n )\n\n hidden_states, extract_features = self.feature_projection(extract_features)\n hidden_states = self._mask_hidden_states(\n hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask\n )\n\n encoder_outputs = self.encoder(\n hidden_states,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = encoder_outputs[0]\n\n if self.adapter is not None:\n hidden_states = self.adapter(hidden_states)\n\n if not return_dict:\n return (hidden_states, extract_features) + encoder_outputs[1:]\n\n return WavLMBaseModelOutput(\n last_hidden_state=hidden_states,\n extract_features=extract_features,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"WavLM Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).\"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForCTC(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wavlm = WavLMModel(config)\n self.dropout = nn.Dropout(config.final_dropout)\n\n if config.vocab_size is None:\n raise ValueError(\n f\"You are trying to instantiate {self.__class__} with a configuration that \"\n \"does not define the vocabulary size of the language model head. Please \"\n \"instantiate the model as follows: `WavLMForCTC.from_pretrained(..., vocab_size=vocab_size)`. 
\"\n \"or define `vocab_size` of your model's configuration.\"\n )\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature extractor so that its parameter\n will not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_PROCESSOR_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=CausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):\n Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to\n the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.\n All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,\n config.vocab_size - 1]`.\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n hidden_states = self.dropout(hidden_states)\n\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n\n if labels.max() >= self.config.vocab_size:\n raise ValueError(f\"Label values must be <= vocab_size: {self.config.vocab_size}\")\n\n # retrieve loss input_lengths from attention_mask\n attention_mask = (\n attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)\n )\n input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)\n\n # assuming that padded tokens are filled with -100\n # when not being attended to\n labels_mask = labels >= 0\n target_lengths = labels_mask.sum(-1)\n flattened_targets = labels.masked_select(labels_mask)\n\n # ctc_loss doesn't support fp16\n log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)\n\n with torch.backends.cudnn.flags(enabled=False):\n loss = nn.functional.ctc_loss(\n log_probs,\n flattened_targets,\n input_lengths,\n target_lengths,\n blank=self.config.pad_token_id,\n reduction=self.config.ctc_loss_reduction,\n zero_infinity=self.config.ctc_zero_infinity,\n )\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions\n )\n\n\n@add_start_docstrings(\n \"\"\"\n WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like\n SUPERB Keyword Spotting.\n \"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForSequenceClassification(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wavlm = 
WavLMModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)\n self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature extractor so that its parameters\n will not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_SEQ_CLASS_CHECKPOINT,\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n if attention_mask is None:\n pooled_output = hidden_states.mean(dim=1)\n else:\n padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)\n hidden_states[~padding_mask] = 0.0\n pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)\n\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"\n WavLM Model with a frame classification head on top for tasks like Speaker Diarization.\n \"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from 
transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForAudioFrameClassification(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wavlm = WavLMModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature extractor so that its parameters\n will not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. Only the classification head will be updated.\n \"\"\"\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_FRAME_CLASS_CHECKPOINT,\n output_type=TokenClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n logits = self.classifier(hidden_states)\n\n if not return_dict:\n output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]\n return output\n\n return TokenClassifierOutput(\n loss=None,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss\nclass AMSoftmaxLoss(nn.Module):\n def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):\n super(AMSoftmaxLoss, self).__init__()\n self.scale = scale\n self.margin = margin\n self.num_labels = num_labels\n self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)\n self.loss = nn.CrossEntropyLoss()\n\n def forward(self, hidden_states, labels):\n labels = labels.flatten()\n weight = nn.functional.normalize(self.weight, dim=0)\n hidden_states = nn.functional.normalize(hidden_states, dim=1)\n cos_theta 
= torch.mm(hidden_states, weight)\n psi = cos_theta - self.margin\n\n onehot = nn.functional.one_hot(labels, self.num_labels)\n logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)\n loss = self.loss(logits, labels)\n\n return loss\n\n\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer\nclass TDNNLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id]\n self.out_conv_dim = config.tdnn_dim[layer_id]\n self.kernel_size = config.tdnn_kernel[layer_id]\n self.dilation = config.tdnn_dilation[layer_id]\n\n self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim)\n self.activation = nn.ReLU()\n\n def forward(self, hidden_states):\n hidden_states = hidden_states.unsqueeze(1)\n hidden_states = nn.functional.unfold(\n hidden_states,\n (self.kernel_size, self.in_conv_dim),\n stride=(1, self.in_conv_dim),\n dilation=(self.dilation, 1),\n )\n hidden_states = hidden_states.transpose(1, 2)\n hidden_states = self.kernel(hidden_states)\n\n hidden_states = self.activation(hidden_states)\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"\n WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification.\n \"\"\",\n WAVLM_START_DOCSTRING,\n)\n# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM\nclass WavLMForXVector(WavLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.wavlm = WavLMModel(config)\n num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings\n if config.use_weighted_layer_sum:\n self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)\n self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0])\n\n tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))]\n self.tdnn = nn.ModuleList(tdnn_layers)\n\n self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim)\n self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim)\n\n self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels)\n\n self.init_weights()\n\n def freeze_feature_extractor(self):\n \"\"\"\n Calling this function will disable the gradient computation for the feature extractor so that its parameters\n will not be updated during training.\n \"\"\"\n self.wavlm.feature_extractor._freeze_parameters()\n\n def freeze_base_model(self):\n \"\"\"\n Calling this function will disable the gradient computation for the base model so that its parameters will not\n be updated during training. 
Only the classification head will be updated.\n \"\"\"\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):\n \"\"\"\n Computes the output length of the TDNN layers\n \"\"\"\n\n def _conv_out_length(input_length, kernel_size, stride):\n # 1D convolutional layer output length formula taken\n # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html\n return (input_length - kernel_size) // stride + 1\n\n for kernel_size in self.config.tdnn_kernel:\n input_lengths = _conv_out_length(input_lengths, kernel_size, 1)\n\n return input_lengths\n\n @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_FEAT_EXTRACTOR_FOR_DOC,\n checkpoint=_XVECTOR_CHECKPOINT,\n output_type=XVectorOutput,\n config_class=_CONFIG_FOR_DOC,\n modality=\"audio\",\n )\n def forward(\n self,\n input_values,\n attention_mask=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n labels=None,\n ):\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states\n\n outputs = self.wavlm(\n input_values,\n attention_mask=attention_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if self.config.use_weighted_layer_sum:\n hidden_states = outputs[_HIDDEN_STATES_START_POSITION]\n hidden_states = torch.stack(hidden_states, dim=1)\n norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)\n hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)\n else:\n hidden_states = outputs[0]\n\n hidden_states = self.projector(hidden_states)\n\n for tdnn_layer in self.tdnn:\n hidden_states = tdnn_layer(hidden_states)\n\n # Statistic Pooling\n if attention_mask is None:\n mean_features = hidden_states.mean(dim=1)\n std_features = hidden_states.std(dim=1)\n else:\n feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1))\n tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths)\n mean_features = []\n std_features = []\n for i, length in enumerate(tdnn_output_lengths):\n mean_features.append(hidden_states[i, :length].mean(dim=0))\n std_features.append(hidden_states[i, :length].std(dim=0))\n mean_features = torch.stack(mean_features)\n std_features = torch.stack(std_features)\n statistic_pooling = torch.cat([mean_features, std_features], dim=-1)\n\n output_embeddings = self.feature_extractor(statistic_pooling)\n logits = self.classifier(output_embeddings)\n\n loss = None\n if labels is not None:\n loss = self.objective(logits, labels)\n\n if not return_dict:\n output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:]\n return ((loss,) + output) if loss is not None else output\n\n return XVectorOutput(\n loss=loss,\n logits=logits,\n embeddings=output_embeddings,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n"
] |
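The attention implementation stored above relies on T5-style relative position buckets: offsets up to `max_exact` each get their own bucket, larger offsets are binned logarithmically up to `max_distance`, and the sign of the offset selects the lower or upper half of the bucket range. Below is a standalone sketch of that bucketing scheme using the file's defaults (num_buckets=320, max_distance=800); the `log(0)` produced on the diagonal never reaches the output because `torch.where` selects the exact branch for small offsets.

    import math
    import torch

    def relative_positions_bucket(relative_positions, num_buckets=320, max_distance=800):
        # Bidirectional variant: half the buckets encode the sign of the offset.
        num_buckets = num_buckets // 2
        relative_buckets = (relative_positions > 0).to(torch.long) * num_buckets
        relative_positions = torch.abs(relative_positions)

        # Offsets below max_exact get their own bucket; larger ones are log-spaced.
        max_exact = num_buckets // 2
        is_small = relative_positions < max_exact

        if_large = torch.log(relative_positions.float() / max_exact)
        if_large = if_large / math.log(max_distance / max_exact)
        if_large = max_exact + (if_large * (num_buckets - max_exact)).to(torch.long)
        if_large = torch.min(if_large, torch.full_like(if_large, num_buckets - 1))

        return relative_buckets + torch.where(is_small, relative_positions, if_large)

    # All pairwise offsets for a length-8 sequence, as compute_bias builds them.
    positions = torch.arange(8)[None, :] - torch.arange(8)[:, None]
    print(relative_positions_bucket(positions))

The resulting bucket indices are what the `rel_attn_embed` embedding table is looked up with, one learned scalar per attention head and bucket.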
[
[
"torch.abs",
"torch.nn.init.uniform_",
"torch.nn.functional.softmax",
"torch.nn.functional.glu",
"torch.zeros",
"torch.cat",
"torch.nn.Embedding",
"torch.FloatTensor",
"torch.where",
"torch.full_like",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.mm",
"torch.ones",
"numpy.arange",
"torch.randn",
"torch.backends.cudnn.flags",
"torch.tensor",
"torch.arange",
"torch.nn.GroupNorm",
"numpy.zeros",
"torch.ones_like",
"torch.sigmoid",
"torch.empty",
"torch.nn.init.constant_",
"numpy.put_along_axis",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.log",
"numpy.random.rand",
"torch.nn.Conv1d",
"torch.stack",
"numpy.array",
"torch.nn.functional.ctc_loss",
"torch.nn.functional.normalize",
"numpy.random.random",
"torch.nn.functional.log_softmax",
"torch.nn.utils.weight_norm",
"torch.nn.LayerNorm",
"numpy.ones",
"numpy.random.uniform",
"numpy.broadcast_to",
"torch.nn.functional.one_hot",
"torch.nn.functional.unfold",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
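A quick way to sanity-check the `_get_feat_extract_output_lengths` helper in the record above is to run its per-layer formula `(L - kernel) // stride + 1` by hand. The kernel and stride values below are the usual base-configuration defaults, assumed here purely for illustration since the record does not show its config object:

    # Chained 1D-conv output-length arithmetic, mirroring the stored helper.
    def conv_out_length(input_length, kernel_size, stride):
        # Formula from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
        return (input_length - kernel_size) // stride + 1

    # Assumed base-config defaults, not read from the record above.
    kernels = (10, 3, 3, 3, 3, 2, 2)
    strides = (5, 2, 2, 2, 2, 2, 2)

    length = 16000  # one second of 16 kHz audio
    for k, s in zip(kernels, strides):
        length = conv_out_length(length, k, s)
    print(length)  # -> 49

Under these assumptions one second of 16 kHz audio maps to 49 feature frames, i.e. a frame stride of roughly 20 ms, which is the length the reduced attention mask is built against.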
matthieumeo/prism
|
[
"4cd00a07b7bae956eb74d57806fa10d9608ab266"
] |
[
"examples/elec_equip.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom prism import SeasonalTrendRegression\nfrom prism import elec_equip_dataset\n\nplt.rcParams['figure.figsize'] = [12, 8]\nplt.rcParams['figure.dpi'] = 100\n\ndata = elec_equip_dataset()\n\nperiod = 1\nsample_times = data.time_decimal.values\nsample_values = data.value.values\nforecast_times = np.linspace(sample_times.min(), sample_times.max(), 4096)\nseasonal_forecast_times = np.linspace(0, period, 1024)\n\nstreg = SeasonalTrendRegression(sample_times=sample_times, sample_values=sample_values, period=period,\n forecast_times=forecast_times, seasonal_forecast_times=seasonal_forecast_times,\n nb_of_knots=(32, 32), spline_orders=(3, 2), penalty_strength=1, penalty_tuning=True,\n test_times=None, test_values=None, robust=True, theta=0.5)\nstreg.plot_data()\nxx, mu = streg.fit(verbose=1)\nmin_values, max_values, samples = streg.sample_credible_region(return_samples=True)\nstreg.summary_plot(min_values=min_values, max_values=max_values)\nstreg.plot_seasonal_component(min_values=min_values['seasonal'], max_values=max_values['seasonal'],\n samples_seasonal=samples['seasonal'])\nstreg.plot_trend_component(min_values=min_values['trend'], max_values=max_values['trend'],\n samples_trend=samples['trend'])\nstreg.plot_sum(min_values=min_values['sum'], max_values=max_values['sum'], samples_sum=samples['sum'])\n\n\n"
] |
[
[
"numpy.linspace"
]
] |
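The only API the record above indexes is numpy.linspace, which the example script uses to build its two evaluation grids: a dense grid spanning the observed time range for the full forecast, and a one-period grid for the seasonal component. A minimal sketch with made-up sample times:

    import numpy as np

    # Illustrative sample times; the real script reads them from the dataset.
    sample_times = np.array([2000.0, 2003.25, 2007.5, 2012.0])
    period = 1.0

    # Dense grid over the observed range, plus one period for the seasonal part.
    forecast_times = np.linspace(sample_times.min(), sample_times.max(), 4096)
    seasonal_forecast_times = np.linspace(0.0, period, 1024)

    print(forecast_times[0], forecast_times[-1], forecast_times.size)
    print(seasonal_forecast_times[0], seasonal_forecast_times[-1])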
mtoqeerpk/seismic-simulation-complex-media
|
[
"701d3af7ac84120202bc9a741bfdf2320ad06ddc",
"701d3af7ac84120202bc9a741bfdf2320ad06ddc"
] |
[
"autoencoder/main.py",
"autoencoder/losses.py"
] |
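The training script stored next applies a power-law time gain before plotting predicted and true waveforms, so that late, weak arrivals remain visible next to early ones. A standalone sketch of that display gain; the exponent 2.5 and the median normalisation follow the stored code, while the tensor shapes are illustrative:

    import numpy as np

    # Power-law display gain: late time samples are amplified before imshow.
    nsteps = 512
    t_gain = np.arange(nsteps, dtype=np.float32) ** 2.5
    t_gain = t_gain / np.median(t_gain)          # unit gain at the median time
    t_gain = t_gain.reshape((1, 1, 1, nsteps))   # broadcast over (N, S, R, T)

    # Stand-in for a batch of network outputs: (batch, source, receiver, time).
    outputs = np.random.randn(2, 1, 8, nsteps).astype(np.float32)
    gained = t_gain * outputs                    # what gets passed to plt.imshow
    print(gained.shape)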
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 13 17:04:43 2018\n\n@author: bmoseley\n\"\"\"\n\n\n# This script trains a conditional autoencoder model, given a Constants object which contains\n# all of the training hyperparameters of the model.\n# It defines the loss function, optimiser and training operations used to train the network, \n# as well as the summary statistics used for displaying the results in TensorBoard.\n# This script is the main entry point for training the conditional autoencoder network.\n\n\nimport sys\nimport os\nimport time\n\nimport matplotlib\nif 'linux' in sys.platform.lower(): matplotlib.use('Agg')# use a non-interactive backend (ie plotting without windows)\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom tensorboardX import SummaryWriter\nimport torch\nimport torch.optim\nfrom torch.utils.data import RandomSampler, DataLoader\nfrom mysampler import BatchSampler\nfrom constants import Constants\nimport losses\nfrom torch_utils import get_weights, get_weights_update_percent\n\n\n\n## This needs to be specified - problem dependent\ndef plot_result(inputs_array, outputs_array, labels_array, sample_batch, ib=0, isource=0,\n aspect=0.2):\n \"Plot a network prediction, compare to ground truth and input\"\n f = plt.figure(figsize=(12,5))\n \n # define gain profile for display\n t_gain = np.arange(outputs_array.shape[-1], dtype=np.float32)**2.5\n t_gain = t_gain/np.median(t_gain)\n t_gain = t_gain.reshape((1,1,1,outputs_array.shape[-1]))# along NSTEPS\n \n plt.subplot2grid((1, 4), (0, 0), colspan=2)\n plt.imshow(inputs_array[ib,0,:,:].T, vmin=-1, vmax=1)\n plt.colorbar()\n \n plt.subplot2grid((1, 4), (0, 2), colspan=1)\n plt.imshow((t_gain*outputs_array)[ib,isource,:,:].T,\n aspect=aspect, cmap=\"Greys\", vmin=-1, vmax=1)\n plt.colorbar()\n plt.title(\"%f, %f\"%(np.min(outputs_array),np.max(outputs_array)))\n \n plt.subplot2grid((1, 4), (0, 3), colspan=1)\n plt.imshow((t_gain*labels_array)[ib,isource,:,:].T,\n aspect=aspect, cmap=\"Greys\", vmin=-1, vmax=1)\n plt.colorbar()\n plt.title(\"%s\"%(sample_batch[\"inputs\"][1].detach().cpu().numpy().copy()[ib,:,0,0]))# label with source position\n \n return f\n \n\n\nclass Trainer:\n \"Generic model trainer class\"\n \n def __init__(self, c):\n \"Initialise torch, output directories, training dataset and model\"\n \n \n ## INITIALISE\n \n # set seed\n if c.SEED == None: c.SEED = torch.initial_seed()\n else: torch.manual_seed(c.SEED)# likely independent of numpy\n np.random.seed(c.SEED)\n \n # clear directories\n c.get_outdirs()\n c.save_constants_file()# saves torch seed too\n print(c)\n \n # set device/ threads\n device = torch.device(\"cuda:%i\"%(c.DEVICE) if torch.cuda.is_available() else \"cpu\")\n print(\"Device: %s\"%(device))\n torch.backends.cudnn.benchmark = False#let cudnn find the best algorithm to use for your hardware (not good for dynamic nets)\n torch.set_num_threads(1)# for main inference\n \n print(\"Main thread ID: %i\"%os.getpid())\n print(\"Number of CPU threads: \", torch.get_num_threads())\n print(\"Torch seed: \", torch.initial_seed())\n \n # initialise summary writer\n writer = SummaryWriter(c.SUMMARY_OUT_DIR)\n\n\n ### DEFINE TRAIN/TEST DATASETS\n \n # split dataset 80:20\n irange = np.arange(0, c.N_EXAMPLES)\n np.random.shuffle(irange)# randomly shuffle the indicies (in place) before splitting. 
To get diversity in train/test split.\n traindataset = c.DATASET(c,\n irange=irange[0:(8*c.N_EXAMPLES//10)],\n verbose=True)\n testdataset = c.DATASET(c,\n irange=irange[(8*c.N_EXAMPLES//10):c.N_EXAMPLES],\n verbose=True)\n assert len(set(traindataset.irange).intersection(testdataset.irange)) == 0# make sure examples aren't shared!\n \n #### DEFINE MODEL\n \n model = c.MODEL(c)\n \n # load previous weights\n if c.MODEL_LOAD_PATH != None:\n cp = torch.load(c.MODEL_LOAD_PATH,\n map_location=torch.device('cpu'))# remap tensors from gpu to cpu if needed\n model.load_state_dict(cp['model_state_dict'])\n ioffset = cp[\"i\"]\n print(\"Loaded model weights from: %s\"%(c.MODEL_LOAD_PATH))\n else: ioffset = 0\n \n # print out parameters\n #writer.add_graph(model, torch.zeros((1,)+c.VELOCITY_SHAPE))# write graph before placing on GPU\n print()\n print(\"Model: %s\"%(model.name))\n total_params = sum(p.numel() for p in model.parameters())\n total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(\"Total number of parameters: %i\"%(total_params))\n print(\"Total number of trainable parameters: %i\"%(total_trainable_params))\n #for p in model.parameters(): print(p.size(), p.numel())\n \n model.to(device)\n\n self.c, self.device, self.writer = c, device, writer\n self.traindataset, self.testdataset = traindataset, testdataset\n self.model, self.ioffset = model, ioffset\n\n def train(self):\n \"train model\"\n \n c, device, writer = self.c, self.device, self.writer\n traindataset, testdataset = self.traindataset, self.testdataset\n model, ioffset = self.model, self.ioffset\n \n ### TRAIN\n \n print()\n print(\"Training..\")\n \n N_BATCHES = len(traindataset)//c.BATCH_SIZE\n N_EPOCHS = int(np.ceil(c.N_STEPS/N_BATCHES))\n \n # below uses my own batch sampler so that dataloader iterators run over n_epochs\n # also uses dataset.initialise_file_reader method to open a file handle in each worker process, instead of a shared one on the main thread\n # DataLoader essentially iterates through iter(batch_sampler) or iter(sampler) depending on inputs\n # calling worker_init in each worker process\n trainloader = DataLoader(traindataset,\n batch_sampler=BatchSampler(RandomSampler(traindataset, replacement=True),# randomly sample with replacement\n batch_size=c.BATCH_SIZE,\n drop_last=True,\n n_epochs=1),\n worker_init_fn=traindataset.initialise_worker_fn,\n num_workers=c.N_CPU_WORKERS,# num_workers = spawns multiprocessing subprocess workers\n timeout=300)# timeout after 5 mins of no data loading\n \n testloader = DataLoader(testdataset,\n batch_sampler=BatchSampler(RandomSampler(testdataset, replacement=True),# randomly sample with replacement\n batch_size=c.BATCH_SIZE,\n drop_last=True,\n n_epochs=N_EPOCHS),\n worker_init_fn=testdataset.initialise_worker_fn,\n num_workers=1,# num_workers = spawns multiprocessing subprocess workers\n timeout=300)# timeout after 5 mins of no data loading\n \n testloader_iterator = iter(testloader)\n trainloader_iterator = iter(trainloader)\n #assert len(trainloader_iterator) == N_EPOCHS * N_BATCHES\n \n #optimizer = torch.optim.SGD(model.parameters(), lr=c.LRATE, momentum=0.9)\n optimizer = torch.optim.Adam(model.parameters(), lr=c.LRATE, weight_decay=c.WEIGHT_DECAY)\n \n start0 = start1 = time.time(); w1 = get_weights(model)\n for ie in range(N_EPOCHS): # loop over the dataset multiple times\n \n wait_start, wait_time, gpu_time, gpu_utilisation = time.time(), 0., 0., 0.\n for ib in range(N_BATCHES):\n i = ioffset + ie*N_BATCHES+ib\n \n try:# 
get next sample_batch\n sample_batch = next(trainloader_iterator)\n except StopIteration:# restart iterator\n del trainloader_iterator\n trainloader_iterator = iter(trainloader)# re-initiates batch/sampler iterators, with new random starts\n sample_batch = next(trainloader_iterator)\n \n #sample_batch = next(trainloader_iterator)\n #if ib == 0: print(sample_batch[\"i\"])# check\n \n wait_time += time.time()-wait_start\n \n \n ## TRAIN\n \n gpu_start = time.time()\n \n model.train()# switch to train mode (for dropout/ batch norm layers)\n \n # get the data\n inputs = sample_batch[\"inputs\"]# expects list of inputs\n labels = sample_batch[\"labels\"]# expects list of labels\n inputs = [inp.to(device) for inp in inputs]\n labels = [lab.to(device) for lab in labels]\n \n # zero the parameter gradients AT EACH STEP\n optimizer.zero_grad()# zeros all parameter gradient buffers\n \n # forward + backward + optimize\n outputs = model(*inputs)# expect tuple of outputs\n loss = c.LOSS(*labels, *outputs, c)# note loss is on cuda if labels/ outputs on cuda\n loss.backward()# updates all gradients in model\n optimizer.step()# updates all parameters using their gradients\n \n gpu_time += time.time()-gpu_start\n \n ## TRAIN STATISTICS\n \n if (i + 1) % 100 == 0:\n gpu_utilisation = 100*gpu_time/(wait_time+gpu_time)\n print(\"Wait time average: %.4f s GPU time average: %.4f s GPU util: %.2f %% device: %i\"%(wait_time/100, gpu_time/100, gpu_utilisation, c.DEVICE))\n gpu_time, wait_time = 0.,0.\n \n if (i + 1) % c.SUMMARY_FREQ == 0:\n \n rate = c.SUMMARY_FREQ/(time.time()-start1)\n \n with torch.no_grad():# faster inference without tracking\n \n model.eval()\n \n # get example outputs and losses\n inputs = sample_batch[\"inputs\"]# expects list of inputs\n labels = sample_batch[\"labels\"]# expects list of labels\n inputs = [inp.to(device) for inp in inputs]\n labels = [lab.to(device) for lab in labels]\n outputs = model(*inputs)\n \n l1loss = losses.l1_mean_loss(labels[0], outputs[0]).item()\n l2loss = losses.l2_mean_loss(labels[0], outputs[0]).item()\n\n writer.add_scalar(\"loss/l1_loss/train\", l1loss, i + 1)\n writer.add_scalar(\"loss/l2_loss/train\", l2loss, i + 1)\n \n inputs_array = inputs[0].detach().cpu().numpy().copy()# detach returns a new tensor, detached from the current graph\n outputs_array = outputs[0].detach().cpu().numpy().copy()\n labels_array = labels[0].detach().cpu().numpy().copy()\n if (i + 1) % (10 * c.SUMMARY_FREQ) == 0:\n f = plot_result(inputs_array, outputs_array, labels_array, sample_batch)\n writer.add_figure(\"compare/train\", f, i + 1, close=True)\n \n # check weight updates from previous summary\n w2 = get_weights(model)\n mu, _, av = get_weights_update_percent(w1, w2)\n s = \"Weight updates (%.1f %% average): \"%(100*av)\n for m in mu: s+=\"%.1f \"%(100*m)\n print(s)\n del w1; w1 = w2\n\n # add run statistics\n writer.add_scalar(\"stats/epoch\", ie, i + 1)\n writer.add_scalar(\"stats/rate/batch\", rate, i + 1)\n writer.add_scalar(\"stats/rate/gpu_utilisation\", gpu_utilisation, i + 1)\n \n # output to screen\n print('[epoch: %i/%i, batch: %i/%i i: %i] l2loss: %.4f rate: %.1f elapsed: %.2f hr %s %s' % (\n ie + 1,\n N_EPOCHS,\n ib + 1, \n N_BATCHES, \n i + 1,\n l2loss,\n rate,\n (time.time()-start0)/(60*60),\n time.strftime(\"%Y-%m-%d %H:%M:%S\",time.gmtime()),\n c.RUN\n ))\n \n start1 = time.time()\n \n ## TEST STATISTICS\n \n if (i + 1) % c.TEST_FREQ == 0:\n \n with torch.no_grad():# faster inference without tracking\n \n try:# get next sample_batch\n sample_batch = 
next(testloader_iterator)\n except StopIteration:# restart iterator\n del testloader_iterator\n testloader_iterator = iter(testloader)# re-initiates batch/sampler iterators, with new random starts\n sample_batch = next(testloader_iterator)\n #print(sample_batch[\"i\"])# check\n \n model.eval()\n \n # get example outputs and losses\n inputs = sample_batch[\"inputs\"]# expects list of inputs\n labels = sample_batch[\"labels\"]# expects list of labels\n inputs = [inp.to(device) for inp in inputs]\n labels = [lab.to(device) for lab in labels]\n outputs = model(*inputs)\n \n l1loss = losses.l1_mean_loss(labels[0], outputs[0]).item()\n l2loss = losses.l2_mean_loss(labels[0], outputs[0]).item()\n \n writer.add_scalar(\"loss/l1_loss/test\", l1loss, i + 1)\n writer.add_scalar(\"loss/l2_loss/test\", l2loss, i + 1)\n \n inputs_array = inputs[0].detach().cpu().numpy().copy()# detach returns a new tensor, detached from the current graph\n outputs_array = outputs[0].detach().cpu().numpy().copy()\n labels_array = labels[0].detach().cpu().numpy().copy()\n if (i + 1) % (10 * c.TEST_FREQ) == 0:\n f = plot_result(inputs_array, outputs_array, labels_array, sample_batch)\n writer.add_figure(\"compare/test\", f, i + 1, close=True)\n \n ## SAVE\n \n if (i + 1) % c.MODEL_SAVE_FREQ == 0:\n \n model.eval()\n \n model.to(torch.device('cpu'))# put model on cpu before saving\n # to avoid out-of-memory error\n \n # save a checkpoint\n torch.save({\n 'i': i + 1,\n 'model_state_dict': model.state_dict(),\n }, c.MODEL_OUT_DIR+\"model_%.8i.torch\"%(i + 1))\n \n model.to(device)\n \n wait_start = time.time()\n \n \n del trainloader_iterator, testloader_iterator\n \n print('Finished Training (total runtime: %.1f hrs)'%(\n (time.time()-start0)/(60*60)))\n \n def close(self):\n self.writer.close()\n \n\nif __name__ == \"__main__\":\n\n \n import models\n \n #cs = [Constants(),]\n \n DEVICE = 7\n \n #cs = [Constants(RUN=\"fault_cae\",\n # DEVICE=DEVICE,\n # ),]\n \n #cs = [Constants(RUN=\"fault_cae2\",\n # DEVICE=DEVICE,\n # ),]\n \n #cs = [Constants(RUN=\"fault_cae_seed\",\n # DEVICE=DEVICE,\n # SEED=1234,\n # ),]\n \n #cs = [Constants(RUN=\"fault_cae_l2\",\n # LOSS=losses.l2_mean_loss_gain,\n # DEVICE=DEVICE,\n # ),]\n \n #cs = [Constants(RUN=\"fault_cae_gain0\",\n # T_GAIN=0,\n # DEVICE=DEVICE,\n # ),]\n \n #cs = [Constants(RUN=\"fault_cae_gain5\",\n # T_GAIN=5,\n # DEVICE=DEVICE,\n # ),]\n \n #cs = [Constants(RUN=\"fault_cae_shallow\",\n # MODEL=models.AE_shallow_r,\n # DEVICE=DEVICE,\n # ),]\n \n cs = [Constants(RUN=\"fault_cae_narrow\",\n MODEL=models.AE_narrow_r,\n DEVICE=DEVICE,\n ),]\n '''\n cs = [Constants(RUN=\"fault_cae3\",\n DEVICE=DEVICE,\n ),]\n '''\n for c in cs:\n run = Trainer(c)\n run.train()\n run.close()\n ",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 20 16:57:42 2018\n\n@author: bmoseley\n\"\"\"\n\n\n# This module defines various loss functions in pytorch.\n\n\nimport numpy as np\nimport torch\n\n\n# Note: trainer passes arguments in the following order: loss(*labels, *outputs, constants)\n\ndef l2_mean_loss(a,b, c=None):\n return torch.mean((a-b)**2)\n\ndef l2_sum_loss(a,b, c=None):\n return torch.sum((a-b)**2)\n\ndef l1_mean_loss(a,b, c=None):\n return torch.mean(torch.abs(a - b))\n\ndef l1_sum_loss(a,b, c=None):\n return torch.sum(torch.abs(a - b))\n\n\ndef l1_mean_loss_gain(a,b, c=None):\n # apply loss with ^T_GAIN gain profile\n device = torch.device(\"cuda:%i\"%(c.DEVICE) if torch.cuda.is_available() else \"cpu\")\n \n t_gain = np.arange(c.GATHER_SHAPE[2], dtype=np.float32)**c.T_GAIN\n t_gain = t_gain/np.median(t_gain)\n t_gain = t_gain.reshape((1,1,1,c.GATHER_SHAPE[2]))# along NSTEPS\n t_gain = torch.from_numpy(t_gain).to(device)\n g = t_gain*(a-b)\n return torch.mean(torch.abs(g))\n\n\ndef l2_mean_loss_gain(a,b, c=None):\n # apply loss with ^T_GAIN gain profile\n device = torch.device(\"cuda:%i\"%(c.DEVICE) if torch.cuda.is_available() else \"cpu\")\n \n t_gain = np.arange(c.GATHER_SHAPE[2], dtype=np.float32)**c.T_GAIN\n t_gain = t_gain/np.median(t_gain)\n t_gain = t_gain.reshape((1,1,1,c.GATHER_SHAPE[2]))# along NSTEPS\n t_gain = torch.from_numpy(t_gain).to(device)\n g = t_gain*(a-b)\n return torch.mean(g**2)\n\n\nif __name__ == \"__main__\":\n \n a = torch.ones((10,20))\n b = torch.ones((10,20)).mul_(0.1)# (in place)\n \n print(l2_sum_loss(a,b))\n \n print(l2_mean_loss(a,b))\n print(l1_mean_loss(a,b))"
] |
[
[
"matplotlib.pyplot.imshow",
"numpy.max",
"torch.set_num_threads",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.get_num_threads",
"matplotlib.pyplot.subplot2grid",
"torch.initial_seed",
"numpy.arange",
"numpy.ceil",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.median",
"numpy.random.seed",
"matplotlib.use",
"torch.manual_seed",
"torch.utils.data.RandomSampler",
"numpy.random.shuffle",
"matplotlib.pyplot.colorbar"
],
[
"torch.mean",
"torch.abs",
"torch.ones",
"numpy.arange",
"numpy.median",
"torch.sum",
"torch.from_numpy",
"torch.cuda.is_available"
]
] |
eanorambuena/Driver
|
[
"3cb14f5d741c6bae364326305ae0ded04e10e9d4"
] |
[
"example.py"
] |
[
"from egg.resources.structures import *\n\nx = Image()\nx.load(\"example.jpg\")\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.imshow(np.array(x.matrix))\n\nM = [[255,0,0,255,255,255,0,255], [0,0,0,255,255,255,0,255], [0,255,0,255,0,255,0,255], [0,255,255,255,255,255,0,255], [0,255,255,255,255,255,0,255], [0,0,255,255,255,0,0,255], [0,0,255,255,0,0,0,255], [0,0,255,255,0,0,0,255]]\n\nmatriz = Image()\nmatriz.loadFromBW(M)\nmatriz.printRGB()\n\njj = [[[242, 80, 34], [127, 186, 0]], [[0, 164, 239], [255, 185, 0]]]\nm = Image()\nm.loadFromRGB(jj)\nm.printRGB()\nm.bias = [255, 0, 0]\nm.printRGB()\n\nx = Image()\nx.load(\"example.jpg\")\nx.printRGB()\nx.printBW()\n\n#m.save(\"micro.jpg\") # Not working yet"
] |
[
[
"numpy.array"
]
] |
dptam/neural_wfst
|
[
"c0cd3e340a45c13fbe47310753432d496c20eb83"
] |
[
"src/python/transducer_data.py"
] |
[
"'''\n| Filename : transducer_data.py\n| Description : Functions that return the data fe to the transducer.\n| Author : Pushpendre Rastogi\n| Created : Tue Dec 8 17:50:51 2015 (-0500)\n| Last-Updated: Thu Dec 31 01:08:44 2015 (-0500)\n| By: Pushpendre Rastogi\n| Update #: 22\n'''\nimport codecs\nimport numpy\nimport string\nimport rasengan\nimport util_lstm_seqlabel\nimport warnings\nimport functools\n\nBOS_CHAR = '^'\ndef read_data(file_name):\n \"\"\"\n Helper function\n \"\"\"\n lst = []\n with codecs.open(file_name, 'r', encoding='utf8') as f:\n for line in f:\n line = line.strip()\n if line == '':\n continue\n (one, two, *rest) = line.split(\"\\t\")\n lst.append((one, two))\n return lst\n\ndef numerize(lst, Sigma, win):\n \" Takes the string-valued training data and interns it \"\n lst_prime = []\n bos_idx = len(Sigma)\n for one, two in lst:\n one_prime = numpy.asarray(\n util_lstm_seqlabel.conv_x(\n [Sigma[x] for x in one], win, bos_idx),\n dtype=numpy.int32)\n two_prime = numpy.asarray(\n [Sigma[x] for x in two],\n dtype=numpy.int32)\n lst_prime.append((one_prime, two_prime))\n return lst_prime\n\ndef int2str(lst, Sigma_inv):\n \" Converts a list of integers to a string \"\n _string = \"\"\n for x in lst:\n _string += Sigma_inv[x]\n return _string\n\ndef get_lst_char(data_tuple_list):\n lst_char = list(set(functools.reduce(\n lambda x, y: x + y[0] + y[1], data_tuple_list, '')))\n for e in list(set(string.ascii_letters.lower())):\n e = str(e)\n if e not in lst_char:\n lst_char.append(e)\n if(BOS_CHAR in lst_char):\n lst_char.remove(BOS_CHAR)\n assert BOS_CHAR not in lst_char\n lst_char.insert(0, BOS_CHAR)\n return lst_char\n\ndef add_bos(data_tuple_list):\n '''\n The BOS_CHAR is added to the left portion of the data, that is transduced\n so that my LSTM can produce (1 + length) dimensional tensor, which is then\n used by the cython transducer.\n '''\n return [(BOS_CHAR + a, b) for a,b in data_tuple_list]\n\ndef main(args):\n with rasengan.debug_support():\n with rasengan.tictoc(\"Loading Data\"):\n data_list = rasengan.namespacer(\n read_data(args.train_fn))\n val_data_list = rasengan.namespacer(\n read_data(args.dev_fn))\n if args.partition_dev_into_train > 0:\n lim = args.partition_dev_into_test\n data_list.extend(val_data_list[lim:])\n val_data_list = val_data_list[:lim]\n if args.partition_dev_into_test > 0:\n lim = args.partition_dev_into_test\n test_data_list = val_data_list[lim:]\n val_data_list = val_data_list[:lim]\n else:\n test_data_list = rasengan.namespacer(\n read_data(args.test_fn))\n # data_list = val_data_list = [(u'jason', u'eisner')]\n lst_char = get_lst_char(data_list\n + val_data_list\n + test_data_list)\n data_list = add_bos(data_list)\n val_data_list = add_bos(val_data_list)\n test_data_list = add_bos(test_data_list)\n # warnings.warn('''\n # NOTE: While preparing sigma, we add 1 to the index\n # returned by enumerate because the transducer unit that\n # Ryan wrote uses index 0 as the index for the epsilon\n # symbol. 
So essentially the epsilon symbol and the\n # integer 0 are reserved symbols that cannot appear in the\n # vocabulary.\n\n # ALSO, we need to add 1 to the vocsize because of that.\n # ''')\n # sigma :: char -> int\n sigma = dict((b, a+1) for (a,b) in enumerate(lst_char))\n # sigma_inv :: int -> char\n sigma_inv = dict((a+1, b) for (a,b) in enumerate(lst_char))\n if args.limit_corpus > 0:\n data_list = data_list[:args.limit_corpus]\n train_data = numerize(data_list, sigma, args.win)\n val_data = numerize(val_data_list, sigma, args.win)\n test_data = numerize(test_data_list, sigma, args.win)\n data = rasengan.Namespace()\n #-------------------------------------------------------------#\n # Add sets that would be used by the tensorflow seq2seq #\n # model. See~$PY/tensorflow/models/rnn/translate/translate.py #\n #-------------------------------------------------------------#\n data.train_data = data_list\n data.val_data = val_data_list\n data.test_data = test_data_list\n\n data.train_set = train_data\n data.dev_set = val_data\n data.test_set = test_data\n\n data.vocsize = len(sigma) + 1\n data.idx2label = sigma_inv\n data.label2idx = sigma\n print(\"J\")\n data.train_lex = [e[0] for e in train_data]\n data.train_y = [e[1] for e in train_data]\n print(\"K\")\n data.valid_lex = [e[0] for e in val_data]\n data.valid_y = util_lstm_seqlabel.convert_id_to_word(\n [e[1] for e in val_data], data.idx2label)\n print(\"L\")\n data.test_lex = [e[0] for e in test_data]\n data.test_y = util_lstm_seqlabel.convert_id_to_word(\n [e[1] for e in test_data], data.idx2label)\n\n data.words_train = []\n data.words_valid = []\n data.words_test = []\n return data\n"
] |
[
[
"numpy.asarray"
]
] |
mspacek/numpy
|
[
"645b9f572f0a22e9049fd736b8b91427be2c8402"
] |
[
"numpy/ma/tests/test_core.py"
] |
[
"# pylint: disable-msg=W0401,W0511,W0611,W0612,W0614,R0201,E1102\n\"\"\"Tests suite for MaskedArray & subclassing.\n\n:author: Pierre Gerard-Marchant\n:contact: pierregm_at_uga_dot_edu\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n__author__ = \"Pierre GF Gerard-Marchant\"\n\nimport warnings\nimport sys\nimport pickle\nfrom functools import reduce\n\nimport numpy as np\nimport numpy.ma.core\nimport numpy.core.fromnumeric as fromnumeric\nfrom numpy import ndarray\nfrom numpy.ma.testutils import *\nfrom numpy.ma.core import *\nfrom numpy.compat import asbytes, asbytes_nested\n\npi = np.pi\n\n\n#..............................................................................\nclass TestMaskedArray(TestCase):\n \"Base test class for MaskedArrays.\"\n\n def setUp (self):\n \"Base data definition.\"\n x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n a10 = 10.\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n z = np.array([-.5, 0., .5, .8])\n zm = masked_array(z, mask=[0, 1, 0, 0])\n xf = np.where(m1, 1e+20, x)\n xm.set_fill_value(1e+20)\n self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)\n\n\n def test_basicattributes(self):\n \"Tests some basic array attributes.\"\n a = array([1, 3, 2])\n b = array([1, 3, 2], mask=[1, 0, 1])\n assert_equal(a.ndim, 1)\n assert_equal(b.ndim, 1)\n assert_equal(a.size, 3)\n assert_equal(b.size, 3)\n assert_equal(a.shape, (3,))\n assert_equal(b.shape, (3,))\n\n\n def test_basic0d(self):\n \"Checks masking a scalar\"\n x = masked_array(0)\n assert_equal(str(x), '0')\n x = masked_array(0, mask=True)\n assert_equal(str(x), str(masked_print_option))\n x = masked_array(0, mask=False)\n assert_equal(str(x), '0')\n x = array(0, mask=1)\n self.assertTrue(x.filled().dtype is x._data.dtype)\n\n def test_basic1d(self):\n \"Test of basic array creation and properties in 1 dimension.\"\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n self.assertTrue(not isMaskedArray(x))\n self.assertTrue(isMaskedArray(xm))\n self.assertTrue((xm - ym).filled(0).any())\n fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))\n s = x.shape\n assert_equal(np.shape(xm), s)\n assert_equal(xm.shape, s)\n assert_equal(xm.dtype, x.dtype)\n assert_equal(zm.dtype, z.dtype)\n assert_equal(xm.size , reduce(lambda x, y:x * y, s))\n assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1))\n assert_array_equal(xm, xf)\n assert_array_equal(filled(xm, 1.e20), xf)\n assert_array_equal(x, xm)\n\n\n def test_basic2d(self):\n \"Test of basic array creation and properties in 2 dimensions.\"\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n for s in [(4, 3), (6, 2)]:\n x.shape = s\n y.shape = s\n xm.shape = s\n ym.shape = s\n xf.shape = s\n #\n self.assertTrue(not isMaskedArray(x))\n self.assertTrue(isMaskedArray(xm))\n assert_equal(shape(xm), s)\n assert_equal(xm.shape, s)\n assert_equal(xm.size , reduce(lambda x, y:x * y, s))\n assert_equal(count(xm) , len(m1) - reduce(lambda x, y:x + y, m1))\n assert_equal(xm, xf)\n assert_equal(filled(xm, 1.e20), xf)\n assert_equal(x, xm)\n\n def test_concatenate_basic(self):\n \"Tests concatenations.\"\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n # basic concatenation\n assert_equal(np.concatenate((x, y)), concatenate((xm, ym)))\n assert_equal(np.concatenate((x, y)), concatenate((x, y)))\n assert_equal(np.concatenate((x, y)), 
concatenate((xm, y)))\n assert_equal(np.concatenate((x, y, x)), concatenate((x, ym, x)))\n\n def test_concatenate_alongaxis(self):\n \"Tests concatenations.\"\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n # Concatenation along an axis\n s = (3, 4)\n x.shape = y.shape = xm.shape = ym.shape = s\n assert_equal(xm.mask, np.reshape(m1, s))\n assert_equal(ym.mask, np.reshape(m2, s))\n xmym = concatenate((xm, ym), 1)\n assert_equal(np.concatenate((x, y), 1), xmym)\n assert_equal(np.concatenate((xm.mask, ym.mask), 1), xmym._mask)\n #\n x = zeros(2)\n y = array(ones(2), mask=[False, True])\n z = concatenate((x, y))\n assert_array_equal(z, [0, 0, 1, 1])\n assert_array_equal(z.mask, [False, False, False, True])\n z = concatenate((y, x))\n assert_array_equal(z, [1, 1, 0, 0])\n assert_array_equal(z.mask, [False, True, False, False])\n\n def test_concatenate_flexible(self):\n \"Tests the concatenation on flexible arrays.\"\n data = masked_array(list(zip(np.random.rand(10),\n np.arange(10))),\n dtype=[('a', float), ('b', int)])\n #\n test = concatenate([data[:5], data[5:]])\n assert_equal_records(test, data)\n\n def test_creation_ndmin(self):\n \"Check the use of ndmin\"\n x = array([1, 2, 3], mask=[1, 0, 0], ndmin=2)\n assert_equal(x.shape, (1, 3))\n assert_equal(x._data, [[1, 2, 3]])\n assert_equal(x._mask, [[1, 0, 0]])\n\n def test_creation_ndmin_from_maskedarray(self):\n \"Make sure we're not losing the original mask w/ ndmin\"\n x = array([1, 2, 3])\n x[-1] = masked\n xx = array(x, ndmin=2, dtype=float)\n assert_equal(x.shape, x._mask.shape)\n assert_equal(xx.shape, xx._mask.shape)\n\n def test_creation_maskcreation(self):\n \"Tests how masks are initialized at the creation of Maskedarrays.\"\n data = arange(24, dtype=float)\n data[[3, 6, 15]] = masked\n dma_1 = MaskedArray(data)\n assert_equal(dma_1.mask, data.mask)\n dma_2 = MaskedArray(dma_1)\n assert_equal(dma_2.mask, dma_1.mask)\n dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)\n fail_if_equal(dma_3.mask, dma_1.mask)\n\n def test_creation_with_list_of_maskedarrays(self):\n \"Tests creaating a masked array from alist of masked arrays.\"\n x = array(np.arange(5), mask=[1, 0, 0, 0, 0])\n data = array((x, x[::-1]))\n assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])\n assert_equal(data._mask, [[1, 0, 0, 0, 0], [0, 0, 0, 0, 1]])\n #\n x.mask = nomask\n data = array((x, x[::-1]))\n assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])\n self.assertTrue(data.mask is nomask)\n\n def test_asarray(self):\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n xm.fill_value = -9999\n xm._hardmask = True\n xmm = asarray(xm)\n assert_equal(xmm._data, xm._data)\n assert_equal(xmm._mask, xm._mask)\n assert_equal(xmm.fill_value, xm.fill_value)\n assert_equal(xmm._hardmask, xm._hardmask)\n\n def test_fix_invalid(self):\n \"Checks fix_invalid.\"\n with np.errstate():\n np.seterr(invalid='ignore')\n data = masked_array([np.nan, 0., 1.], mask=[0, 0, 1])\n data_fixed = fix_invalid(data)\n assert_equal(data_fixed._data, [data.fill_value, 0., 1.])\n assert_equal(data_fixed._mask, [1., 0., 1.])\n\n def test_maskedelement(self):\n \"Test of masked element\"\n x = arange(6)\n x[1] = masked\n self.assertTrue(str(masked) == '--')\n self.assertTrue(x[1] is masked)\n assert_equal(filled(x[1], 0), 0)\n # don't know why these should raise an exception...\n #self.assertRaises(Exception, lambda x,y: x+y, masked, masked)\n #self.assertRaises(Exception, lambda x,y: x+y, masked, 2)\n #self.assertRaises(Exception, lambda x,y: x+y, masked, xx)\n 
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)\n\n def test_set_element_as_object(self):\n \"\"\"Tests setting elements with object\"\"\"\n a = empty(1, dtype=object)\n x = (1, 2, 3, 4, 5)\n a[0] = x\n assert_equal(a[0], x)\n self.assertTrue(a[0] is x)\n #\n import datetime\n dt = datetime.datetime.now()\n a[0] = dt\n self.assertTrue(a[0] is dt)\n\n\n def test_indexing(self):\n \"Tests conversions and indexing\"\n x1 = np.array([1, 2, 4, 3])\n x2 = array(x1, mask=[1, 0, 0, 0])\n x3 = array(x1, mask=[0, 1, 0, 1])\n x4 = array(x1)\n # test conversion to strings\n junk, garbage = str(x2), repr(x2)\n assert_equal(np.sort(x1), sort(x2, endwith=False))\n # tests of indexing\n assert_(type(x2[1]) is type(x1[1]))\n assert_(x1[1] == x2[1])\n assert_(x2[0] is masked)\n assert_equal(x1[2], x2[2])\n assert_equal(x1[2:5], x2[2:5])\n assert_equal(x1[:], x2[:])\n assert_equal(x1[1:], x3[1:])\n x1[2] = 9\n x2[2] = 9\n assert_equal(x1, x2)\n x1[1:3] = 99\n x2[1:3] = 99\n assert_equal(x1, x2)\n x2[1] = masked\n assert_equal(x1, x2)\n x2[1:3] = masked\n assert_equal(x1, x2)\n x2[:] = x1\n x2[1] = masked\n assert_(allequal(getmask(x2), array([0, 1, 0, 0])))\n x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])\n assert_(allequal(getmask(x3), array([0, 1, 1, 0])))\n x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])\n assert_(allequal(getmask(x4), array([0, 1, 1, 0])))\n assert_(allequal(x4, array([1, 2, 3, 4])))\n x1 = np.arange(5) * 1.0\n x2 = masked_values(x1, 3.0)\n assert_equal(x1, x2)\n assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))\n assert_equal(3.0, x2.fill_value)\n x1 = array([1, 'hello', 2, 3], object)\n x2 = np.array([1, 'hello', 2, 3], object)\n s1 = x1[1]\n s2 = x2[1]\n assert_equal(type(s2), str)\n assert_equal(type(s1), str)\n assert_equal(s1, s2)\n assert_(x1[1:1].shape == (0,))\n\n\n def test_copy(self):\n \"Tests of some subtle points of copying and sizing.\"\n n = [0, 0, 1, 0, 0]\n m = make_mask(n)\n m2 = make_mask(m)\n self.assertTrue(m is m2)\n m3 = make_mask(m, copy=1)\n self.assertTrue(m is not m3)\n\n x1 = np.arange(5)\n y1 = array(x1, mask=m)\n #self.assertTrue( y1._data is x1)\n assert_equal(y1._data.__array_interface__, x1.__array_interface__)\n self.assertTrue(allequal(x1, y1.data))\n #self.assertTrue( y1.mask is m)\n assert_equal(y1._mask.__array_interface__, m.__array_interface__)\n\n y1a = array(y1)\n self.assertTrue(y1a._data.__array_interface__ == y1._data.__array_interface__)\n self.assertTrue(y1a.mask is y1.mask)\n\n y2 = array(x1, mask=m)\n self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)\n #self.assertTrue( y2.mask is m)\n self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)\n self.assertTrue(y2[2] is masked)\n y2[2] = 9\n self.assertTrue(y2[2] is not masked)\n #self.assertTrue( y2.mask is not m)\n self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)\n self.assertTrue(allequal(y2.mask, 0))\n\n y3 = array(x1 * 1.0, mask=m)\n self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)\n\n x4 = arange(4)\n x4[2] = masked\n y4 = resize(x4, (8,))\n assert_equal(concatenate([x4, x4]), y4)\n assert_equal(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])\n y5 = repeat(x4, (2, 2, 2, 2), axis=0)\n assert_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])\n y6 = repeat(x4, 2, axis=0)\n assert_equal(y5, y6)\n y7 = x4.repeat((2, 2, 2, 2), axis=0)\n assert_equal(y5, y7)\n y8 = x4.repeat(2, 0)\n assert_equal(y5, y8)\n\n y9 = x4.copy()\n assert_equal(y9._data, x4._data)\n assert_equal(y9._mask, x4._mask)\n #\n x = masked_array([1, 2, 3], 
mask=[0, 1, 0])\n # Copy is False by default\n y = masked_array(x)\n assert_equal(y._data.ctypes.data, x._data.ctypes.data)\n assert_equal(y._mask.ctypes.data, x._mask.ctypes.data)\n y = masked_array(x, copy=True)\n assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)\n assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)\n\n\n def test_deepcopy(self):\n from copy import deepcopy\n a = array([0, 1, 2], mask=[False, True, False])\n copied = deepcopy(a)\n assert_equal(copied.mask, a.mask)\n assert_not_equal(id(a._mask), id(copied._mask))\n #\n copied[1] = 1\n assert_equal(copied.mask, [0, 0, 0])\n assert_equal(a.mask, [0, 1, 0])\n #\n copied = deepcopy(a)\n assert_equal(copied.mask, a.mask)\n copied.mask[1] = False\n assert_equal(copied.mask, [0, 0, 0])\n assert_equal(a.mask, [0, 1, 0])\n\n\n def test_pickling(self):\n \"Tests pickling\"\n a = arange(10)\n a[::3] = masked\n a.fill_value = 999\n a_pickled = pickle.loads(a.dumps())\n assert_equal(a_pickled._mask, a._mask)\n assert_equal(a_pickled._data, a._data)\n assert_equal(a_pickled.fill_value, 999)\n\n def test_pickling_subbaseclass(self):\n \"Test pickling w/ a subclass of ndarray\"\n a = array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)\n a_pickled = pickle.loads(a.dumps())\n assert_equal(a_pickled._mask, a._mask)\n assert_equal(a_pickled, a)\n self.assertTrue(isinstance(a_pickled._data, np.matrix))\n\n def test_pickling_maskedconstant(self):\n \"Test pickling MaskedConstant\"\n\n mc = np.ma.masked\n mc_pickled = pickle.loads(mc.dumps())\n assert_equal(mc_pickled._baseclass, mc._baseclass)\n assert_equal(mc_pickled._mask, mc._mask)\n assert_equal(mc_pickled._data, mc._data)\n\n def test_pickling_wstructured(self):\n \"Tests pickling w/ structured array\"\n a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],\n dtype=[('a', int), ('b', float)])\n a_pickled = pickle.loads(a.dumps())\n assert_equal(a_pickled._mask, a._mask)\n assert_equal(a_pickled, a)\n\n def test_pickling_keepalignment(self):\n \"Tests pickling w/ F_CONTIGUOUS arrays\"\n a = arange(10)\n a.shape = (-1, 2)\n b = a.T\n test = pickle.loads(pickle.dumps(b))\n assert_equal(test, b)\n\n\n def test_single_element_subscript(self):\n \"Tests single element subscripts of Maskedarrays.\"\n a = array([1, 3, 2])\n b = array([1, 3, 2], mask=[1, 0, 1])\n assert_equal(a[0].shape, ())\n assert_equal(b[0].shape, ())\n assert_equal(b[1].shape, ())\n\n\n def test_topython(self):\n \"Tests some communication issues with Python.\"\n assert_equal(1, int(array(1)))\n assert_equal(1.0, float(array(1)))\n assert_equal(1, int(array([[[1]]])))\n assert_equal(1.0, float(array([[1]])))\n self.assertRaises(TypeError, float, array([1, 1]))\n #\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', UserWarning)\n assert_(np.isnan(float(array([1], mask=[1]))))\n #\n a = array([1, 2, 3], mask=[1, 0, 0])\n self.assertRaises(TypeError, lambda:float(a))\n assert_equal(float(a[-1]), 3.)\n self.assertTrue(np.isnan(float(a[0])))\n self.assertRaises(TypeError, int, a)\n assert_equal(int(a[-1]), 3)\n self.assertRaises(MAError, lambda:int(a[0]))\n\n\n def test_oddfeatures_1(self):\n \"Test of other odd features\"\n x = arange(20)\n x = x.reshape(4, 5)\n x.flat[5] = 12\n assert_(x[1, 0] == 12)\n z = x + 10j * x\n assert_equal(z.real, x)\n assert_equal(z.imag, 10 * x)\n assert_equal((z * conjugate(z)).real, 101 * x * x)\n z.imag[...] 
= 0.0\n #\n x = arange(10)\n x[3] = masked\n assert_(str(x[3]) == str(masked))\n c = x >= 8\n assert_(count(where(c, masked, masked)) == 0)\n assert_(shape(where(c, masked, masked)) == c.shape)\n #\n z = masked_where(c, x)\n assert_(z.dtype is x.dtype)\n assert_(z[3] is masked)\n assert_(z[4] is not masked)\n assert_(z[7] is not masked)\n assert_(z[8] is masked)\n assert_(z[9] is masked)\n assert_equal(x, z)\n\n\n def test_oddfeatures_2(self):\n \"Tests some more features.\"\n x = array([1., 2., 3., 4., 5.])\n c = array([1, 1, 1, 0, 0])\n x[2] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., -5])\n c[0] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., -5])\n assert_(z[0] is masked)\n assert_(z[1] is not masked)\n assert_(z[2] is masked)\n\n\n def test_oddfeatures_3(self):\n \"\"\"Tests some generic features.\"\"\"\n atest = array([10], mask=True)\n btest = array([20])\n idx = atest.mask\n atest[idx] = btest[idx]\n assert_equal(atest, [20])\n\n\n def test_filled_w_flexible_dtype(self):\n \"Test filled w/ flexible dtype\"\n flexi = array([(1, 1, 1)],\n dtype=[('i', int), ('s', '|S8'), ('f', float)])\n flexi[0] = masked\n assert_equal(flexi.filled(),\n np.array([(default_fill_value(0),\n default_fill_value('0'),\n default_fill_value(0.),)], dtype=flexi.dtype))\n flexi[0] = masked\n assert_equal(flexi.filled(1),\n np.array([(1, '1', 1.)], dtype=flexi.dtype))\n\n def test_filled_w_mvoid(self):\n \"Test filled w/ mvoid\"\n ndtype = [('a', int), ('b', float)]\n a = mvoid((1, 2.), mask=[(0, 1)], dtype=ndtype)\n # Filled using default\n test = a.filled()\n assert_equal(tuple(test), (1, default_fill_value(1.)))\n # Explicit fill_value\n test = a.filled((-1, -1))\n assert_equal(tuple(test), (1, -1))\n # Using predefined filling values\n a.fill_value = (-999, -999)\n assert_equal(tuple(a.filled()), (1, -999))\n\n\n def test_filled_w_nested_dtype(self):\n \"Test filled w/ nested dtype\"\n ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]\n a = array([(1, (1, 1)), (2, (2, 2))],\n mask=[(0, (1, 0)), (0, (0, 1))], dtype=ndtype)\n test = a.filled(0)\n control = np.array([(1, (0, 1)), (2, (2, 0))], dtype=ndtype)\n assert_equal(test, control)\n #\n test = a['B'].filled(0)\n control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)\n assert_equal(test, control)\n\n\n def test_filled_w_f_order(self):\n \"Test filled w/ F-contiguous array\"\n a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),\n mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),\n order='F') # this is currently ignored\n self.assertTrue(a.flags['F_CONTIGUOUS'])\n self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])\n\n\n def test_optinfo_propagation(self):\n \"Checks that _optinfo dictionary isn't back-propagated\"\n x = array([1, 2, 3, ], dtype=float)\n x._optinfo['info'] = '???'\n y = x.copy()\n assert_equal(y._optinfo['info'], '???')\n y._optinfo['info'] = '!!!'\n assert_equal(x._optinfo['info'], '???')\n\n\n def test_fancy_printoptions(self):\n \"Test printing a masked array w/ fancy dtype.\"\n fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])\n test = array([(1, (2, 3.0)), (4, (5, 6.0))],\n mask=[(1, (0, 1)), (0, (1, 0))],\n dtype=fancydtype)\n control = \"[(--, (2, --)) (4, (--, 6.0))]\"\n assert_equal(str(test), control)\n\n\n def test_flatten_structured_array(self):\n \"Test flatten_structured_array on arrays\"\n # On ndarray\n ndtype = [('a', int), ('b', float)]\n a = np.array([(1, 1), (2, 2)], dtype=ndtype)\n test = flatten_structured_array(a)\n control = np.array([[1., 
1.], [2., 2.]], dtype=np.float)\n assert_equal(test, control)\n assert_equal(test.dtype, control.dtype)\n # On masked_array\n a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)\n test = flatten_structured_array(a)\n control = array([[1., 1.], [2., 2.]],\n mask=[[0, 1], [1, 0]], dtype=np.float)\n assert_equal(test, control)\n assert_equal(test.dtype, control.dtype)\n assert_equal(test.mask, control.mask)\n # On masked array with nested structure\n ndtype = [('a', int), ('b', [('ba', int), ('bb', float)])]\n a = array([(1, (1, 1.1)), (2, (2, 2.2))],\n mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)\n test = flatten_structured_array(a)\n control = array([[1., 1., 1.1], [2., 2., 2.2]],\n mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)\n assert_equal(test, control)\n assert_equal(test.dtype, control.dtype)\n assert_equal(test.mask, control.mask)\n # Keeping the initial shape\n ndtype = [('a', int), ('b', float)]\n a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)\n test = flatten_structured_array(a)\n control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)\n assert_equal(test, control)\n assert_equal(test.dtype, control.dtype)\n\n\n\n def test_void0d(self):\n \"Test creating a mvoid object\"\n ndtype = [('a', int), ('b', int)]\n a = np.array([(1, 2,)], dtype=ndtype)[0]\n f = mvoid(a)\n assert_(isinstance(f, mvoid))\n #\n a = masked_array([(1, 2)], mask=[(1, 0)], dtype=ndtype)[0]\n assert_(isinstance(a, mvoid))\n #\n a = masked_array([(1, 2), (1, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)\n f = mvoid(a._data[0], a._mask[0])\n assert_(isinstance(f, mvoid))\n\n def test_mvoid_getitem(self):\n \"Test mvoid.__getitem__\"\n ndtype = [('a', int), ('b', int)]\n a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype)\n # w/o mask\n f = a[0]\n self.assertTrue(isinstance(f, mvoid))\n assert_equal((f[0], f['a']), (1, 1))\n assert_equal(f['b'], 2)\n # w/ mask\n f = a[1]\n self.assertTrue(isinstance(f, mvoid))\n self.assertTrue(f[0] is masked)\n self.assertTrue(f['a'] is masked)\n assert_equal(f[1], 4)\n\n def test_mvoid_iter(self):\n \"Test iteration on __getitem__\"\n ndtype = [('a', int), ('b', int)]\n a = masked_array([(1, 2,), (3, 4)], mask=[(0, 0), (1, 0)], dtype=ndtype)\n # w/o mask\n assert_equal(list(a[0]), [1, 2])\n # w/ mask\n assert_equal(list(a[1]), [masked, 4])\n\n def test_mvoid_print(self):\n \"Test printing a mvoid\"\n mx = array([(1, 1), (2, 2)], dtype=[('a', int), ('b', int)])\n assert_equal(str(mx[0]), \"(1, 1)\")\n mx['b'][0] = masked\n ini_display = masked_print_option._display\n masked_print_option.set_display(\"-X-\")\n try:\n assert_equal(str(mx[0]), \"(1, -X-)\")\n assert_equal(repr(mx[0]), \"(1, -X-)\")\n finally:\n masked_print_option.set_display(ini_display)\n\n#------------------------------------------------------------------------------\n\nclass TestMaskedArrayArithmetic(TestCase):\n \"Base test class for MaskedArrays.\"\n\n def setUp (self):\n \"Base data definition.\"\n x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n a10 = 10.\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n z = np.array([-.5, 0., .5, .8])\n zm = masked_array(z, mask=[0, 1, 0, 0])\n xf = np.where(m1, 1e+20, x)\n xm.set_fill_value(1e+20)\n self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf)\n self.err_status = np.geterr()\n np.seterr(divide='ignore', invalid='ignore')\n\n def 
tearDown(self):\n np.seterr(**self.err_status)\n\n def test_basic_arithmetic (self):\n \"Test of basic arithmetic.\"\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n a2d = array([[1, 2], [0, 4]])\n a2dm = masked_array(a2d, [[0, 0], [1, 0]])\n assert_equal(a2d * a2d, a2d * a2dm)\n assert_equal(a2d + a2d, a2d + a2dm)\n assert_equal(a2d - a2d, a2d - a2dm)\n for s in [(12,), (4, 3), (2, 6)]:\n x = x.reshape(s)\n y = y.reshape(s)\n xm = xm.reshape(s)\n ym = ym.reshape(s)\n xf = xf.reshape(s)\n assert_equal(-x, -xm)\n assert_equal(x + y, xm + ym)\n assert_equal(x - y, xm - ym)\n assert_equal(x * y, xm * ym)\n assert_equal(x / y, xm / ym)\n assert_equal(a10 + y, a10 + ym)\n assert_equal(a10 - y, a10 - ym)\n assert_equal(a10 * y, a10 * ym)\n assert_equal(a10 / y, a10 / ym)\n assert_equal(x + a10, xm + a10)\n assert_equal(x - a10, xm - a10)\n assert_equal(x * a10, xm * a10)\n assert_equal(x / a10, xm / a10)\n assert_equal(x ** 2, xm ** 2)\n assert_equal(abs(x) ** 2.5, abs(xm) ** 2.5)\n assert_equal(x ** y, xm ** ym)\n assert_equal(np.add(x, y), add(xm, ym))\n assert_equal(np.subtract(x, y), subtract(xm, ym))\n assert_equal(np.multiply(x, y), multiply(xm, ym))\n assert_equal(np.divide(x, y), divide(xm, ym))\n\n\n def test_divide_on_different_shapes(self):\n x = arange(6, dtype=float)\n x.shape = (2, 3)\n y = arange(3, dtype=float)\n #\n z = x / y\n assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])\n assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])\n #\n z = x / y[None, :]\n assert_equal(z, [[-1., 1., 1.], [-1., 4., 2.5]])\n assert_equal(z.mask, [[1, 0, 0], [1, 0, 0]])\n #\n y = arange(2, dtype=float)\n z = x / y[:, None]\n assert_equal(z, [[-1., -1., -1.], [3., 4., 5.]])\n assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])\n\n\n def test_mixed_arithmetic(self):\n \"Tests mixed arithmetics.\"\n na = np.array([1])\n ma = array([1])\n self.assertTrue(isinstance(na + ma, MaskedArray))\n self.assertTrue(isinstance(ma + na, MaskedArray))\n\n\n def test_limits_arithmetic(self):\n tiny = np.finfo(float).tiny\n a = array([tiny, 1. 
/ tiny, 0.])\n assert_equal(getmaskarray(a / 2), [0, 0, 0])\n assert_equal(getmaskarray(2 / a), [1, 0, 1])\n\n\n def test_masked_singleton_arithmetic(self):\n \"Tests some scalar arithmetics on MaskedArrays.\"\n # Masked singleton should remain masked no matter what\n xm = array(0, mask=1)\n self.assertTrue((1 / array(0)).mask)\n self.assertTrue((1 + xm).mask)\n self.assertTrue((-xm).mask)\n self.assertTrue(maximum(xm, xm).mask)\n self.assertTrue(minimum(xm, xm).mask)\n\n\n def test_masked_singleton_equality(self):\n \"Tests (in)equality on masked snigleton\"\n a = array([1, 2, 3], mask=[1, 1, 0])\n assert_((a[0] == 0) is masked)\n assert_((a[0] != 0) is masked)\n assert_equal((a[-1] == 0), False)\n assert_equal((a[-1] != 0), True)\n\n\n def test_arithmetic_with_masked_singleton(self):\n \"Checks that there's no collapsing to masked\"\n x = masked_array([1, 2])\n y = x * masked\n assert_equal(y.shape, x.shape)\n assert_equal(y._mask, [True, True])\n y = x[0] * masked\n assert_(y is masked)\n y = x + masked\n assert_equal(y.shape, x.shape)\n assert_equal(y._mask, [True, True])\n\n\n def test_arithmetic_with_masked_singleton_on_1d_singleton(self):\n \"Check that we're not losing the shape of a singleton\"\n x = masked_array([1, ])\n y = x + masked\n assert_equal(y.shape, x.shape)\n assert_equal(y.mask, [True, ])\n\n\n def test_scalar_arithmetic(self):\n x = array(0, mask=0)\n assert_equal(x.filled().ctypes.data, x.ctypes.data)\n # Make sure we don't lose the shape in some circumstances\n xm = array((0, 0)) / 0.\n assert_equal(xm.shape, (2,))\n assert_equal(xm.mask, [1, 1])\n\n\n def test_basic_ufuncs (self):\n \"Test various functions such as sin, cos.\"\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n assert_equal(np.cos(x), cos(xm))\n assert_equal(np.cosh(x), cosh(xm))\n assert_equal(np.sin(x), sin(xm))\n assert_equal(np.sinh(x), sinh(xm))\n assert_equal(np.tan(x), tan(xm))\n assert_equal(np.tanh(x), tanh(xm))\n assert_equal(np.sqrt(abs(x)), sqrt(xm))\n assert_equal(np.log(abs(x)), log(xm))\n assert_equal(np.log10(abs(x)), log10(xm))\n assert_equal(np.exp(x), exp(xm))\n assert_equal(np.arcsin(z), arcsin(zm))\n assert_equal(np.arccos(z), arccos(zm))\n assert_equal(np.arctan(z), arctan(zm))\n assert_equal(np.arctan2(x, y), arctan2(xm, ym))\n assert_equal(np.absolute(x), absolute(xm))\n assert_equal(np.angle(x + 1j*y), angle(xm + 1j*ym))\n assert_equal(np.angle(x + 1j*y, deg=True), angle(xm + 1j*ym, deg=True))\n assert_equal(np.equal(x, y), equal(xm, ym))\n assert_equal(np.not_equal(x, y), not_equal(xm, ym))\n assert_equal(np.less(x, y), less(xm, ym))\n assert_equal(np.greater(x, y), greater(xm, ym))\n assert_equal(np.less_equal(x, y), less_equal(xm, ym))\n assert_equal(np.greater_equal(x, y), greater_equal(xm, ym))\n assert_equal(np.conjugate(x), conjugate(xm))\n\n\n def test_count_func (self):\n \"Tests count\"\n ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])\n if sys.version_info[0] >= 3:\n self.assertTrue(isinstance(count(ott), np.integer))\n else:\n self.assertTrue(isinstance(count(ott), int))\n assert_equal(3, count(ott))\n assert_equal(1, count(1))\n assert_equal(0, array(1, mask=[1]))\n ott = ott.reshape((2, 2))\n assert_(isinstance(count(ott, 0), ndarray))\n if sys.version_info[0] >= 3:\n assert_(isinstance(count(ott), np.integer))\n else:\n assert_(isinstance(count(ott), int))\n assert_equal(3, count(ott))\n assert_(getmask(count(ott, 0)) is nomask)\n assert_equal([1, 2], count(ott, 0))\n\n\n def test_minmax_func (self):\n \"Tests minimum and maximum.\"\n (x, y, a10, m1, m2, 
xm, ym, z, zm, xf) = self.d\n xr = np.ravel(x) #max doesn't work if shaped\n xmr = ravel(xm)\n assert_equal(max(xr), maximum(xmr)) #true because of careful selection of data\n assert_equal(min(xr), minimum(xmr)) #true because of careful selection of data\n #\n assert_equal(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3])\n assert_equal(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9])\n x = arange(5)\n y = arange(5) - 2\n x[3] = masked\n y[0] = masked\n assert_equal(minimum(x, y), where(less(x, y), x, y))\n assert_equal(maximum(x, y), where(greater(x, y), x, y))\n assert_(minimum(x) == 0)\n assert_(maximum(x) == 4)\n #\n x = arange(4).reshape(2, 2)\n x[-1, -1] = masked\n assert_equal(maximum(x), 2)\n\n\n def test_minimummaximum_func(self):\n a = np.ones((2, 2))\n aminimum = minimum(a, a)\n self.assertTrue(isinstance(aminimum, MaskedArray))\n assert_equal(aminimum, np.minimum(a, a))\n #\n aminimum = minimum.outer(a, a)\n self.assertTrue(isinstance(aminimum, MaskedArray))\n assert_equal(aminimum, np.minimum.outer(a, a))\n #\n amaximum = maximum(a, a)\n self.assertTrue(isinstance(amaximum, MaskedArray))\n assert_equal(amaximum, np.maximum(a, a))\n #\n amaximum = maximum.outer(a, a)\n self.assertTrue(isinstance(amaximum, MaskedArray))\n assert_equal(amaximum, np.maximum.outer(a, a))\n\n\n def test_minmax_reduce(self):\n \"Test np.min/maximum.reduce on array w/ full False mask\"\n a = array([1, 2, 3], mask=[False, False, False])\n b = np.maximum.reduce(a)\n assert_equal(b, 3)\n\n def test_minmax_funcs_with_output(self):\n \"Tests the min/max functions with explicit outputs\"\n mask = np.random.rand(12).round()\n xm = array(np.random.uniform(0, 10, 12), mask=mask)\n xm.shape = (3, 4)\n for funcname in ('min', 'max'):\n # Initialize\n npfunc = getattr(np, funcname)\n mafunc = getattr(numpy.ma.core, funcname)\n # Use the np version\n nout = np.empty((4,), dtype=int)\n try:\n result = npfunc(xm, axis=0, out=nout)\n except MaskError:\n pass\n nout = np.empty((4,), dtype=float)\n result = npfunc(xm, axis=0, out=nout)\n self.assertTrue(result is nout)\n # Use the ma version\n nout.fill(-999)\n result = mafunc(xm, axis=0, out=nout)\n self.assertTrue(result is nout)\n\n\n def test_minmax_methods(self):\n \"Additional tests on max/min\"\n (_, _, _, _, _, xm, _, _, _, _) = self.d\n xm.shape = (xm.size,)\n assert_equal(xm.max(), 10)\n self.assertTrue(xm[0].max() is masked)\n self.assertTrue(xm[0].max(0) is masked)\n self.assertTrue(xm[0].max(-1) is masked)\n assert_equal(xm.min(), -10.)\n self.assertTrue(xm[0].min() is masked)\n self.assertTrue(xm[0].min(0) is masked)\n self.assertTrue(xm[0].min(-1) is masked)\n assert_equal(xm.ptp(), 20.)\n self.assertTrue(xm[0].ptp() is masked)\n self.assertTrue(xm[0].ptp(0) is masked)\n self.assertTrue(xm[0].ptp(-1) is masked)\n #\n x = array([1, 2, 3], mask=True)\n self.assertTrue(x.min() is masked)\n self.assertTrue(x.max() is masked)\n self.assertTrue(x.ptp() is masked)\n\n\n def test_addsumprod (self):\n \"Tests add, sum, product.\"\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n assert_equal(np.add.reduce(x), add.reduce(x))\n assert_equal(np.add.accumulate(x), add.accumulate(x))\n assert_equal(4, sum(array(4), axis=0))\n assert_equal(4, sum(array(4), axis=0))\n assert_equal(np.sum(x, axis=0), sum(x, axis=0))\n assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))\n assert_equal(np.sum(x, 0), sum(x, 0))\n assert_equal(np.product(x, axis=0), product(x, axis=0))\n assert_equal(np.product(x, 0), product(x, 0))\n assert_equal(np.product(filled(xm, 1), axis=0), product(xm, 
axis=0))\n s = (3, 4)\n x.shape = y.shape = xm.shape = ym.shape = s\n if len(s) > 1:\n assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))\n assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))\n assert_equal(np.sum(x, 1), sum(x, 1))\n assert_equal(np.product(x, 1), product(x, 1))\n\n\n def test_binops_d2D(self):\n \"Test binary operations on 2D data\"\n a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])\n b = array([[2., 3.], [4., 5.], [6., 7.]])\n #\n test = a * b\n control = array([[2., 3.], [2., 2.], [3., 3.]],\n mask=[[0, 0], [1, 1], [1, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n #\n test = b * a\n control = array([[2., 3.], [4., 5.], [6., 7.]],\n mask=[[0, 0], [1, 1], [1, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n #\n a = array([[1.], [2.], [3.]])\n b = array([[2., 3.], [4., 5.], [6., 7.]],\n mask=[[0, 0], [0, 0], [0, 1]])\n test = a * b\n control = array([[2, 3], [8, 10], [18, 3]],\n mask=[[0, 0], [0, 0], [0, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n #\n test = b * a\n control = array([[2, 3], [8, 10], [18, 7]],\n mask=[[0, 0], [0, 0], [0, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n\n def test_domained_binops_d2D(self):\n \"Test domained binary operations on 2D data\"\n a = array([[1.], [2.], [3.]], mask=[[False], [True], [True]])\n b = array([[2., 3.], [4., 5.], [6., 7.]])\n #\n test = a / b\n control = array([[1. / 2., 1. / 3.], [2., 2.], [3., 3.]],\n mask=[[0, 0], [1, 1], [1, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n #\n test = b / a\n control = array([[2. / 1., 3. / 1.], [4., 5.], [6., 7.]],\n mask=[[0, 0], [1, 1], [1, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n #\n a = array([[1.], [2.], [3.]])\n b = array([[2., 3.], [4., 5.], [6., 7.]],\n mask=[[0, 0], [0, 0], [0, 1]])\n test = a / b\n control = array([[1. / 2, 1. / 3], [2. / 4, 2. / 5], [3. 
/ 6, 3]],\n mask=[[0, 0], [0, 0], [0, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n #\n test = b / a\n control = array([[2 / 1., 3 / 1.], [4 / 2., 5 / 2.], [6 / 3., 7]],\n mask=[[0, 0], [0, 0], [0, 1]])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n\n def test_noshrinking(self):\n \"Check that we don't shrink a mask when not wanted\"\n # Binary operations\n a = masked_array([1., 2., 3.], mask=[False, False, False], shrink=False)\n b = a + 1\n assert_equal(b.mask, [0, 0, 0])\n # In place binary operation\n a += 1\n assert_equal(a.mask, [0, 0, 0])\n # Domained binary operation\n b = a / 1.\n assert_equal(b.mask, [0, 0, 0])\n # In place binary operation\n a /= 1.\n assert_equal(a.mask, [0, 0, 0])\n\n\n def test_mod(self):\n \"Tests mod\"\n (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d\n assert_equal(mod(x, y), mod(xm, ym))\n test = mod(ym, xm)\n assert_equal(test, np.mod(ym, xm))\n assert_equal(test.mask, mask_or(xm.mask, ym.mask))\n test = mod(xm, ym)\n assert_equal(test, np.mod(xm, ym))\n assert_equal(test.mask, mask_or(mask_or(xm.mask, ym.mask), (ym == 0)))\n\n def test_TakeTransposeInnerOuter(self):\n \"Test of take, transpose, inner, outer products\"\n x = arange(24)\n y = np.arange(24)\n x[5:6] = masked\n x = x.reshape(2, 3, 4)\n y = y.reshape(2, 3, 4)\n assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))\n assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))\n assert_equal(np.inner(filled(x, 0), filled(y, 0)),\n inner(x, y))\n assert_equal(np.outer(filled(x, 0), filled(y, 0)),\n outer(x, y))\n y = array(['abc', 1, 'def', 2, 3], object)\n y[2] = masked\n t = take(y, [0, 3, 4])\n assert_(t[0] == 'abc')\n assert_(t[1] == 2)\n assert_(t[2] == 3)\n\n\n def test_imag_real(self):\n \"Check complex\"\n xx = array([1 + 10j, 20 + 2j], mask=[1, 0])\n assert_equal(xx.imag, [10, 2])\n assert_equal(xx.imag.filled(), [1e+20, 2])\n assert_equal(xx.imag.dtype, xx._data.imag.dtype)\n assert_equal(xx.real, [1, 20])\n assert_equal(xx.real.filled(), [1e+20, 20])\n assert_equal(xx.real.dtype, xx._data.real.dtype)\n\n\n def test_methods_with_output(self):\n xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)\n xm[:, 0] = xm[0] = xm[-1, -1] = masked\n #\n funclist = ('sum', 'prod', 'var', 'std', 'max', 'min', 'ptp', 'mean',)\n #\n for funcname in funclist:\n npfunc = getattr(np, funcname)\n xmmeth = getattr(xm, funcname)\n # A ndarray as explicit input\n output = np.empty(4, dtype=float)\n output.fill(-9999)\n result = npfunc(xm, axis=0, out=output)\n # ... 
the result should be the given output\n assert_(result is output)\n assert_equal(result, xmmeth(axis=0, out=output))\n #\n output = empty(4, dtype=int)\n result = xmmeth(axis=0, out=output)\n assert_(result is output)\n assert_(output[0] is masked)\n\n\n def test_eq_on_structured(self):\n \"Test the equality of structured arrays\"\n ndtype = [('A', int), ('B', int)]\n a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)\n test = (a == a)\n assert_equal(test, [True, True])\n assert_equal(test.mask, [False, False])\n b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)\n test = (a == b)\n assert_equal(test, [False, True])\n assert_equal(test.mask, [True, False])\n b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)\n test = (a == b)\n assert_equal(test, [True, False])\n assert_equal(test.mask, [False, False])\n\n\n def test_ne_on_structured(self):\n \"Test the equality of structured arrays\"\n ndtype = [('A', int), ('B', int)]\n a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)\n test = (a != a)\n assert_equal(test, [False, False])\n assert_equal(test.mask, [False, False])\n b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)\n test = (a != b)\n assert_equal(test, [True, False])\n assert_equal(test.mask, [True, False])\n b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)\n test = (a != b)\n assert_equal(test, [False, True])\n assert_equal(test.mask, [False, False])\n\n\n def test_eq_w_None(self):\n # With partial mask\n a = array([1, 2], mask=[0, 1])\n assert_equal(a == None, False)\n assert_equal(a.data == None, False)\n assert_equal(a.mask == None, False)\n assert_equal(a != None, True)\n # With nomask\n a = array([1, 2], mask=False)\n assert_equal(a == None, False)\n assert_equal(a != None, True)\n # With complete mask\n a = array([1, 2], mask=True)\n assert_equal(a == None, False)\n assert_equal(a != None, True)\n # With masked\n a = masked\n assert_equal(a == None, masked)\n\n def test_eq_w_scalar(self):\n a = array(1)\n assert_equal(a == 1, True)\n assert_equal(a == 0, False)\n assert_equal(a != 1, False)\n assert_equal(a != 0, True)\n\n\n def test_numpyarithmetics(self):\n \"Check that the mask is not back-propagated when using numpy functions\"\n a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])\n control = masked_array([np.nan, np.nan, 0, np.log(2), -1],\n mask=[1, 1, 0, 0, 1])\n #\n test = log(a)\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n assert_equal(a.mask, [0, 0, 0, 0, 1])\n #\n test = np.log(a)\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n assert_equal(a.mask, [0, 0, 0, 0, 1])\n\n#------------------------------------------------------------------------------\n\nclass TestMaskedArrayAttributes(TestCase):\n\n def test_keepmask(self):\n \"Tests the keep mask flag\"\n x = masked_array([1, 2, 3], mask=[1, 0, 0])\n mx = masked_array(x)\n assert_equal(mx.mask, x.mask)\n mx = masked_array(x, mask=[0, 1, 0], keep_mask=False)\n assert_equal(mx.mask, [0, 1, 0])\n mx = masked_array(x, mask=[0, 1, 0], keep_mask=True)\n assert_equal(mx.mask, [1, 1, 0])\n # We default to true\n mx = masked_array(x, mask=[0, 1, 0])\n assert_equal(mx.mask, [1, 1, 0])\n\n def test_hardmask(self):\n \"Test hard_mask\"\n d = arange(5)\n n = [0, 0, 0, 1, 1]\n m = make_mask(n)\n xh = array(d, mask=m, hard_mask=True)\n # We need to copy, to avoid updating d in xh !\n xs = array(d, mask=m, hard_mask=False, copy=True)\n xh[[1, 4]] = [10, 40]\n xs[[1, 4]] = [10, 40]\n 
assert_equal(xh._data, [0, 10, 2, 3, 4])\n assert_equal(xs._data, [0, 10, 2, 3, 40])\n #assert_equal(xh.mask.ctypes._data, m.ctypes._data)\n assert_equal(xs.mask, [0, 0, 0, 1, 0])\n self.assertTrue(xh._hardmask)\n self.assertTrue(not xs._hardmask)\n xh[1:4] = [10, 20, 30]\n xs[1:4] = [10, 20, 30]\n assert_equal(xh._data, [0, 10, 20, 3, 4])\n assert_equal(xs._data, [0, 10, 20, 30, 40])\n #assert_equal(xh.mask.ctypes._data, m.ctypes._data)\n assert_equal(xs.mask, nomask)\n xh[0] = masked\n xs[0] = masked\n assert_equal(xh.mask, [1, 0, 0, 1, 1])\n assert_equal(xs.mask, [1, 0, 0, 0, 0])\n xh[:] = 1\n xs[:] = 1\n assert_equal(xh._data, [0, 1, 1, 3, 4])\n assert_equal(xs._data, [1, 1, 1, 1, 1])\n assert_equal(xh.mask, [1, 0, 0, 1, 1])\n assert_equal(xs.mask, nomask)\n # Switch to soft mask\n xh.soften_mask()\n xh[:] = arange(5)\n assert_equal(xh._data, [0, 1, 2, 3, 4])\n assert_equal(xh.mask, nomask)\n # Switch back to hard mask\n xh.harden_mask()\n xh[xh < 3] = masked\n assert_equal(xh._data, [0, 1, 2, 3, 4])\n assert_equal(xh._mask, [1, 1, 1, 0, 0])\n xh[filled(xh > 1, False)] = 5\n assert_equal(xh._data, [0, 1, 2, 5, 5])\n assert_equal(xh._mask, [1, 1, 1, 0, 0])\n #\n xh = array([[1, 2], [3, 4]], mask=[[1, 0], [0, 0]], hard_mask=True)\n xh[0] = 0\n assert_equal(xh._data, [[1, 0], [3, 4]])\n assert_equal(xh._mask, [[1, 0], [0, 0]])\n xh[-1, -1] = 5\n assert_equal(xh._data, [[1, 0], [3, 5]])\n assert_equal(xh._mask, [[1, 0], [0, 0]])\n xh[filled(xh < 5, False)] = 2\n assert_equal(xh._data, [[1, 2], [2, 5]])\n assert_equal(xh._mask, [[1, 0], [0, 0]])\n\n def test_hardmask_again(self):\n \"Another test of hardmask\"\n d = arange(5)\n n = [0, 0, 0, 1, 1]\n m = make_mask(n)\n xh = array(d, mask=m, hard_mask=True)\n xh[4:5] = 999\n #assert_equal(xh.mask.ctypes._data, m.ctypes._data)\n xh[0:1] = 999\n assert_equal(xh._data, [999, 1, 2, 3, 4])\n\n def test_hardmask_oncemore_yay(self):\n \"OK, yet another test of hardmask\"\n \"Make sure that harden_mask/soften_mask//unshare_mask retursn self\"\n a = array([1, 2, 3], mask=[1, 0, 0])\n b = a.harden_mask()\n assert_equal(a, b)\n b[0] = 0\n assert_equal(a, b)\n assert_equal(b, array([1, 2, 3], mask=[1, 0, 0]))\n a = b.soften_mask()\n a[0] = 0\n assert_equal(a, b)\n assert_equal(b, array([0, 2, 3], mask=[0, 0, 0]))\n\n\n def test_smallmask(self):\n \"Checks the behaviour of _smallmask\"\n a = arange(10)\n a[1] = masked\n a[1] = 1\n assert_equal(a._mask, nomask)\n a = arange(10)\n a._smallmask = False\n a[1] = masked\n a[1] = 1\n assert_equal(a._mask, zeros(10))\n\n\n def test_shrink_mask(self):\n \"Tests .shrink_mask()\"\n a = array([1, 2, 3], mask=[0, 0, 0])\n b = a.shrink_mask()\n assert_equal(a, b)\n assert_equal(a.mask, nomask)\n\n\n def test_flat(self):\n \"Test flat on masked_matrices\"\n test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])\n test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])\n control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])\n assert_equal(test, control)\n #\n test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])\n testflat = test.flat\n testflat[:] = testflat[[2, 1, 0]]\n assert_equal(test, control)\n\n#------------------------------------------------------------------------------\n\nclass TestFillingValues(TestCase):\n #\n def test_check_on_scalar(self):\n \"Test _check_fill_value\"\n _check_fill_value = np.ma.core._check_fill_value\n #\n fval = _check_fill_value(0, int)\n assert_equal(fval, 0)\n fval = _check_fill_value(None, int)\n assert_equal(fval, default_fill_value(0))\n #\n fval = 
_check_fill_value(0, \"|S3\")\n assert_equal(fval, asbytes(\"0\"))\n fval = _check_fill_value(None, \"|S3\")\n assert_equal(fval, default_fill_value(\"|S3\"))\n #\n fval = _check_fill_value(1e+20, int)\n assert_equal(fval, default_fill_value(0))\n\n\n def test_check_on_fields(self):\n \"Tests _check_fill_value with records\"\n _check_fill_value = np.ma.core._check_fill_value\n ndtype = [('a', int), ('b', float), ('c', \"|S3\")]\n # A check on a list should return a single record\n fval = _check_fill_value([-999, -12345678.9, \"???\"], ndtype)\n self.assertTrue(isinstance(fval, ndarray))\n assert_equal(fval.item(), [-999, -12345678.9, asbytes(\"???\")])\n # A check on None should output the defaults\n fval = _check_fill_value(None, ndtype)\n self.assertTrue(isinstance(fval, ndarray))\n assert_equal(fval.item(), [default_fill_value(0),\n default_fill_value(0.),\n asbytes(default_fill_value(\"0\"))])\n #.....Using a structured type as fill_value should work\n fill_val = np.array((-999, -12345678.9, \"???\"), dtype=ndtype)\n fval = _check_fill_value(fill_val, ndtype)\n self.assertTrue(isinstance(fval, ndarray))\n assert_equal(fval.item(), [-999, -12345678.9, asbytes(\"???\")])\n\n #.....Using a flexible type w/ a different type shouldn't matter\n # BEHAVIOR in 1.5 and earlier: match structured types by position\n #fill_val = np.array((-999, -12345678.9, \"???\"),\n # dtype=[(\"A\", int), (\"B\", float), (\"C\", \"|S3\")])\n # BEHAVIOR in 1.6 and later: match structured types by name\n fill_val = np.array((\"???\", -999, -12345678.9),\n dtype=[(\"c\", \"|S3\"), (\"a\", int), (\"b\", float), ])\n fval = _check_fill_value(fill_val, ndtype)\n self.assertTrue(isinstance(fval, ndarray))\n assert_equal(fval.item(), [-999, -12345678.9, asbytes(\"???\")])\n\n #.....Using an object-array shouldn't matter either\n fill_value = np.array((-999, -12345678.9, \"???\"), dtype=object)\n fval = _check_fill_value(fill_val, ndtype)\n self.assertTrue(isinstance(fval, ndarray))\n assert_equal(fval.item(), [-999, -12345678.9, asbytes(\"???\")])\n #\n fill_value = np.array((-999, -12345678.9, \"???\"))\n fval = _check_fill_value(fill_val, ndtype)\n self.assertTrue(isinstance(fval, ndarray))\n assert_equal(fval.item(), [-999, -12345678.9, asbytes(\"???\")])\n #.....One-field-only flexible type should work as well\n ndtype = [(\"a\", int)]\n fval = _check_fill_value(-999999999, ndtype)\n self.assertTrue(isinstance(fval, ndarray))\n assert_equal(fval.item(), (-999999999,))\n\n\n def test_fillvalue_conversion(self):\n \"Tests the behavior of fill_value during conversion\"\n # We had a tailored comment to make sure special attributes are properly\n # dealt with\n a = array(asbytes_nested(['3', '4', '5']))\n a._optinfo.update({'comment':\"updated!\"})\n #\n b = array(a, dtype=int)\n assert_equal(b._data, [3, 4, 5])\n assert_equal(b.fill_value, default_fill_value(0))\n #\n b = array(a, dtype=float)\n assert_equal(b._data, [3, 4, 5])\n assert_equal(b.fill_value, default_fill_value(0.))\n #\n b = a.astype(int)\n assert_equal(b._data, [3, 4, 5])\n assert_equal(b.fill_value, default_fill_value(0))\n assert_equal(b._optinfo['comment'], \"updated!\")\n #\n b = a.astype([('a', '|S3')])\n assert_equal(b['a']._data, a._data)\n assert_equal(b['a'].fill_value, a.fill_value)\n\n\n def test_fillvalue(self):\n \"Yet more fun with the fill_value\"\n data = masked_array([1, 2, 3], fill_value= -999)\n series = data[[0, 2, 1]]\n assert_equal(series._fill_value, data._fill_value)\n #\n mtype = [('f', float), ('s', '|S3')]\n x = array([(1, 
'a'), (2, 'b'), (pi, 'pi')], dtype=mtype)\n x.fill_value = 999\n assert_equal(x.fill_value.item(), [999., asbytes('999')])\n assert_equal(x['f'].fill_value, 999)\n assert_equal(x['s'].fill_value, asbytes('999'))\n #\n x.fill_value = (9, '???')\n assert_equal(x.fill_value.item(), (9, asbytes('???')))\n assert_equal(x['f'].fill_value, 9)\n assert_equal(x['s'].fill_value, asbytes('???'))\n #\n x = array([1, 2, 3.1])\n x.fill_value = 999\n assert_equal(np.asarray(x.fill_value).dtype, float)\n assert_equal(x.fill_value, 999.)\n assert_equal(x._fill_value, np.array(999.))\n\n\n def test_fillvalue_exotic_dtype(self):\n \"Tests yet more exotic flexible dtypes\"\n _check_fill_value = np.ma.core._check_fill_value\n ndtype = [('i', int), ('s', '|S8'), ('f', float)]\n control = np.array((default_fill_value(0),\n default_fill_value('0'),\n default_fill_value(0.),),\n dtype=ndtype)\n assert_equal(_check_fill_value(None, ndtype), control)\n # The shape shouldn't matter\n ndtype = [('f0', float, (2, 2))]\n control = np.array((default_fill_value(0.),),\n dtype=[('f0', float)]).astype(ndtype)\n assert_equal(_check_fill_value(None, ndtype), control)\n control = np.array((0,), dtype=[('f0', float)]).astype(ndtype)\n assert_equal(_check_fill_value(0, ndtype), control)\n #\n ndtype = np.dtype(\"int, (2,3)float, float\")\n control = np.array((default_fill_value(0),\n default_fill_value(0.),\n default_fill_value(0.),),\n dtype=\"int, float, float\").astype(ndtype)\n test = _check_fill_value(None, ndtype)\n assert_equal(test, control)\n control = np.array((0, 0, 0), dtype=\"int, float, float\").astype(ndtype)\n assert_equal(_check_fill_value(0, ndtype), control)\n\n\n def test_extremum_fill_value(self):\n \"Tests extremum fill values for flexible type.\"\n a = array([(1, (2, 3)), (4, (5, 6))],\n dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])\n test = a.fill_value\n assert_equal(test['A'], default_fill_value(a['A']))\n assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))\n assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))\n #\n test = minimum_fill_value(a)\n assert_equal(test[0], minimum_fill_value(a['A']))\n assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))\n assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))\n assert_equal(test[1], minimum_fill_value(a['B']))\n #\n test = maximum_fill_value(a)\n assert_equal(test[0], maximum_fill_value(a['A']))\n assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))\n assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))\n assert_equal(test[1], maximum_fill_value(a['B']))\n\n def test_fillvalue_individual_fields(self):\n \"Test setting fill_value on individual fields\"\n ndtype = [('a', int), ('b', int)]\n # Explicit fill_value\n a = array(list(zip([1, 2, 3], [4, 5, 6])),\n fill_value=(-999, -999), dtype=ndtype)\n f = a._fill_value\n aa = a['a']\n aa.set_fill_value(10)\n assert_equal(aa._fill_value, np.array(10))\n assert_equal(tuple(a.fill_value), (10, -999))\n a.fill_value['b'] = -10\n assert_equal(tuple(a.fill_value), (10, -10))\n # Implicit fill_value\n t = array(list(zip([1, 2, 3], [4, 5, 6])), dtype=[('a', int), ('b', int)])\n tt = t['a']\n tt.set_fill_value(10)\n assert_equal(tt._fill_value, np.array(10))\n assert_equal(tuple(t.fill_value), (10, default_fill_value(0)))\n\n def test_fillvalue_implicit_structured_array(self):\n \"Check that fill_value is always defined for structured arrays\"\n ndtype = ('b', float)\n adtype = ('a', float)\n a = array([(1.,), (2.,)], mask=[(False,), (False,)],\n 
fill_value=(np.nan,), dtype=np.dtype([adtype]))\n        b = empty(a.shape, dtype=[adtype, ndtype])\n        b['a'] = a['a']\n        b['a'].set_fill_value(a['a'].fill_value)\n        f = b._fill_value[()]\n        assert_(np.isnan(f[0]))\n        assert_equal(f[-1], default_fill_value(1.))\n\n    def test_fillvalue_as_arguments(self):\n        \"Test adding a fill_value parameter to empty/ones/zeros\"\n        a = empty(3, fill_value=999.)\n        assert_equal(a.fill_value, 999.)\n        #\n        a = ones(3, fill_value=999., dtype=float)\n        assert_equal(a.fill_value, 999.)\n        #\n        a = zeros(3, fill_value=0., dtype=complex)\n        assert_equal(a.fill_value, 0.)\n        #\n        a = identity(3, fill_value=0., dtype=complex)\n        assert_equal(a.fill_value, 0.)\n\n    def test_fillvalue_in_view(self):\n        \"Test the behavior of fill_value in view\"\n\n        # Create initial masked array\n        x = array([1,2,3], fill_value=1, dtype=np.int64)\n\n        # Check that fill_value is preserved by default\n        y = x.view()\n        assert_(y.fill_value==1)\n\n        # Check that fill_value is preserved if dtype is specified and the\n        # dtype is an ndarray sub-class and has a _fill_value attribute\n        y = x.view(MaskedArray)\n        assert_(y.fill_value==1)\n\n        # Check that fill_value is preserved if type is specified and the\n        # dtype is an ndarray sub-class and has a _fill_value attribute (by\n        # default, the first argument is dtype, not type)\n        y = x.view(type=MaskedArray)\n        assert_(y.fill_value==1)\n\n        # Check that code does not crash if passed an ndarray sub-class that\n        # does not have a _fill_value attribute\n        y = x.view(np.ndarray)\n        y = x.view(type=np.ndarray)\n\n        # Check that fill_value can be overridden with view\n        y = x.view(MaskedArray, fill_value=2)\n        assert_(y.fill_value==2)\n\n        # Check that fill_value can be overridden with view (using type=)\n        y = x.view(type=MaskedArray, fill_value=2)\n        assert_(y.fill_value==2)\n\n        # Check that fill_value gets reset if passed a dtype but not a\n        # fill_value. This is because even though in some cases one can safely\n        # cast the fill_value, e.g. if taking an int64 view of an int32 array,\n        # in other cases, this cannot be done (e.g. 
int32 view of an int64\n # array with a large fill_value).\n y = x.view(dtype=np.int32)\n assert_(y.fill_value == 999999)\n\n#------------------------------------------------------------------------------\n\nclass TestUfuncs(TestCase):\n \"Test class for the application of ufuncs on MaskedArrays.\"\n\n def setUp(self):\n \"Base data definition.\"\n self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),\n array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)\n self.err_status = np.geterr()\n np.seterr(divide='ignore', invalid='ignore')\n\n def tearDown(self):\n np.seterr(**self.err_status)\n\n def test_testUfuncRegression(self):\n \"Tests new ufuncs on MaskedArrays.\"\n for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',\n 'sin', 'cos', 'tan',\n 'arcsin', 'arccos', 'arctan',\n 'sinh', 'cosh', 'tanh',\n 'arcsinh',\n 'arccosh',\n 'arctanh',\n 'absolute', 'fabs', 'negative',\n # 'nonzero', 'around',\n 'floor', 'ceil',\n # 'sometrue', 'alltrue',\n 'logical_not',\n 'add', 'subtract', 'multiply',\n 'divide', 'true_divide', 'floor_divide',\n 'remainder', 'fmod', 'hypot', 'arctan2',\n 'equal', 'not_equal', 'less_equal', 'greater_equal',\n 'less', 'greater',\n 'logical_and', 'logical_or', 'logical_xor',\n ]:\n try:\n uf = getattr(umath, f)\n except AttributeError:\n uf = getattr(fromnumeric, f)\n mf = getattr(numpy.ma.core, f)\n args = self.d[:uf.nin]\n ur = uf(*args)\n mr = mf(*args)\n assert_equal(ur.filled(0), mr.filled(0), f)\n assert_mask_equal(ur.mask, mr.mask, err_msg=f)\n\n def test_reduce(self):\n \"Tests reduce on MaskedArrays.\"\n a = self.d[0]\n self.assertTrue(not alltrue(a, axis=0))\n self.assertTrue(sometrue(a, axis=0))\n assert_equal(sum(a[:3], axis=0), 0)\n assert_equal(product(a, axis=0), 0)\n assert_equal(add.reduce(a), pi)\n\n def test_minmax(self):\n \"Tests extrema on MaskedArrays.\"\n a = arange(1, 13).reshape(3, 4)\n amask = masked_where(a < 5, a)\n assert_equal(amask.max(), a.max())\n assert_equal(amask.min(), 5)\n assert_equal(amask.max(0), a.max(0))\n assert_equal(amask.min(0), [5, 6, 7, 8])\n self.assertTrue(amask.max(1)[0].mask)\n self.assertTrue(amask.min(1)[0].mask)\n\n def test_ndarray_mask(self):\n \"Check that the mask of the result is a ndarray (not a MaskedArray...)\"\n a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])\n test = np.sqrt(a)\n control = masked_array([-1, 0, 1, np.sqrt(2), -1],\n mask=[1, 0, 0, 0, 1])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n self.assertTrue(not isinstance(test.mask, MaskedArray))\n\n#------------------------------------------------------------------------------\n\nclass TestMaskedArrayInPlaceArithmetics(TestCase):\n \"Test MaskedArray Arithmetics\"\n\n def setUp(self):\n x = arange(10)\n y = arange(10)\n xm = arange(10)\n xm[2] = masked\n self.intdata = (x, y, xm)\n self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))\n\n def test_inplace_addition_scalar(self):\n \"\"\"Test of inplace additions\"\"\"\n (x, y, xm) = self.intdata\n xm[2] = masked\n x += 1\n assert_equal(x, y + 1)\n xm += 1\n assert_equal(xm, y + 1)\n #\n (x, _, xm) = self.floatdata\n id1 = x.data.ctypes._data\n x += 1.\n assert_(id1 == x.data.ctypes._data)\n assert_equal(x, y + 1.)\n\n def test_inplace_addition_array(self):\n \"\"\"Test of inplace additions\"\"\"\n (x, y, xm) = self.intdata\n m = xm.mask\n a = arange(10, dtype=np.int16)\n a[-1] = masked\n x += a\n xm += a\n assert_equal(x, y + a)\n assert_equal(xm, y + a)\n assert_equal(xm.mask, mask_or(m, a.mask))\n\n def 
test_inplace_subtraction_scalar(self):\n \"\"\"Test of inplace subtractions\"\"\"\n (x, y, xm) = self.intdata\n x -= 1\n assert_equal(x, y - 1)\n xm -= 1\n assert_equal(xm, y - 1)\n\n def test_inplace_subtraction_array(self):\n \"\"\"Test of inplace subtractions\"\"\"\n (x, y, xm) = self.floatdata\n m = xm.mask\n a = arange(10, dtype=float)\n a[-1] = masked\n x -= a\n xm -= a\n assert_equal(x, y - a)\n assert_equal(xm, y - a)\n assert_equal(xm.mask, mask_or(m, a.mask))\n\n def test_inplace_multiplication_scalar(self):\n \"\"\"Test of inplace multiplication\"\"\"\n (x, y, xm) = self.floatdata\n x *= 2.0\n assert_equal(x, y * 2)\n xm *= 2.0\n assert_equal(xm, y * 2)\n\n def test_inplace_multiplication_array(self):\n \"\"\"Test of inplace multiplication\"\"\"\n (x, y, xm) = self.floatdata\n m = xm.mask\n a = arange(10, dtype=float)\n a[-1] = masked\n x *= a\n xm *= a\n assert_equal(x, y * a)\n assert_equal(xm, y * a)\n assert_equal(xm.mask, mask_or(m, a.mask))\n\n def test_inplace_division_scalar_int(self):\n \"\"\"Test of inplace division\"\"\"\n (x, y, xm) = self.intdata\n x = arange(10) * 2\n xm = arange(10) * 2\n xm[2] = masked\n x //= 2\n assert_equal(x, y)\n xm //= 2\n assert_equal(xm, y)\n\n def test_inplace_division_scalar_float(self):\n \"\"\"Test of inplace division\"\"\"\n (x, y, xm) = self.floatdata\n x /= 2.0\n assert_equal(x, y / 2.0)\n xm /= arange(10)\n assert_equal(xm, ones((10,)))\n\n def test_inplace_division_array_float(self):\n \"\"\"Test of inplace division\"\"\"\n (x, y, xm) = self.floatdata\n m = xm.mask\n a = arange(10, dtype=float)\n a[-1] = masked\n x /= a\n xm /= a\n assert_equal(x, y / a)\n assert_equal(xm, y / a)\n assert_equal(xm.mask, mask_or(mask_or(m, a.mask), (a == 0)))\n\n def test_inplace_division_misc(self):\n #\n x = [1., 1., 1., -2., pi / 2., 4., 5., -10., 10., 1., 2., 3.]\n y = [5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.]\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n #\n z = xm / ym\n assert_equal(z._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])\n assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])\n #assert_equal(z._data, [0.2,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])\n #\n xm = xm.copy()\n xm /= ym\n assert_equal(xm._mask, [1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1])\n assert_equal(z._data, [1., 1., 1., -1., -pi / 2., 4., 5., 1., 1., 1., 2., 3.])\n #assert_equal(xm._data, [1/5.,1.,1./3.,-1.,-pi/2.,-1.,5.,1.,1.,1.,2.,1.])\n\n\n def test_datafriendly_add(self):\n \"Test keeping data w/ (inplace) addition\"\n x = array([1, 2, 3], mask=[0, 0, 1])\n # Test add w/ scalar\n xx = x + 1\n assert_equal(xx.data, [2, 3, 3])\n assert_equal(xx.mask, [0, 0, 1])\n # Test iadd w/ scalar\n x += 1\n assert_equal(x.data, [2, 3, 3])\n assert_equal(x.mask, [0, 0, 1])\n # Test add w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x + array([1, 2, 3], mask=[1, 0, 0])\n assert_equal(xx.data, [1, 4, 3])\n assert_equal(xx.mask, [1, 0, 1])\n # Test iadd w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n x += array([1, 2, 3], mask=[1, 0, 0])\n assert_equal(x.data, [1, 4, 3])\n assert_equal(x.mask, [1, 0, 1])\n\n\n def test_datafriendly_sub(self):\n \"Test keeping data w/ (inplace) subtraction\"\n # Test sub w/ scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x - 1\n assert_equal(xx.data, [0, 1, 3])\n assert_equal(xx.mask, [0, 0, 1])\n # Test isub w/ scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n x -= 1\n assert_equal(x.data, [0, 1, 
3])\n assert_equal(x.mask, [0, 0, 1])\n # Test sub w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x - array([1, 2, 3], mask=[1, 0, 0])\n assert_equal(xx.data, [1, 0, 3])\n assert_equal(xx.mask, [1, 0, 1])\n # Test isub w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n x -= array([1, 2, 3], mask=[1, 0, 0])\n assert_equal(x.data, [1, 0, 3])\n assert_equal(x.mask, [1, 0, 1])\n\n\n def test_datafriendly_mul(self):\n \"Test keeping data w/ (inplace) multiplication\"\n # Test mul w/ scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x * 2\n assert_equal(xx.data, [2, 4, 3])\n assert_equal(xx.mask, [0, 0, 1])\n # Test imul w/ scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n x *= 2\n assert_equal(x.data, [2, 4, 3])\n assert_equal(x.mask, [0, 0, 1])\n # Test mul w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x * array([10, 20, 30], mask=[1, 0, 0])\n assert_equal(xx.data, [1, 40, 3])\n assert_equal(xx.mask, [1, 0, 1])\n # Test imul w/ array\n x = array([1, 2, 3], mask=[0, 0, 1])\n x *= array([10, 20, 30], mask=[1, 0, 0])\n assert_equal(x.data, [1, 40, 3])\n assert_equal(x.mask, [1, 0, 1])\n\n\n def test_datafriendly_div(self):\n \"Test keeping data w/ (inplace) division\"\n # Test div on scalar\n x = array([1, 2, 3], mask=[0, 0, 1])\n xx = x / 2.\n assert_equal(xx.data, [1 / 2., 2 / 2., 3])\n assert_equal(xx.mask, [0, 0, 1])\n # Test idiv on scalar\n x = array([1., 2., 3.], mask=[0, 0, 1])\n x /= 2.\n assert_equal(x.data, [1 / 2., 2 / 2., 3])\n assert_equal(x.mask, [0, 0, 1])\n # Test div on array\n x = array([1., 2., 3.], mask=[0, 0, 1])\n xx = x / array([10., 20., 30.], mask=[1, 0, 0])\n assert_equal(xx.data, [1., 2. / 20., 3.])\n assert_equal(xx.mask, [1, 0, 1])\n # Test idiv on array\n x = array([1., 2., 3.], mask=[0, 0, 1])\n x /= array([10., 20., 30.], mask=[1, 0, 0])\n assert_equal(x.data, [1., 2 / 20., 3.])\n assert_equal(x.mask, [1, 0, 1])\n\n\n def test_datafriendly_pow(self):\n \"Test keeping data w/ (inplace) power\"\n # Test pow on scalar\n x = array([1., 2., 3.], mask=[0, 0, 1])\n xx = x ** 2.5\n assert_equal(xx.data, [1., 2. ** 2.5, 3.])\n assert_equal(xx.mask, [0, 0, 1])\n # Test ipow on scalar\n x **= 2.5\n assert_equal(x.data, [1., 2. 
** 2.5, 3])\n assert_equal(x.mask, [0, 0, 1])\n\n\n def test_datafriendly_add_arrays(self):\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 0])\n a += b\n assert_equal(a, [[2, 2], [4, 4]])\n if a.mask is not nomask:\n assert_equal(a.mask, [[0, 0], [0, 0]])\n #\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 1])\n a += b\n assert_equal(a, [[2, 2], [4, 4]])\n assert_equal(a.mask, [[0, 1], [0, 1]])\n\n\n def test_datafriendly_sub_arrays(self):\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 0])\n a -= b\n assert_equal(a, [[0, 0], [2, 2]])\n if a.mask is not nomask:\n assert_equal(a.mask, [[0, 0], [0, 0]])\n #\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 1])\n a -= b\n assert_equal(a, [[0, 0], [2, 2]])\n assert_equal(a.mask, [[0, 1], [0, 1]])\n\n\n def test_datafriendly_mul_arrays(self):\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 0])\n a *= b\n assert_equal(a, [[1, 1], [3, 3]])\n if a.mask is not nomask:\n assert_equal(a.mask, [[0, 0], [0, 0]])\n #\n a = array([[1, 1], [3, 3]])\n b = array([1, 1], mask=[0, 1])\n a *= b\n assert_equal(a, [[1, 1], [3, 3]])\n assert_equal(a.mask, [[0, 1], [0, 1]])\n\n#------------------------------------------------------------------------------\n\nclass TestMaskedArrayMethods(TestCase):\n \"Test class for miscellaneous MaskedArrays methods.\"\n def setUp(self):\n \"Base data definition.\"\n x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,\n 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\n X = x.reshape(6, 6)\n XX = x.reshape(3, 2, 2, 3)\n\n m = np.array([0, 1, 0, 1, 0, 0,\n 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 1, 0])\n mx = array(data=x, mask=m)\n mX = array(data=X, mask=m.reshape(X.shape))\n mXX = array(data=XX, mask=m.reshape(XX.shape))\n\n m2 = np.array([1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 0, 1,\n 0, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 0,\n 0, 0, 1, 0, 1, 1])\n m2x = array(data=x, mask=m2)\n m2X = array(data=X, mask=m2.reshape(X.shape))\n m2XX = array(data=XX, mask=m2.reshape(XX.shape))\n self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)\n\n def test_generic_methods(self):\n \"Tests some MaskedArray methods.\"\n a = array([1, 3, 2])\n b = array([1, 3, 2], mask=[1, 0, 1])\n assert_equal(a.any(), a._data.any())\n assert_equal(a.all(), a._data.all())\n assert_equal(a.argmax(), a._data.argmax())\n assert_equal(a.argmin(), a._data.argmin())\n assert_equal(a.choose(0, 1, 2, 3, 4), a._data.choose(0, 1, 2, 3, 4))\n assert_equal(a.compress([1, 0, 1]), a._data.compress([1, 0, 1]))\n assert_equal(a.conj(), a._data.conj())\n assert_equal(a.conjugate(), a._data.conjugate())\n #\n m = array([[1, 2], [3, 4]])\n assert_equal(m.diagonal(), m._data.diagonal())\n assert_equal(a.sum(), a._data.sum())\n assert_equal(a.take([1, 2]), a._data.take([1, 2]))\n assert_equal(m.transpose(), m._data.transpose())\n\n\n def test_allclose(self):\n \"Tests allclose on arrays\"\n a = np.random.rand(10)\n b = a + np.random.rand(10) * 1e-8\n self.assertTrue(allclose(a, b))\n # Test allclose w/ infs\n a[0] = np.inf\n self.assertTrue(not allclose(a, b))\n b[0] = np.inf\n self.assertTrue(allclose(a, b))\n # Test all close w/ masked\n a = masked_array(a)\n a[-1] = masked\n self.assertTrue(allclose(a, b, masked_equal=True))\n self.assertTrue(not allclose(a, b, masked_equal=False))\n # Test 
comparison w/ scalar\n a *= 1e-8\n a[0] = 0\n self.assertTrue(allclose(a, 0, masked_equal=True))\n\n\n def test_allany(self):\n \"\"\"Checks the any/all methods/functions.\"\"\"\n x = np.array([[ 0.13, 0.26, 0.90],\n [ 0.28, 0.33, 0.63],\n [ 0.31, 0.87, 0.70]])\n m = np.array([[ True, False, False],\n [False, False, False],\n [True, True, False]], dtype=np.bool_)\n mx = masked_array(x, mask=m)\n xbig = np.array([[False, False, True],\n [False, False, True],\n [False, True, True]], dtype=np.bool_)\n mxbig = (mx > 0.5)\n mxsmall = (mx < 0.5)\n #\n assert_((mxbig.all() == False))\n assert_((mxbig.any() == True))\n assert_equal(mxbig.all(0), [False, False, True])\n assert_equal(mxbig.all(1), [False, False, True])\n assert_equal(mxbig.any(0), [False, False, True])\n assert_equal(mxbig.any(1), [True, True, True])\n #\n assert_((mxsmall.all() == False))\n assert_((mxsmall.any() == True))\n assert_equal(mxsmall.all(0), [True, True, False])\n assert_equal(mxsmall.all(1), [False, False, False])\n assert_equal(mxsmall.any(0), [True, True, False])\n assert_equal(mxsmall.any(1), [True, True, False])\n\n\n def test_allany_onmatrices(self):\n x = np.array([[ 0.13, 0.26, 0.90],\n [ 0.28, 0.33, 0.63],\n [ 0.31, 0.87, 0.70]])\n X = np.matrix(x)\n m = np.array([[ True, False, False],\n [False, False, False],\n [True, True, False]], dtype=np.bool_)\n mX = masked_array(X, mask=m)\n mXbig = (mX > 0.5)\n mXsmall = (mX < 0.5)\n #\n assert_((mXbig.all() == False))\n assert_((mXbig.any() == True))\n assert_equal(mXbig.all(0), np.matrix([False, False, True]))\n assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)\n assert_equal(mXbig.any(0), np.matrix([False, False, True]))\n assert_equal(mXbig.any(1), np.matrix([ True, True, True]).T)\n #\n assert_((mXsmall.all() == False))\n assert_((mXsmall.any() == True))\n assert_equal(mXsmall.all(0), np.matrix([True, True, False]))\n assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)\n assert_equal(mXsmall.any(0), np.matrix([True, True, False]))\n assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)\n\n\n def test_allany_oddities(self):\n \"Some fun with all and any\"\n store = empty((), dtype=bool)\n full = array([1, 2, 3], mask=True)\n #\n self.assertTrue(full.all() is masked)\n full.all(out=store)\n self.assertTrue(store)\n self.assertTrue(store._mask, True)\n self.assertTrue(store is not masked)\n #\n store = empty((), dtype=bool)\n self.assertTrue(full.any() is masked)\n full.any(out=store)\n self.assertTrue(not store)\n self.assertTrue(store._mask, True)\n self.assertTrue(store is not masked)\n\n\n def test_argmax_argmin(self):\n \"Tests argmin & argmax on MaskedArrays.\"\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n #\n assert_equal(mx.argmin(), 35)\n assert_equal(mX.argmin(), 35)\n assert_equal(m2x.argmin(), 4)\n assert_equal(m2X.argmin(), 4)\n assert_equal(mx.argmax(), 28)\n assert_equal(mX.argmax(), 28)\n assert_equal(m2x.argmax(), 31)\n assert_equal(m2X.argmax(), 31)\n #\n assert_equal(mX.argmin(0), [2, 2, 2, 5, 0, 5])\n assert_equal(m2X.argmin(0), [2, 2, 4, 5, 0, 4])\n assert_equal(mX.argmax(0), [0, 5, 0, 5, 4, 0])\n assert_equal(m2X.argmax(0), [5, 5, 0, 5, 1, 0])\n #\n assert_equal(mX.argmin(1), [4, 1, 0, 0, 5, 5, ])\n assert_equal(m2X.argmin(1), [4, 4, 0, 0, 5, 3])\n assert_equal(mX.argmax(1), [2, 4, 1, 1, 4, 1])\n assert_equal(m2X.argmax(1), [2, 4, 1, 1, 1, 1])\n\n\n def test_clip(self):\n \"Tests clip on MaskedArrays.\"\n x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,\n 8.43 , 7.78 , 9.865, 5.878, 8.979, 
4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\n m = np.array([0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0])\n mx = array(x, mask=m)\n clipped = mx.clip(2, 8)\n assert_equal(clipped.mask, mx.mask)\n assert_equal(clipped._data, x.clip(2, 8))\n assert_equal(clipped._data, mx._data.clip(2, 8))\n\n\n def test_compress(self):\n \"test compress\"\n a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)\n condition = (a > 1.5) & (a < 3.5)\n assert_equal(a.compress(condition), [2., 3.])\n #\n a[[2, 3]] = masked\n b = a.compress(condition)\n assert_equal(b._data, [2., 3.])\n assert_equal(b._mask, [0, 1])\n assert_equal(b.fill_value, 9999)\n assert_equal(b, a[condition])\n #\n condition = (a < 4.)\n b = a.compress(condition)\n assert_equal(b._data, [1., 2., 3.])\n assert_equal(b._mask, [0, 0, 1])\n assert_equal(b.fill_value, 9999)\n assert_equal(b, a[condition])\n #\n a = masked_array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0]])\n b = a.compress(a.ravel() >= 22)\n assert_equal(b._data, [30, 40, 50, 60])\n assert_equal(b._mask, [1, 1, 0, 0])\n #\n x = np.array([3, 1, 2])\n b = a.compress(x >= 2, axis=1)\n assert_equal(b._data, [[10, 30], [40, 60]])\n assert_equal(b._mask, [[0, 1], [1, 0]])\n\n\n def test_compressed(self):\n \"Tests compressed\"\n a = array([1, 2, 3, 4], mask=[0, 0, 0, 0])\n b = a.compressed()\n assert_equal(b, a)\n a[0] = masked\n b = a.compressed()\n assert_equal(b, [2, 3, 4])\n #\n a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])\n b = a.compressed()\n assert_equal(b, a)\n self.assertTrue(isinstance(b, np.matrix))\n a[0, 0] = masked\n b = a.compressed()\n assert_equal(b, [[2, 3, 4]])\n\n\n def test_empty(self):\n \"Tests empty/like\"\n datatype = [('a', int), ('b', float), ('c', '|S8')]\n a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],\n dtype=datatype)\n assert_equal(len(a.fill_value.item()), len(datatype))\n #\n b = empty_like(a)\n assert_equal(b.shape, a.shape)\n assert_equal(b.fill_value, a.fill_value)\n #\n b = empty(len(a), dtype=datatype)\n assert_equal(b.shape, a.shape)\n assert_equal(b.fill_value, a.fill_value)\n\n\n def test_put(self):\n \"Tests put.\"\n d = arange(5)\n n = [0, 0, 0, 1, 1]\n m = make_mask(n)\n x = array(d, mask=m)\n self.assertTrue(x[3] is masked)\n self.assertTrue(x[4] is masked)\n x[[1, 4]] = [10, 40]\n #self.assertTrue(x.mask is not m)\n self.assertTrue(x[3] is masked)\n self.assertTrue(x[4] is not masked)\n assert_equal(x, [0, 10, 2, -1, 40])\n #\n x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)\n i = [0, 2, 4, 6]\n x.put(i, [6, 4, 2, 0])\n assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))\n assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])\n x.put(i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))\n assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])\n assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])\n #\n x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)\n put(x, i, [6, 4, 2, 0])\n assert_equal(x, asarray([6, 1, 4, 3, 2, 5, 0, 7, 8, 9, ]))\n assert_equal(x.mask, [0, 0, 0, 0, 0, 1, 0, 0, 0, 0])\n put(x, i, masked_array([0, 2, 4, 6], [1, 0, 1, 0]))\n assert_array_equal(x, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, ])\n assert_equal(x.mask, [1, 0, 0, 0, 1, 1, 0, 0, 0, 0])\n\n\n def test_put_hardmask(self):\n \"Tests put on hardmask\"\n d = arange(5)\n n = [0, 0, 0, 1, 1]\n m = make_mask(n)\n xh = 
array(d + 1, mask=m, hard_mask=True, copy=True)\n xh.put([4, 2, 0, 1, 3], [1, 2, 3, 4, 5])\n assert_equal(xh._data, [3, 4, 2, 4, 5])\n\n\n def test_putmask(self):\n x = arange(6) + 1\n mx = array(x, mask=[0, 0, 0, 1, 1, 1])\n mask = [0, 0, 1, 0, 0, 1]\n # w/o mask, w/o masked values\n xx = x.copy()\n putmask(xx, mask, 99)\n assert_equal(xx, [1, 2, 99, 4, 5, 99])\n # w/ mask, w/o masked values\n mxx = mx.copy()\n putmask(mxx, mask, 99)\n assert_equal(mxx._data, [1, 2, 99, 4, 5, 99])\n assert_equal(mxx._mask, [0, 0, 0, 1, 1, 0])\n # w/o mask, w/ masked values\n values = array([10, 20, 30, 40, 50, 60], mask=[1, 1, 1, 0, 0, 0])\n xx = x.copy()\n putmask(xx, mask, values)\n assert_equal(xx._data, [1, 2, 30, 4, 5, 60])\n assert_equal(xx._mask, [0, 0, 1, 0, 0, 0])\n # w/ mask, w/ masked values\n mxx = mx.copy()\n putmask(mxx, mask, values)\n assert_equal(mxx._data, [1, 2, 30, 4, 5, 60])\n assert_equal(mxx._mask, [0, 0, 1, 1, 1, 0])\n # w/ mask, w/ masked values + hardmask\n mxx = mx.copy()\n mxx.harden_mask()\n putmask(mxx, mask, values)\n assert_equal(mxx, [1, 2, 30, 4, 5, 60])\n\n\n def test_ravel(self):\n \"Tests ravel\"\n a = array([[1, 2, 3, 4, 5]], mask=[[0, 1, 0, 0, 0]])\n aravel = a.ravel()\n assert_equal(a._mask.shape, a.shape)\n a = array([0, 0], mask=[1, 1])\n aravel = a.ravel()\n assert_equal(a._mask.shape, a.shape)\n a = array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])\n aravel = a.ravel()\n assert_equal(a.shape, (1, 5))\n assert_equal(a._mask.shape, a.shape)\n # Checks that small_mask is preserved\n a = array([1, 2, 3, 4], mask=[0, 0, 0, 0], shrink=False)\n assert_equal(a.ravel()._mask, [0, 0, 0, 0])\n # Test that the fill_value is preserved\n a.fill_value = -99\n a.shape = (2, 2)\n ar = a.ravel()\n assert_equal(ar._mask, [0, 0, 0, 0])\n assert_equal(ar._data, [1, 2, 3, 4])\n assert_equal(ar.fill_value, -99)\n\n\n def test_reshape(self):\n \"Tests reshape\"\n x = arange(4)\n x[0] = masked\n y = x.reshape(2, 2)\n assert_equal(y.shape, (2, 2,))\n assert_equal(y._mask.shape, (2, 2,))\n assert_equal(x.shape, (4,))\n assert_equal(x._mask.shape, (4,))\n\n\n def test_sort(self):\n \"Test sort\"\n x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)\n #\n sortedx = sort(x)\n assert_equal(sortedx._data, [1, 2, 3, 4])\n assert_equal(sortedx._mask, [0, 0, 0, 1])\n #\n sortedx = sort(x, endwith=False)\n assert_equal(sortedx._data, [4, 1, 2, 3])\n assert_equal(sortedx._mask, [1, 0, 0, 0])\n #\n x.sort()\n assert_equal(x._data, [1, 2, 3, 4])\n assert_equal(x._mask, [0, 0, 0, 1])\n #\n x = array([1, 4, 2, 3], mask=[0, 1, 0, 0], dtype=np.uint8)\n x.sort(endwith=False)\n assert_equal(x._data, [4, 1, 2, 3])\n assert_equal(x._mask, [1, 0, 0, 0])\n #\n x = [1, 4, 2, 3]\n sortedx = sort(x)\n self.assertTrue(not isinstance(sorted, MaskedArray))\n #\n x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)\n sortedx = sort(x, endwith=False)\n assert_equal(sortedx._data, [-2, -1, 0, 1, 2])\n x = array([0, 1, -1, -2, 2], mask=[0, 1, 0, 0, 1], dtype=np.int8)\n sortedx = sort(x, endwith=False)\n assert_equal(sortedx._data, [1, 2, -2, -1, 0])\n assert_equal(sortedx._mask, [1, 1, 0, 0, 0])\n\n\n def test_sort_2d(self):\n \"Check sort of 2D array.\"\n # 2D array w/o mask\n a = masked_array([[8, 4, 1], [2, 0, 9]])\n a.sort(0)\n assert_equal(a, [[2, 0, 1], [8, 4, 9]])\n a = masked_array([[8, 4, 1], [2, 0, 9]])\n a.sort(1)\n assert_equal(a, [[1, 4, 8], [0, 2, 9]])\n # 2D array w/mask\n a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])\n a.sort(0)\n assert_equal(a, [[2, 0, 1], [8, 
4, 9]])\n assert_equal(a._mask, [[0, 0, 0], [1, 0, 1]])\n a = masked_array([[8, 4, 1], [2, 0, 9]], mask=[[1, 0, 0], [0, 0, 1]])\n a.sort(1)\n assert_equal(a, [[1, 4, 8], [0, 2, 9]])\n assert_equal(a._mask, [[0, 0, 1], [0, 0, 1]])\n # 3D\n a = masked_array([[[7, 8, 9], [4, 5, 6], [1, 2, 3]],\n [[1, 2, 3], [7, 8, 9], [4, 5, 6]],\n [[7, 8, 9], [1, 2, 3], [4, 5, 6]],\n [[4, 5, 6], [1, 2, 3], [7, 8, 9]]])\n a[a % 4 == 0] = masked\n am = a.copy()\n an = a.filled(99)\n am.sort(0)\n an.sort(0)\n assert_equal(am, an)\n am = a.copy()\n an = a.filled(99)\n am.sort(1)\n an.sort(1)\n assert_equal(am, an)\n am = a.copy()\n an = a.filled(99)\n am.sort(2)\n an.sort(2)\n assert_equal(am, an)\n\n\n def test_sort_flexible(self):\n \"Test sort on flexible dtype.\"\n a = array([(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],\n mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],\n dtype=[('A', int), ('B', int)])\n #\n test = sort(a)\n b = array([(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],\n mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],\n dtype=[('A', int), ('B', int)])\n assert_equal(test, b)\n assert_equal(test.mask, b.mask)\n #\n test = sort(a, endwith=False)\n b = array([(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],\n mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],\n dtype=[('A', int), ('B', int)])\n assert_equal(test, b)\n assert_equal(test.mask, b.mask)\n\n def test_argsort(self):\n \"Test argsort\"\n a = array([1, 5, 2, 4, 3], mask=[1, 0, 0, 1, 0])\n assert_equal(np.argsort(a), argsort(a))\n\n\n def test_squeeze(self):\n \"Check squeeze\"\n data = masked_array([[1, 2, 3]])\n assert_equal(data.squeeze(), [1, 2, 3])\n data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])\n assert_equal(data.squeeze(), [1, 2, 3])\n assert_equal(data.squeeze()._mask, [1, 1, 1])\n data = masked_array([[1]], mask=True)\n self.assertTrue(data.squeeze() is masked)\n\n\n def test_swapaxes(self):\n \"Tests swapaxes on MaskedArrays.\"\n x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,\n 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\n m = np.array([0, 1, 0, 1, 0, 0,\n 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 1, 0])\n mX = array(x, mask=m).reshape(6, 6)\n mXX = mX.reshape(3, 2, 2, 3)\n #\n mXswapped = mX.swapaxes(0, 1)\n assert_equal(mXswapped[-1], mX[:, -1])\n\n mXXswapped = mXX.swapaxes(0, 2)\n assert_equal(mXXswapped.shape, (2, 2, 3, 3))\n\n\n def test_take(self):\n \"Tests take\"\n x = masked_array([10, 20, 30, 40], [0, 1, 0, 1])\n assert_equal(x.take([0, 0, 3]), masked_array([10, 10, 40], [0, 0, 1]))\n assert_equal(x.take([0, 0, 3]), x[[0, 0, 3]])\n assert_equal(x.take([[0, 1], [0, 1]]),\n masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))\n #\n x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])\n assert_equal(x.take([0, 2], axis=1),\n array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))\n assert_equal(take(x, [0, 2], axis=1),\n array([[10, 30], [40, 60]], mask=[[0, 1], [1, 0]]))\n\n def test_take_masked_indices(self):\n \"Test take w/ masked indices\"\n a = np.array((40, 18, 37, 9, 22))\n indices = np.arange(3)[None, :] + np.arange(5)[:, None]\n mindices = array(indices, mask=(indices >= len(a)))\n # No mask\n test = take(a, mindices, mode='clip')\n ctrl = array([[40, 18, 37],\n [18, 37, 9],\n [37, 9, 
22],\n [ 9, 22, 22],\n [22, 22, 22]])\n assert_equal(test, ctrl)\n # Masked indices\n test = take(a, mindices)\n ctrl = array([[40, 18, 37],\n [18, 37, 9],\n [37, 9, 22],\n [ 9, 22, 40],\n [22, 40, 40]])\n ctrl[3, 2] = ctrl[4, 1] = ctrl[4, 2] = masked\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n # Masked input + masked indices\n a = array((40, 18, 37, 9, 22), mask=(0, 1, 0, 0, 0))\n test = take(a, mindices)\n ctrl[0, 1] = ctrl[1, 0] = masked\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n\n\n def test_tolist(self):\n \"Tests to list\"\n # ... on 1D\n x = array(np.arange(12))\n x[[1, -2]] = masked\n xlist = x.tolist()\n self.assertTrue(xlist[1] is None)\n self.assertTrue(xlist[-2] is None)\n # ... on 2D\n x.shape = (3, 4)\n xlist = x.tolist()\n ctrl = [[0, None, 2, 3], [4, 5, 6, 7], [8, 9, None, 11]]\n assert_equal(xlist[0], [0, None, 2, 3])\n assert_equal(xlist[1], [4, 5, 6, 7])\n assert_equal(xlist[2], [8, 9, None, 11])\n assert_equal(xlist, ctrl)\n # ... on structured array w/ masked records\n x = array(list(zip([1, 2, 3],\n [1.1, 2.2, 3.3],\n ['one', 'two', 'thr'])),\n dtype=[('a', int), ('b', float), ('c', '|S8')])\n x[-1] = masked\n assert_equal(x.tolist(),\n [(1, 1.1, asbytes('one')),\n (2, 2.2, asbytes('two')),\n (None, None, None)])\n # ... on structured array w/ masked fields\n a = array([(1, 2,), (3, 4)], mask=[(0, 1), (0, 0)],\n dtype=[('a', int), ('b', int)])\n test = a.tolist()\n assert_equal(test, [[1, None], [3, 4]])\n # ... on mvoid\n a = a[0]\n test = a.tolist()\n assert_equal(test, [1, None])\n\n def test_tolist_specialcase(self):\n \"Test mvoid.tolist: make sure we return a standard Python object\"\n a = array([(0, 1), (2, 3)], dtype=[('a', int), ('b', int)])\n # w/o mask: each entry is a np.void whose elements are standard Python\n for entry in a:\n for item in entry.tolist():\n assert_(not isinstance(item, np.generic))\n # w/ mask: each entry is a ma.void whose elements should be standard Python\n a.mask[0] = (0, 1)\n for entry in a:\n for item in entry.tolist():\n assert_(not isinstance(item, np.generic))\n\n\n def test_toflex(self):\n \"Test the conversion to records\"\n data = arange(10)\n record = data.toflex()\n assert_equal(record['_data'], data._data)\n assert_equal(record['_mask'], data._mask)\n #\n data[[0, 1, 2, -1]] = masked\n record = data.toflex()\n assert_equal(record['_data'], data._data)\n assert_equal(record['_mask'], data._mask)\n #\n ndtype = [('i', int), ('s', '|S3'), ('f', float)]\n data = array([(i, s, f) for (i, s, f) in zip(np.arange(10),\n 'ABCDEFGHIJKLM',\n np.random.rand(10))],\n dtype=ndtype)\n data[[0, 1, 2, -1]] = masked\n record = data.toflex()\n assert_equal(record['_data'], data._data)\n assert_equal(record['_mask'], data._mask)\n #\n ndtype = np.dtype(\"int, (2,3)float, float\")\n data = array([(i, f, ff) for (i, f, ff) in zip(np.arange(10),\n np.random.rand(10),\n np.random.rand(10))],\n dtype=ndtype)\n data[[0, 1, 2, -1]] = masked\n record = data.toflex()\n assert_equal_records(record['_data'], data._data)\n assert_equal_records(record['_mask'], data._mask)\n\n\n def test_fromflex(self):\n \"Test the reconstruction of a masked_array from a record\"\n a = array([1, 2, 3])\n test = fromflex(a.toflex())\n assert_equal(test, a)\n assert_equal(test.mask, a.mask)\n #\n a = array([1, 2, 3], mask=[0, 0, 1])\n test = fromflex(a.toflex())\n assert_equal(test, a)\n assert_equal(test.mask, a.mask)\n #\n a = array([(1, 1.), (2, 2.), (3, 3.)], mask=[(1, 0), (0, 0), (0, 1)],\n dtype=[('A', int), ('B', 
float)])\n test = fromflex(a.toflex())\n assert_equal(test, a)\n assert_equal(test.data, a.data)\n\n\n def test_arraymethod(self):\n \"Test a _arraymethod w/ n argument\"\n marray = masked_array([[1, 2, 3, 4, 5]], mask=[0, 0, 1, 0, 0])\n control = masked_array([[1], [2], [3], [4], [5]],\n mask=[0, 0, 1, 0, 0])\n assert_equal(marray.T, control)\n assert_equal(marray.transpose(), control)\n #\n assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))\n\n\n#------------------------------------------------------------------------------\n\n\nclass TestMaskedArrayMathMethods(TestCase):\n\n def setUp(self):\n \"Base data definition.\"\n x = np.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,\n 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,\n 7.189, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993])\n X = x.reshape(6, 6)\n XX = x.reshape(3, 2, 2, 3)\n\n m = np.array([0, 1, 0, 1, 0, 0,\n 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 1, 0])\n mx = array(data=x, mask=m)\n mX = array(data=X, mask=m.reshape(X.shape))\n mXX = array(data=XX, mask=m.reshape(XX.shape))\n\n m2 = np.array([1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 0, 1,\n 0, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 0,\n 0, 0, 1, 0, 1, 1])\n m2x = array(data=x, mask=m2)\n m2X = array(data=X, mask=m2.reshape(X.shape))\n m2XX = array(data=XX, mask=m2.reshape(XX.shape))\n self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)\n\n\n def test_cumsumprod(self):\n \"Tests cumsum & cumprod on MaskedArrays.\"\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n mXcp = mX.cumsum(0)\n assert_equal(mXcp._data, mX.filled(0).cumsum(0))\n mXcp = mX.cumsum(1)\n assert_equal(mXcp._data, mX.filled(0).cumsum(1))\n #\n mXcp = mX.cumprod(0)\n assert_equal(mXcp._data, mX.filled(1).cumprod(0))\n mXcp = mX.cumprod(1)\n assert_equal(mXcp._data, mX.filled(1).cumprod(1))\n\n\n def test_cumsumprod_with_output(self):\n \"Tests cumsum/cumprod w/ output\"\n xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)\n xm[:, 0] = xm[0] = xm[-1, -1] = masked\n #\n for funcname in ('cumsum', 'cumprod'):\n npfunc = getattr(np, funcname)\n xmmeth = getattr(xm, funcname)\n\n # A ndarray as explicit input\n output = np.empty((3, 4), dtype=float)\n output.fill(-9999)\n result = npfunc(xm, axis=0, out=output)\n # ... 
the result should be the given output\n self.assertTrue(result is output)\n assert_equal(result, xmmeth(axis=0, out=output))\n #\n output = empty((3, 4), dtype=int)\n result = xmmeth(axis=0, out=output)\n self.assertTrue(result is output)\n\n\n def test_ptp(self):\n \"Tests ptp on MaskedArrays.\"\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n (n, m) = X.shape\n assert_equal(mx.ptp(), mx.compressed().ptp())\n rows = np.zeros(n, np.float)\n cols = np.zeros(m, np.float)\n for k in range(m):\n cols[k] = mX[:, k].compressed().ptp()\n for k in range(n):\n rows[k] = mX[k].compressed().ptp()\n assert_equal(mX.ptp(0), cols)\n assert_equal(mX.ptp(1), rows)\n\n\n def test_sum_object(self):\n \"Test sum on object dtype\"\n a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)\n assert_equal(a.sum(), 5)\n a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)\n assert_equal(a.sum(axis=0), [5, 7, 9])\n\n def test_prod_object(self):\n \"Test prod on object dtype\"\n a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)\n assert_equal(a.prod(), 2 * 3)\n a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)\n assert_equal(a.prod(axis=0), [4, 10, 18])\n\n def test_meananom_object(self):\n \"Test mean/anom on object dtype\"\n a = masked_array([1, 2, 3], dtype=np.object)\n assert_equal(a.mean(), 2)\n assert_equal(a.anom(), [-1, 0, 1])\n\n\n def test_trace(self):\n \"Tests trace on MaskedArrays.\"\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n mXdiag = mX.diagonal()\n assert_equal(mX.trace(), mX.diagonal().compressed().sum())\n assert_almost_equal(mX.trace(),\n X.trace() - sum(mXdiag.mask * X.diagonal(), axis=0))\n\n\n def test_varstd(self):\n \"Tests var & std on MaskedArrays.\"\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n assert_almost_equal(mX.var(axis=None), mX.compressed().var())\n assert_almost_equal(mX.std(axis=None), mX.compressed().std())\n assert_almost_equal(mX.std(axis=None, ddof=1),\n mX.compressed().std(ddof=1))\n assert_almost_equal(mX.var(axis=None, ddof=1),\n mX.compressed().var(ddof=1))\n assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)\n assert_equal(mX.var().shape, X.var().shape)\n (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))\n assert_almost_equal(mX.var(axis=None, ddof=2), mX.compressed().var(ddof=2))\n assert_almost_equal(mX.std(axis=None, ddof=2), mX.compressed().std(ddof=2))\n for k in range(6):\n assert_almost_equal(mXvar1[k], mX[k].compressed().var())\n assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())\n assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std())\n\n\n def test_varstd_specialcases(self):\n \"Test a special case for var\"\n nout = np.array(-1, dtype=float)\n mout = array(-1, dtype=float)\n #\n x = array(arange(10), mask=True)\n for methodname in ('var', 'std'):\n method = getattr(x, methodname)\n self.assertTrue(method() is masked)\n self.assertTrue(method(0) is masked)\n self.assertTrue(method(-1) is masked)\n # Using a masked array as explicit output\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n _ = method(out=mout)\n self.assertTrue(mout is not masked)\n assert_equal(mout.mask, True)\n # Using a ndarray as explicit output\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n _ = method(out=nout)\n self.assertTrue(np.isnan(nout))\n #\n x = array(arange(10), mask=True)\n x[-1] = 9\n for methodname in ('var', 'std'):\n method = getattr(x, methodname)\n self.assertTrue(method(ddof=1) is masked)\n self.assertTrue(method(0, ddof=1) is masked)\n 
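# Only one entry of x is unmasked here, so ddof=1 leaves zero degrees of\n            # freedom and var/std are undefined, hence the masked results.\n            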
self.assertTrue(method(-1, ddof=1) is masked)\n # Using a masked array as explicit output\n _ = method(out=mout, ddof=1)\n self.assertTrue(mout is not masked)\n assert_equal(mout.mask, True)\n # Using a ndarray as explicit output\n _ = method(out=nout, ddof=1)\n self.assertTrue(np.isnan(nout))\n\n\n def test_varstd_ddof(self):\n a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])\n test = a.std(axis=0, ddof=0)\n assert_equal(test.filled(0), [0, 0, 0])\n assert_equal(test.mask, [0, 0, 1])\n test = a.std(axis=0, ddof=1)\n assert_equal(test.filled(0), [0, 0, 0])\n assert_equal(test.mask, [0, 0, 1])\n test = a.std(axis=0, ddof=2)\n assert_equal(test.filled(0), [0, 0, 0])\n assert_equal(test.mask, [1, 1, 1])\n\n\n def test_diag(self):\n \"Test diag\"\n x = arange(9).reshape((3, 3))\n x[1, 1] = masked\n out = np.diag(x)\n assert_equal(out, [0, 4, 8])\n out = diag(x)\n assert_equal(out, [0, 4, 8])\n assert_equal(out.mask, [0, 1, 0])\n out = diag(out)\n control = array([[0, 0, 0], [0, 4, 0], [0, 0, 8]],\n mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])\n assert_equal(out, control)\n\n\n def test_axis_methods_nomask(self):\n \"Test the combination nomask & methods w/ axis\"\n a = array([[1, 2, 3], [4, 5, 6]])\n #\n assert_equal(a.sum(0), [5, 7, 9])\n assert_equal(a.sum(-1), [6, 15])\n assert_equal(a.sum(1), [6, 15])\n #\n assert_equal(a.prod(0), [4, 10, 18])\n assert_equal(a.prod(-1), [6, 120])\n assert_equal(a.prod(1), [6, 120])\n #\n assert_equal(a.min(0), [1, 2, 3])\n assert_equal(a.min(-1), [1, 4])\n assert_equal(a.min(1), [1, 4])\n #\n assert_equal(a.max(0), [4, 5, 6])\n assert_equal(a.max(-1), [3, 6])\n assert_equal(a.max(1), [3, 6])\n\n#------------------------------------------------------------------------------\n\nclass TestMaskedArrayMathMethodsComplex(TestCase):\n \"Test class for miscellaneous MaskedArrays methods.\"\n def setUp(self):\n \"Base data definition.\"\n x = np.array([ 8.375j, 7.545j, 8.828j, 8.5j , 1.757j, 5.928,\n 8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,\n 3.012, 6.022, 5.095, 3.116, 5.238, 3.957,\n 6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479j,\n 7.189j, 9.645, 5.395, 4.961, 9.894, 2.893,\n 7.357, 9.828, 6.272, 3.758, 6.693, 0.993j])\n X = x.reshape(6, 6)\n XX = x.reshape(3, 2, 2, 3)\n\n m = np.array([0, 1, 0, 1, 0, 0,\n 1, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 0, 0,\n 0, 0, 1, 0, 1, 0])\n mx = array(data=x, mask=m)\n mX = array(data=X, mask=m.reshape(X.shape))\n mXX = array(data=XX, mask=m.reshape(XX.shape))\n\n m2 = np.array([1, 1, 0, 1, 0, 0,\n 1, 1, 1, 1, 0, 1,\n 0, 0, 1, 1, 0, 1,\n 0, 0, 0, 1, 1, 1,\n 1, 0, 0, 1, 1, 0,\n 0, 0, 1, 0, 1, 1])\n m2x = array(data=x, mask=m2)\n m2X = array(data=X, mask=m2.reshape(X.shape))\n m2XX = array(data=XX, mask=m2.reshape(XX.shape))\n self.d = (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX)\n\n\n def test_varstd(self):\n \"Tests var & std on MaskedArrays.\"\n (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d\n assert_almost_equal(mX.var(axis=None), mX.compressed().var())\n assert_almost_equal(mX.std(axis=None), mX.compressed().std())\n assert_equal(mXX.var(axis=3).shape, XX.var(axis=3).shape)\n assert_equal(mX.var().shape, X.var().shape)\n (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))\n assert_almost_equal(mX.var(axis=None, ddof=2), mX.compressed().var(ddof=2))\n assert_almost_equal(mX.std(axis=None, ddof=2), mX.compressed().std(ddof=2))\n for k in range(6):\n assert_almost_equal(mXvar1[k], mX[k].compressed().var())\n assert_almost_equal(mXvar0[k], mX[:, k].compressed().var())\n 
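# compressed() drops the masked entries, so each row/column statistic is\n            # checked against a plain ndarray computation on the surviving values.\n            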
assert_almost_equal(np.sqrt(mXvar0[k]), mX[:, k].compressed().std())\n\n\n#------------------------------------------------------------------------------\n\nclass TestMaskedArrayFunctions(TestCase):\n \"Test class for miscellaneous functions.\"\n\n def setUp(self):\n x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n a10 = 10.\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n z = np.array([-.5, 0., .5, .8])\n zm = masked_array(z, mask=[0, 1, 0, 0])\n xf = np.where(m1, 1e+20, x)\n xm.set_fill_value(1e+20)\n self.info = (xm, ym)\n\n def test_masked_where_bool(self):\n x = [1, 2]\n y = masked_where(False, x)\n assert_equal(y, [1, 2])\n assert_equal(y[1], 2)\n\n def test_masked_equal_wlist(self):\n x = [1, 2, 3]\n mx = masked_equal(x, 3)\n assert_equal(mx, x)\n assert_equal(mx._mask, [0, 0, 1])\n mx = masked_not_equal(x, 3)\n assert_equal(mx, x)\n assert_equal(mx._mask, [1, 1, 0])\n\n def test_masked_equal_fill_value(self):\n x = [1, 2, 3]\n mx = masked_equal(x, 3)\n assert_equal(mx._mask, [0, 0, 1])\n assert_equal(mx.fill_value, 3)\n\n def test_masked_where_condition(self):\n \"Tests masking functions.\"\n x = array([1., 2., 3., 4., 5.])\n x[2] = masked\n assert_equal(masked_where(greater(x, 2), x), masked_greater(x, 2))\n assert_equal(masked_where(greater_equal(x, 2), x), masked_greater_equal(x, 2))\n assert_equal(masked_where(less(x, 2), x), masked_less(x, 2))\n assert_equal(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2))\n assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))\n assert_equal(masked_where(equal(x, 2), x), masked_equal(x, 2))\n assert_equal(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2))\n assert_equal(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]), [99, 99, 3, 4, 5])\n\n\n def test_masked_where_oddities(self):\n \"\"\"Tests some generic features.\"\"\"\n atest = ones((10, 10, 10), dtype=float)\n btest = zeros(atest.shape, MaskType)\n ctest = masked_where(btest, atest)\n assert_equal(atest, ctest)\n\n\n def test_masked_where_shape_constraint(self):\n a = arange(10)\n try:\n test = masked_equal(1, a)\n except IndexError:\n pass\n else:\n raise AssertionError(\"Should have failed...\")\n test = masked_equal(a, 1)\n assert_equal(test.mask, [0, 1, 0, 0, 0, 0, 0, 0, 0, 0])\n\n\n def test_masked_otherfunctions(self):\n assert_equal(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4])\n assert_equal(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199])\n assert_equal(masked_inside(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 1, 3).mask, [1, 1, 1, 1, 0])\n assert_equal(masked_outside(array(list(range(5)), mask=[0, 1, 0, 0, 0]), 1, 3).mask, [1, 1, 0, 0, 1])\n assert_equal(masked_equal(array(list(range(5)), mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 0])\n assert_equal(masked_not_equal(array([2, 2, 1, 2, 1], mask=[1, 0, 0, 0, 0]), 2).mask, [1, 0, 1, 0, 1])\n\n\n def test_round(self):\n a = array([1.23456, 2.34567, 3.45678, 4.56789, 5.67890],\n mask=[0, 1, 0, 0, 0])\n assert_equal(a.round(), [1., 2., 3., 5., 6.])\n assert_equal(a.round(1), [1.2, 2.3, 3.5, 4.6, 5.7])\n assert_equal(a.round(3), [1.235, 2.346, 3.457, 4.568, 5.679])\n b = empty_like(a)\n a.round(out=b)\n assert_equal(b, [1., 2., 3., 5., 6.])\n\n x = array([1., 2., 3., 4., 5.])\n c = array([1, 1, 1, 0, 0])\n x[2] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., 
-5])\n c[0] = masked\n z = where(c, x, -x)\n assert_equal(z, [1., 2., 0., -4., -5])\n assert_(z[0] is masked)\n assert_(z[1] is not masked)\n assert_(z[2] is masked)\n\n\n def test_round_with_output(self):\n \"Testing round with an explicit output\"\n\n xm = array(np.random.uniform(0, 10, 12)).reshape(3, 4)\n xm[:, 0] = xm[0] = xm[-1, -1] = masked\n\n # A ndarray as explicit input\n output = np.empty((3, 4), dtype=float)\n output.fill(-9999)\n result = np.round(xm, decimals=2, out=output)\n # ... the result should be the given output\n self.assertTrue(result is output)\n assert_equal(result, xm.round(decimals=2, out=output))\n #\n output = empty((3, 4), dtype=float)\n result = xm.round(decimals=2, out=output)\n self.assertTrue(result is output)\n\n\n def test_identity(self):\n a = identity(5)\n self.assertTrue(isinstance(a, MaskedArray))\n assert_equal(a, np.identity(5))\n\n\n def test_power(self):\n x = -1.1\n assert_almost_equal(power(x, 2.), 1.21)\n self.assertTrue(power(x, masked) is masked)\n x = array([-1.1, -1.1, 1.1, 1.1, 0.])\n b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])\n y = power(x, b)\n assert_almost_equal(y, [0, 1.21, 1.04880884817, 1.21, 0.])\n assert_equal(y._mask, [1, 0, 0, 0, 1])\n b.mask = nomask\n y = power(x, b)\n assert_equal(y._mask, [1, 0, 0, 0, 1])\n z = x ** b\n assert_equal(z._mask, y._mask)\n assert_almost_equal(z, y)\n assert_almost_equal(z._data, y._data)\n x **= b\n assert_equal(x._mask, y._mask)\n assert_almost_equal(x, y)\n assert_almost_equal(x._data, y._data)\n\n def test_power_w_broadcasting(self):\n \"Test power w/ broadcasting\"\n a2 = np.array([[1., 2., 3.], [4., 5., 6.]])\n a2m = array(a2, mask=[[1, 0, 0], [0, 0, 1]])\n b1 = np.array([2, 4, 3])\n b1m = array(b1, mask=[0, 1, 0])\n b2 = np.array([b1, b1])\n b2m = array(b2, mask=[[0, 1, 0], [0, 1, 0]])\n #\n ctrl = array([[1 ** 2, 2 ** 4, 3 ** 3], [4 ** 2, 5 ** 4, 6 ** 3]],\n mask=[[1, 1, 0], [0, 1, 1]])\n # No broadcasting, base & exp w/ mask\n test = a2m ** b2m\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n # No broadcasting, base w/ mask, exp w/o mask\n test = a2m ** b2\n assert_equal(test, ctrl)\n assert_equal(test.mask, a2m.mask)\n # No broadcasting, base w/o mask, exp w/ mask\n test = a2 ** b2m\n assert_equal(test, ctrl)\n assert_equal(test.mask, b2m.mask)\n #\n ctrl = array([[2 ** 2, 4 ** 4, 3 ** 3], [2 ** 2, 4 ** 4, 3 ** 3]],\n mask=[[0, 1, 0], [0, 1, 0]])\n test = b1 ** b2m\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n test = b2m ** b1\n assert_equal(test, ctrl)\n assert_equal(test.mask, ctrl.mask)\n\n\n def test_where(self):\n \"Test the where function\"\n x = np.array([1., 1., 1., -2., pi / 2.0, 4., 5., -10., 10., 1., 2., 3.])\n y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])\n a10 = 10.\n m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]\n m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 , 0, 1]\n xm = masked_array(x, mask=m1)\n ym = masked_array(y, mask=m2)\n z = np.array([-.5, 0., .5, .8])\n zm = masked_array(z, mask=[0, 1, 0, 0])\n xf = np.where(m1, 1e+20, x)\n xm.set_fill_value(1e+20)\n #\n d = where(xm > 2, xm, -9)\n assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.])\n assert_equal(d._mask, xm._mask)\n d = where(xm > 2, -9, ym)\n assert_equal(d, [5., 0., 3., 2., -1., -9., -9., -10., -9., 1., 0., -9.])\n assert_equal(d._mask, [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0])\n d = where(xm > 2, xm, masked)\n assert_equal(d, [-9., -9., -9., -9., -9., 4., -9., -9., 10., -9., -9., 3.])\n tmp = xm._mask.copy()\n tmp[(xm <= 
2).filled(True)] = True\n        assert_equal(d._mask, tmp)\n        #\n        ixm = xm.astype(int)\n        d = where(ixm > 2, ixm, masked)\n        assert_equal(d, [-9, -9, -9, -9, -9, 4, -9, -9, 10, -9, -9, 3])\n        assert_equal(d.dtype, ixm.dtype)\n\n\n    def test_where_with_masked_choice(self):\n        x = arange(10)\n        x[3] = masked\n        c = x >= 8\n        # Set False to masked\n        z = where(c, x, masked)\n        assert_(z.dtype is x.dtype)\n        assert_(z[3] is masked)\n        assert_(z[4] is masked)\n        assert_(z[7] is masked)\n        assert_(z[8] is not masked)\n        assert_(z[9] is not masked)\n        assert_equal(x, z)\n        # Set True to masked\n        z = where(c, masked, x)\n        assert_(z.dtype is x.dtype)\n        assert_(z[3] is masked)\n        assert_(z[4] is not masked)\n        assert_(z[7] is not masked)\n        assert_(z[8] is masked)\n        assert_(z[9] is masked)\n\n    def test_where_with_masked_condition(self):\n        x = array([1., 2., 3., 4., 5.])\n        c = array([1, 1, 1, 0, 0])\n        x[2] = masked\n        z = where(c, x, -x)\n        assert_equal(z, [1., 2., 0., -4., -5])\n        c[0] = masked\n        z = where(c, x, -x)\n        assert_equal(z, [1., 2., 0., -4., -5])\n        assert_(z[0] is masked)\n        assert_(z[1] is not masked)\n        assert_(z[2] is masked)\n        #\n        x = arange(1, 6)\n        x[-1] = masked\n        y = arange(1, 6) * 10\n        y[2] = masked\n        c = array([1, 1, 1, 0, 0], mask=[1, 0, 0, 0, 0])\n        cm = c.filled(1)\n        z = where(c, x, y)\n        zm = where(cm, x, y)\n        assert_equal(z, zm)\n        assert_(getmask(zm) is nomask)\n        assert_equal(zm, [1, 2, 3, 40, 50])\n        z = where(c, masked, 1)\n        assert_equal(z, [99, 99, 99, 1, 1])\n        z = where(c, 1, masked)\n        assert_equal(z, [99, 1, 1, 99, 99])\n\n    def test_where_type(self):\n        \"Test the type conservation with where\"\n        x = np.arange(4, dtype=np.int32)\n        y = np.arange(4, dtype=np.float32) * 2.2\n        test = where(x > 1.5, y, x).dtype\n        control = np.find_common_type([np.int32, np.float32], [])\n        assert_equal(test, control)\n\n\n    def test_choose(self):\n        \"Test choose\"\n        choices = [[0, 1, 2, 3], [10, 11, 12, 13],\n                   [20, 21, 22, 23], [30, 31, 32, 33]]\n        chosen = choose([2, 3, 1, 0], choices)\n        assert_equal(chosen, array([20, 31, 12, 3]))\n        chosen = choose([2, 4, 1, 0], choices, mode='clip')\n        assert_equal(chosen, array([20, 31, 12, 3]))\n        chosen = choose([2, 4, 1, 0], choices, mode='wrap')\n        assert_equal(chosen, array([20, 1, 12, 3]))\n        # Check with some masked indices\n        indices_ = array([2, 4, 1, 0], mask=[1, 0, 0, 1])\n        chosen = choose(indices_, choices, mode='wrap')\n        assert_equal(chosen, array([99, 1, 12, 99]))\n        assert_equal(chosen.mask, [1, 0, 0, 1])\n        # Check with some masked choices\n        choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],\n                                       [1, 0, 0, 0], [0, 0, 0, 0]])\n        indices_ = [2, 3, 1, 0]\n        chosen = choose(indices_, choices, mode='wrap')\n        assert_equal(chosen, array([20, 31, 12, 3]))\n        assert_equal(chosen.mask, [1, 0, 0, 1])\n\n\n    def test_choose_with_out(self):\n        \"Test choose with an explicit out keyword\"\n        choices = [[0, 1, 2, 3], [10, 11, 12, 13],\n                   [20, 21, 22, 23], [30, 31, 32, 33]]\n        store = empty(4, dtype=int)\n        chosen = choose([2, 3, 1, 0], choices, out=store)\n        assert_equal(store, array([20, 31, 12, 3]))\n        self.assertTrue(store is chosen)\n        # Check with some masked indices + out\n        store = empty(4, dtype=int)\n        indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])\n        chosen = choose(indices_, choices, mode='wrap', out=store)\n        assert_equal(store, array([99, 31, 12, 99]))\n        assert_equal(store.mask, [1, 0, 0, 1])\n        # Check with some masked choices + out in a ndarray!\n        choices = array(choices, mask=[[0, 0, 0, 1], [1, 1, 0, 1],\n                                       [1, 0, 0, 0], [0, 0, 0, 0]])\n        indices_ = [2, 3, 1, 0]\n        store = empty(4, 
dtype=int).view(ndarray)\n chosen = choose(indices_, choices, mode='wrap', out=store)\n assert_equal(store, array([999999, 31, 12, 999999]))\n\n\n def test_reshape(self):\n a = arange(10)\n a[0] = masked\n # Try the default\n b = a.reshape((5, 2))\n assert_equal(b.shape, (5, 2))\n self.assertTrue(b.flags['C'])\n # Try w/ arguments as list instead of tuple\n b = a.reshape(5, 2)\n assert_equal(b.shape, (5, 2))\n self.assertTrue(b.flags['C'])\n # Try w/ order\n b = a.reshape((5, 2), order='F')\n assert_equal(b.shape, (5, 2))\n self.assertTrue(b.flags['F'])\n # Try w/ order\n b = a.reshape(5, 2, order='F')\n assert_equal(b.shape, (5, 2))\n self.assertTrue(b.flags['F'])\n #\n c = np.reshape(a, (2, 5))\n self.assertTrue(isinstance(c, MaskedArray))\n assert_equal(c.shape, (2, 5))\n self.assertTrue(c[0, 0] is masked)\n self.assertTrue(c.flags['C'])\n\n\n def test_make_mask_descr(self):\n \"Test make_mask_descr\"\n # Flexible\n ntype = [('a', np.float), ('b', np.float)]\n test = make_mask_descr(ntype)\n assert_equal(test, [('a', np.bool), ('b', np.bool)])\n # Standard w/ shape\n ntype = (np.float, 2)\n test = make_mask_descr(ntype)\n assert_equal(test, (np.bool, 2))\n # Standard standard\n ntype = np.float\n test = make_mask_descr(ntype)\n assert_equal(test, np.dtype(np.bool))\n # Nested\n ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]\n test = make_mask_descr(ntype)\n control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])\n assert_equal(test, control)\n # Named+ shape\n ntype = [('a', (np.float, 2))]\n test = make_mask_descr(ntype)\n assert_equal(test, np.dtype([('a', (np.bool, 2))]))\n # 2 names\n ntype = [(('A', 'a'), float)]\n test = make_mask_descr(ntype)\n assert_equal(test, np.dtype([(('A', 'a'), bool)]))\n\n\n def test_make_mask(self):\n \"Test make_mask\"\n # w/ a list as an input\n mask = [0, 1]\n test = make_mask(mask)\n assert_equal(test.dtype, MaskType)\n assert_equal(test, [0, 1])\n # w/ a ndarray as an input\n mask = np.array([0, 1], dtype=np.bool)\n test = make_mask(mask)\n assert_equal(test.dtype, MaskType)\n assert_equal(test, [0, 1])\n # w/ a flexible-type ndarray as an input - use default\n mdtype = [('a', np.bool), ('b', np.bool)]\n mask = np.array([(0, 0), (0, 1)], dtype=mdtype)\n test = make_mask(mask)\n assert_equal(test.dtype, MaskType)\n assert_equal(test, [1, 1])\n # w/ a flexible-type ndarray as an input - use input dtype\n mdtype = [('a', np.bool), ('b', np.bool)]\n mask = np.array([(0, 0), (0, 1)], dtype=mdtype)\n test = make_mask(mask, dtype=mask.dtype)\n assert_equal(test.dtype, mdtype)\n assert_equal(test, mask)\n # w/ a flexible-type ndarray as an input - use input dtype\n mdtype = [('a', np.float), ('b', np.float)]\n bdtype = [('a', np.bool), ('b', np.bool)]\n mask = np.array([(0, 0), (0, 1)], dtype=mdtype)\n test = make_mask(mask, dtype=mask.dtype)\n assert_equal(test.dtype, bdtype)\n assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))\n\n\n def test_mask_or(self):\n # Initialize\n mtype = [('a', np.bool), ('b', np.bool)]\n mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)\n # Test using nomask as input\n test = mask_or(mask, nomask)\n assert_equal(test, mask)\n test = mask_or(nomask, mask)\n assert_equal(test, mask)\n # Using False as input\n test = mask_or(mask, False)\n assert_equal(test, mask)\n # Using True as input. 
Won't work, but keep it for the kicks\n # test = mask_or(mask, True)\n # control = np.array([(1, 1), (1, 1), (1, 1), (1, 1)], dtype=mtype)\n # assert_equal(test, control)\n # Using another array w / the same dtype\n other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=mtype)\n test = mask_or(mask, other)\n control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)\n assert_equal(test, control)\n # Using another array w / a different dtype\n othertype = [('A', np.bool), ('B', np.bool)]\n other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)\n try:\n test = mask_or(mask, other)\n except ValueError:\n pass\n # Using nested arrays\n dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]\n amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)\n bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)\n cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)\n assert_equal(mask_or(amask, bmask), cntrl)\n\n\n def test_flatten_mask(self):\n \"Tests flatten mask\"\n # Standarad dtype\n mask = np.array([0, 0, 1], dtype=np.bool)\n assert_equal(flatten_mask(mask), mask)\n # Flexible dtype\n mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])\n test = flatten_mask(mask)\n control = np.array([0, 0, 0, 1], dtype=bool)\n assert_equal(test, control)\n\n mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]\n data = [(0, (0, 0)), (0, (0, 1))]\n mask = np.array(data, dtype=mdtype)\n test = flatten_mask(mask)\n control = np.array([ 0, 0, 0, 0, 0, 1], dtype=bool)\n assert_equal(test, control)\n\n\n def test_on_ndarray(self):\n \"Test functions on ndarrays\"\n a = np.array([1, 2, 3, 4])\n m = array(a, mask=False)\n test = anom(a)\n assert_equal(test, m.anom())\n test = reshape(a, (2, 2))\n assert_equal(test, m.reshape(2, 2))\n\n def test_compress(self):\n # Test compress function on ndarray and masked array\n # Address Github #2495.\n arr = np.arange(8)\n arr.shape = 4,2\n cond = np.array([True, False, True, True])\n control = arr[[0, 2, 3]]\n test = np.ma.compress(cond, arr, axis=0)\n assert_equal(test, control)\n marr = np.ma.array(arr)\n test = np.ma.compress(cond, marr, axis=0)\n assert_equal(test, control)\n\n#------------------------------------------------------------------------------\n\nclass TestMaskedFields(TestCase):\n #\n def setUp(self):\n ilist = [1, 2, 3, 4, 5]\n flist = [1.1, 2.2, 3.3, 4.4, 5.5]\n slist = ['one', 'two', 'three', 'four', 'five']\n ddtype = [('a', int), ('b', float), ('c', '|S8')]\n mdtype = [('a', bool), ('b', bool), ('c', bool)]\n mask = [0, 1, 0, 0, 1]\n base = array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)\n self.data = dict(base=base, mask=mask, ddtype=ddtype, mdtype=mdtype)\n\n def test_set_records_masks(self):\n base = self.data['base']\n mdtype = self.data['mdtype']\n # Set w/ nomask or masked\n base.mask = nomask\n assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))\n base.mask = masked\n assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))\n # Set w/ simple boolean\n base.mask = False\n assert_equal_records(base._mask, np.zeros(base.shape, dtype=mdtype))\n base.mask = True\n assert_equal_records(base._mask, np.ones(base.shape, dtype=mdtype))\n # Set w/ list\n base.mask = [0, 0, 0, 1, 1]\n assert_equal_records(base._mask,\n np.array([(x, x, x) for x in [0, 0, 0, 1, 1]],\n dtype=mdtype))\n\n def test_set_record_element(self):\n \"Check setting an element of a record)\"\n base = self.data['base']\n (base_a, base_b, base_c) = (base['a'], base['b'], base['c'])\n 
base[0] = (pi, pi, 'pi')\n\n assert_equal(base_a.dtype, int)\n assert_equal(base_a._data, [3, 2, 3, 4, 5])\n\n assert_equal(base_b.dtype, float)\n assert_equal(base_b._data, [pi, 2.2, 3.3, 4.4, 5.5])\n\n assert_equal(base_c.dtype, '|S8')\n assert_equal(base_c._data,\n asbytes_nested(['pi', 'two', 'three', 'four', 'five']))\n\n def test_set_record_slice(self):\n base = self.data['base']\n (base_a, base_b, base_c) = (base['a'], base['b'], base['c'])\n base[:3] = (pi, pi, 'pi')\n\n assert_equal(base_a.dtype, int)\n assert_equal(base_a._data, [3, 3, 3, 4, 5])\n\n assert_equal(base_b.dtype, float)\n assert_equal(base_b._data, [pi, pi, pi, 4.4, 5.5])\n\n assert_equal(base_c.dtype, '|S8')\n assert_equal(base_c._data,\n asbytes_nested(['pi', 'pi', 'pi', 'four', 'five']))\n\n def test_mask_element(self):\n \"Check record access\"\n base = self.data['base']\n (base_a, base_b, base_c) = (base['a'], base['b'], base['c'])\n base[0] = masked\n #\n for n in ('a', 'b', 'c'):\n assert_equal(base[n].mask, [1, 1, 0, 0, 1])\n assert_equal(base[n]._data, base._data[n])\n #\n def test_getmaskarray(self):\n \"Test getmaskarray on flexible dtype\"\n ndtype = [('a', int), ('b', float)]\n test = empty(3, dtype=ndtype)\n assert_equal(getmaskarray(test),\n np.array([(0, 0) , (0, 0), (0, 0)],\n dtype=[('a', '|b1'), ('b', '|b1')]))\n test[:] = masked\n assert_equal(getmaskarray(test),\n np.array([(1, 1) , (1, 1), (1, 1)],\n dtype=[('a', '|b1'), ('b', '|b1')]))\n #\n def test_view(self):\n \"Test view w/ flexible dtype\"\n iterator = list(zip(np.arange(10), np.random.rand(10)))\n data = np.array(iterator)\n a = array(iterator, dtype=[('a', float), ('b', float)])\n a.mask[0] = (1, 0)\n controlmask = np.array([1] + 19 * [0], dtype=bool)\n # Transform globally to simple dtype\n test = a.view(float)\n assert_equal(test, data.ravel())\n assert_equal(test.mask, controlmask)\n # Transform globally to dty\n test = a.view((float, 2))\n assert_equal(test, data)\n assert_equal(test.mask, controlmask.reshape(-1, 2))\n #\n test = a.view((float, 2), np.matrix)\n assert_equal(test, data)\n self.assertTrue(isinstance(test, np.matrix))\n #\n def test_getitem(self):\n ndtype = [('a', float), ('b', float)]\n a = array(list(zip(np.random.rand(10), np.arange(10))), dtype=ndtype)\n a.mask = np.array(list(zip([0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 0])),\n dtype=[('a', bool), ('b', bool)])\n # No mask\n self.assertTrue(isinstance(a[1], MaskedArray))\n # One element masked\n self.assertTrue(isinstance(a[0], MaskedArray))\n assert_equal_records(a[0]._data, a._data[0])\n assert_equal_records(a[0]._mask, a._mask[0])\n # All element masked\n self.assertTrue(isinstance(a[-2], MaskedArray))\n assert_equal_records(a[-2]._data, a._data[-2])\n assert_equal_records(a[-2]._mask, a._mask[-2])\n\n def test_setitem(self):\n # Issue 2403\n ndtype = np.dtype([('a', float), ('b', int)])\n mdtype = np.dtype([('a', bool), ('b', bool)])\n # soft mask\n control = np.array([(False, True), (True, True)], dtype=mdtype)\n a = np.ma.masked_all((2,), dtype=ndtype)\n a['a'][0] = 2\n assert_equal(a.mask, control)\n a = np.ma.masked_all((2,), dtype=ndtype)\n a[0]['a'] = 2\n assert_equal(a.mask, control)\n # hard mask\n control = np.array([(True, True), (True, True)], dtype=mdtype)\n a = np.ma.masked_all((2,), dtype=ndtype)\n a.harden_mask()\n a['a'][0] = 2\n assert_equal(a.mask, control)\n a = np.ma.masked_all((2,), dtype=ndtype)\n a.harden_mask()\n a[0]['a'] = 2\n assert_equal(a.mask, control)\n\n def test_element_len(self):\n # check that len() 
works for mvoid (Github issue #576)\n for rec in self.data['base']:\n assert_equal(len(rec), len(self.data['ddtype']))\n\n#------------------------------------------------------------------------------\n\nclass TestMaskedView(TestCase):\n #\n def setUp(self):\n iterator = list(zip(np.arange(10), np.random.rand(10)))\n data = np.array(iterator)\n a = array(iterator, dtype=[('a', float), ('b', float)])\n a.mask[0] = (1, 0)\n controlmask = np.array([1] + 19 * [0], dtype=bool)\n self.data = (data, a, controlmask)\n #\n def test_view_to_nothing(self):\n (data, a, controlmask) = self.data\n test = a.view()\n self.assertTrue(isinstance(test, MaskedArray))\n assert_equal(test._data, a._data)\n assert_equal(test._mask, a._mask)\n\n #\n def test_view_to_type(self):\n (data, a, controlmask) = self.data\n test = a.view(np.ndarray)\n self.assertTrue(not isinstance(test, MaskedArray))\n assert_equal(test, a._data)\n assert_equal_records(test, data.view(a.dtype).squeeze())\n #\n def test_view_to_simple_dtype(self):\n (data, a, controlmask) = self.data\n # View globally\n test = a.view(float)\n self.assertTrue(isinstance(test, MaskedArray))\n assert_equal(test, data.ravel())\n assert_equal(test.mask, controlmask)\n #\n def test_view_to_flexible_dtype(self):\n (data, a, controlmask) = self.data\n #\n test = a.view([('A', float), ('B', float)])\n assert_equal(test.mask.dtype.names, ('A', 'B'))\n assert_equal(test['A'], a['a'])\n assert_equal(test['B'], a['b'])\n #\n test = a[0].view([('A', float), ('B', float)])\n self.assertTrue(isinstance(test, MaskedArray))\n assert_equal(test.mask.dtype.names, ('A', 'B'))\n assert_equal(test['A'], a['a'][0])\n assert_equal(test['B'], a['b'][0])\n #\n test = a[-1].view([('A', float), ('B', float)])\n self.assertTrue(isinstance(test, MaskedArray))\n assert_equal(test.dtype.names, ('A', 'B'))\n assert_equal(test['A'], a['a'][-1])\n assert_equal(test['B'], a['b'][-1])\n\n #\n def test_view_to_subdtype(self):\n (data, a, controlmask) = self.data\n # View globally\n test = a.view((float, 2))\n self.assertTrue(isinstance(test, MaskedArray))\n assert_equal(test, data)\n assert_equal(test.mask, controlmask.reshape(-1, 2))\n # View on 1 masked element\n test = a[0].view((float, 2))\n self.assertTrue(isinstance(test, MaskedArray))\n assert_equal(test, data[0])\n assert_equal(test.mask, (1, 0))\n # View on 1 unmasked element\n test = a[-1].view((float, 2))\n self.assertTrue(isinstance(test, MaskedArray))\n assert_equal(test, data[-1])\n #\n def test_view_to_dtype_and_type(self):\n (data, a, controlmask) = self.data\n #\n test = a.view((float, 2), np.matrix)\n assert_equal(test, data)\n self.assertTrue(isinstance(test, np.matrix))\n self.assertTrue(not isinstance(test, MaskedArray))\n\ndef test_masked_array():\n a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])\n assert_equal(np.argwhere(a), [[1], [3]])\n\n###############################################################################\nif __name__ == \"__main__\":\n run_module_suite()\n"
] |
[
[
"numpy.matrix",
"numpy.diag",
"numpy.product",
"numpy.minimum",
"numpy.sqrt",
"numpy.ma.compress",
"numpy.arctan",
"numpy.take",
"numpy.asarray",
"numpy.dtype",
"numpy.round",
"numpy.seterr",
"numpy.concatenate",
"numpy.arctan2",
"numpy.add",
"numpy.ma.array",
"numpy.compat.asbytes_nested",
"numpy.exp",
"numpy.where",
"numpy.conjugate",
"numpy.divide",
"numpy.geterr",
"numpy.greater",
"numpy.arcsin",
"numpy.reshape",
"numpy.arange",
"numpy.less",
"numpy.add.reduce",
"numpy.subtract",
"numpy.finfo",
"numpy.sin",
"numpy.greater_equal",
"numpy.less_equal",
"numpy.minimum.outer",
"numpy.ravel",
"numpy.zeros",
"numpy.log",
"numpy.cosh",
"numpy.multiply",
"numpy.isnan",
"numpy.arccos",
"numpy.tan",
"numpy.identity",
"numpy.random.rand",
"numpy.equal",
"numpy.transpose",
"numpy.errstate",
"numpy.ma.masked_all",
"numpy.find_common_type",
"numpy.array",
"numpy.tanh",
"numpy.not_equal",
"numpy.add.accumulate",
"numpy.maximum.reduce",
"numpy.sum",
"numpy.argsort",
"numpy.absolute",
"numpy.maximum",
"numpy.cos",
"numpy.sort",
"numpy.ones",
"numpy.argwhere",
"numpy.sinh",
"numpy.compat.asbytes",
"numpy.maximum.outer",
"numpy.shape",
"numpy.mod",
"numpy.random.uniform",
"numpy.angle",
"numpy.empty"
]
] |
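The record above dumps a numpy masked-array test module whose assertions revolve around mask propagation through `where`, `choose`, and `power`. A minimal standalone sketch of the `ma.where`/`ma.choose` semantics those tests assert follows; the array values are invented for illustration and are not taken from the test file.

    import numpy as np
    import numpy.ma as ma

    # Position 2 is masked; comparisons and arithmetic keep it masked.
    x = ma.array([1.0, 2.0, 3.0, 4.0, 5.0], mask=[0, 0, 1, 0, 0])
    z = ma.where(x > 2, x, -x)
    print(z)       # [-1.0 -2.0 -- 4.0 5.0]
    print(z.mask)  # [False False  True False False]

    # choose() with masked indices propagates the mask into the result.
    choices = [[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23], [30, 31, 32, 33]]
    idx = ma.array([2, 3, 1, 0], mask=[1, 0, 0, 1])
    print(ma.choose(idx, choices, mode='wrap'))  # [-- 31 12 --]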
m-tari/stock_drirection_predict_ml
|
[
"902331665109c1c28c9c234f0bce6a49371354d2"
] |
[
"webapp/app.py"
] |
[
"import streamlit as st\nimport pandas as pd\nimport re\nimport pickle\n\n# loading the trained model\npickle_in = open('classifier.pkl', 'rb') \nclassifier = pickle.load(pickle_in)\n\ndef prediction(ticker_to_predict):\n # Making predictions \n prediction = classifier.predict(ticker_to_predict)\n return prediction\n\n\ndef main(): \n\n\tst.write(\n\t'''\n\t# Will stock X's price go up or down the next day?\n\n\n\tIs there any pattern in the day to day movement of the stock prices? The app presents the\n\tprediction of the machine learning model at\n\n\t'''\n\t)\n\tst.markdown('[https://github.com/m-tari/stock_direction_predict_ml](https://github.com/m-tari/stock_direction_predict_ml)')\n\tst.write('The close price of 94 anonymized stocks are scaled to day 1 for 500 days.')\n\n\n\tdata = pd.read_csv('./datasets/ohlc_data.csv')\n\n\n\t# Show close price history\n\tticker = st.number_input('Enter a ticker to show the historical price and volume data (a number from 1-94)', \n\t\tmin_value=1, \n\t\tmax_value=94, \n\t\tformat='%d',\n\t\tkey='ticker')\n\n\t# TODO: An input number for the number of days to display\n\n\t# we use regular expression to select Close and Volume columns in data\n\tdata_cols = data.columns\n\n\tr_close = re.compile(\"^C\")\n\tclose_cols = filter(r_close.match, data_cols)\n\tclose = data.loc[ticker, close_cols].T\n\tclose_renamed = close.rename(lambda s: s.strip(\"C\"))\n\tclose_renamed.index = close_renamed.index.astype('int64')\n\n\tr_volume = re.compile(\"^V\")\n\tvolume_cols = filter(r_volume.match, data_cols)\n\tvolume = data.loc[ticker, volume_cols].T\n\tvolume_renamed = volume.rename(lambda s: s.strip(\"V\"))\n\tvolume_renamed.index = volume_renamed.index.astype('int64')\n\n\t# Draw Close and Volume plots\n\tst.write(\n\t'''\n\t### Close Price Historical Data\n\n\t'''\n\t)\n\tst.line_chart(close_renamed)\n\tst.write(\n\t'''\n\t### Volume Historical Data\n\n\t'''\n\t)\n\tst.line_chart(volume_renamed)\n\n\n\tticker_to_predict = st.number_input('Enter a ticker to show the movement prediction (a number from 1-94)', \n\t\tmin_value=1, \n\t\tmax_value=94, \n\t\tformat='%d',\n\t\tkey='ticker_to_predict')\n\n\t# predict button\n\tif st.button('Predict the next day movment'):\n\n\t\tr_OC = re.compile(\"^[O, C]\")\n\t\tOC_cols = filter(r_OC.match, data_cols)\n\t\tOC_df = data.loc[ticker_to_predict-1, OC_cols].iloc[880:899].to_frame().T\n\n\t\tmovement_to_show = prediction(OC_df)\n\t\tif movement_to_show == 1:\n\t\t\tst.write('Upward')\n\t\telif movement_to_show == 0:\n\t\t\tst.write('Downward')\n\n\tst.write(\"\"\"\n\n\tDisclaimer: \n\tThis content is for educational purposes only. The Information should not be construed as investment/trading advice and is not meant to be a solicitation or recommendation to buy, sell, or hold any securities.\n\n\t\\- Mohammad Tari\n\n\t\"\"\")\n\n\nif __name__=='__main__': \n main()\t"
] |
[
[
"pandas.read_csv"
]
] |
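The Streamlit app in this record selects the wide frame's Close and Volume columns by compiling a regex and filtering column names by hand. A shorter pandas-only sketch of the same idea on an invented toy frame (the `C<day>`/`V<day>` column names mirror the record's layout):

    import pandas as pd

    # Toy stand-in for the app's wide frame: C<n> close columns, V<n> volume columns.
    df = pd.DataFrame({'C1': [1.00], 'C2': [1.03], 'V1': [1000], 'V2': [1200]})

    # DataFrame.filter(regex=...) replaces the manual re.compile + filter() dance.
    close = df.filter(regex='^C').T.rename(lambda s: s.strip('C'))
    close.index = close.index.astype('int64')
    print(close)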
wchatx/esridumpgdf
|
[
"4f30b066054ba166ccdeff07ea6ef4a5ebfd070d"
] |
[
"esridumpgdf/service.py"
] |
[
"from typing import Dict, Iterable, List\n\nfrom geopandas import GeoDataFrame\nfrom pandas import DataFrame, concat\n\nfrom ._base import Base\nfrom .layer import Layer\n\n\nclass Service(Base):\n def __init__(self, url):\n self.url = url\n super(Service, self).__init__(self.url)\n\n def layers(self, include_tables: bool = True) -> DataFrame:\n \"\"\"\n Get Service layers.\n\n :param include_tables: include Service tables\n :return:\n \"\"\"\n layers = DataFrame(\n data=[\n {**layer, \"url\": f'{self.url}/{layer[\"id\"]}'}\n for layer in self.meta[\"layers\"]\n ]\n )\n layers.set_index(\"id\", inplace=True)\n\n if include_tables and self.meta[\"tables\"]:\n tables = DataFrame(\n data=[\n {**table, \"url\": f'{self.url}/{table[\"id\"]}'}\n for table in self.meta[\"tables\"]\n ]\n )\n tables.set_index(\"id\", inplace=True)\n layers = concat([layers, tables])\n\n return layers\n\n def to_gpkg(\n self,\n filename: str,\n index: bool = True,\n schema: dict = None,\n include_tables: bool = True,\n **kwargs,\n ) -> str:\n \"\"\"\n Export an ArcGIS Server Map or Feature service to geopackage\n\n :param filename: File path or file handle to write to.\n :param index: If True, write index into one or more columns (for MultiIndex).\n Default None writes the index into one or more columns only if\n the index is named, is a MultiIndex, or has a non-integer data\n type. If False, no index is written.\n :param schema: If specified, the schema dictionary is passed to Fiona to\n better control how the file is written.\n :param include_tables: include Service tables\n :param kwargs: extra keyword arguments provided to the EsriDumper class\n :return: provided filename\n \"\"\"\n layers = self.layers(include_tables).to_dict(orient=\"records\")\n for layer in layers:\n if layer[\"type\"] in self._supported_types:\n Layer(layer[\"url\"], **kwargs).to_gdf().to_file(\n filename,\n driver=\"GPKG\",\n index=index,\n schema=schema,\n layer=layer[\"name\"],\n )\n return filename\n\n def to_gdfs(\n self, include_tables: bool = True, **kwargs\n ) -> List[Dict[str, GeoDataFrame]]:\n \"\"\"\n Export an ArcGIS Server Map or Feature service to GeoDataFrames\n\n :param include_tables: include Service tables\n :param kwargs: extra keyword arguments provided to the EsriDumper class\n :return: list of dicts with layer names and layer GeoDataFrames\n \"\"\"\n layers = self.layers(include_tables).to_dict(orient=\"records\")\n gdfs = []\n for layer in layers:\n if layer[\"type\"] in self._supported_types:\n gdfs.append(\n {\n \"name\": layer[\"name\"],\n \"gdf\": Layer(layer[\"url\"], **kwargs).to_gdf(),\n }\n )\n return gdfs\n"
] |
[
[
"pandas.concat",
"pandas.DataFrame"
]
] |
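`Service.layers` above is plain pandas work: build a frame per metadata block, index it by layer id, and concat layers with tables. A self-contained sketch with an invented `meta` dict and URL, so no ArcGIS server is needed:

    from pandas import DataFrame, concat

    # Hypothetical stand-in for the service metadata the class reads from self.meta.
    meta = {
        'layers': [{'id': 0, 'name': 'parcels', 'type': 'Feature Layer'}],
        'tables': [{'id': 1, 'name': 'owners', 'type': 'Table'}],
    }
    url = 'https://example.com/arcgis/rest/services/Demo/MapServer'

    layers = DataFrame([{**l, 'url': f"{url}/{l['id']}"} for l in meta['layers']]).set_index('id')
    tables = DataFrame([{**t, 'url': f"{url}/{t['id']}"} for t in meta['tables']]).set_index('id')
    print(concat([layers, tables]))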
Microsoft/dowhy
|
[
"b84e257142df91e4ed792dcf6eee159f446ffef9"
] |
[
"dowhy/gcm/shapley.py"
] |
[
"\"\"\"This module provides functionality for shapley value estimation.\n\nClasses and functions in this module should be considered experimental, meaning there might be breaking API changes in\nthe future.\n\"\"\"\n\nimport itertools\nfrom enum import Enum\nfrom typing import Callable, Union, Tuple, List, Dict, Set, Optional\n\nimport numpy as np\nimport scipy\nfrom joblib import Parallel, delayed\nfrom scipy.special import comb\nfrom sklearn.linear_model import LinearRegression\nfrom tqdm import tqdm\n\nimport dowhy.gcm.config as config\nfrom dowhy.gcm.constant import EPS\nfrom dowhy.gcm.util.general import set_random_seed\n\n\nclass ShapleyApproximationMethods(Enum):\n \"\"\"\n AUTO: Using EXACT when number of players is below 6 and EARLY_STOPPING otherwise.\n EXACT: Generate all possible subsets and estimate Shapley values with corresponding subset weights.\n EXACT_FAST: Generate all possible subsets and estimate Shapley values via weighed least squares regression. This can\n be faster, but, depending on the set function, numerically less stable.\n SUBSET_SAMPLING: Randomly samples subsets and estimate Shapley values via weighed least squares regression. Here,\n only a certain number of randomly drawn subsets are used.\n EARLY_STOPPING: Estimate Shapley values based on a few randomly generated permutations. Stop the estimation process\n when the the Shapley values do not change much on average anymore between runs.\n PERMUTATION: Estimates Shapley values based on a fixed number of randomly generated permutations. By fine tuning\n hyperparameters, this can be potentially faster than the early stopping approach due to a better\n utilization of the parallelization.\n \"\"\"\n AUTO = 0,\n EXACT = 1,\n EXACT_FAST = 2,\n EARLY_STOPPING = 3,\n PERMUTATION = 4,\n SUBSET_SAMPLING = 5\n\n\nclass ShapleyConfig:\n def __init__(self,\n approximation_method: ShapleyApproximationMethods = ShapleyApproximationMethods.AUTO,\n num_samples: int = 5000,\n min_percentage_change_threshold: float = 0.01,\n n_jobs: Optional[int] = None) -> None:\n \"\"\"Config for estimating Shapley values.\n\n :param approximation_method: Type of approximation methods (see :py:class:`ShapleyApproximationMethods <dowhy.gcm.shapley.ShapleyApproximationMethods>`).\n :param num_samples: Number of samples used for approximating the Shapley values. Depending on the approximation\n method, this can either represent the number of drawn subsets (in SUBSET_SAMPLING) or the\n number of drawn permutations (in EARLY_STOPPING and PERMUTATION). In case of EARLY_STOPPING,\n this also represents a limit on the evaluation runs.\n :param min_percentage_change_threshold: This parameter is only relevant for EARLY_STOPPING and indicates the\n minimum required change of the Shapley values between two runs\n (i.e. 
evaluation of permutations) before the estimation stops.\n For instance, if the Shapley value changes less than the given value for\n a certain number of consecutive runs, the algorithm stops and returns\n the current result.\n :param n_jobs: Number of parallel jobs.\n \"\"\"\n self.approximation_method = approximation_method\n self.num_samples = num_samples\n self.min_percentage_change_threshold = min_percentage_change_threshold\n self.n_jobs = config.default_n_jobs if n_jobs is None else n_jobs\n\n\ndef estimate_shapley_values(set_func: Callable[[np.ndarray], Union[float, np.ndarray]],\n num_players: int,\n shapley_config: Optional[ShapleyConfig] = None) -> np.ndarray:\n \"\"\"Estimates the Shapley values based on the provided set function. A set function here is defined by taking a\n (subset) of players and returning a certain utility value. This is in the context of attributing the\n value of the i-th player to a subset of players S by evaluating v(S u {i}) - v(S), where v is the\n set function and i is not in S. While we use the term 'player' here, this is often a certain feature/variable.\n\n The input of the set function is a binary vector indicating which player is part of the set. For instance, given 4\n players (1,2,3,4) and a subset only contains players 1,2,4, then this is indicated by the vector [1, 1, 0, 1]. The\n function is expected to return a numeric value based on this input.\n\n Note: The set function can be arbitrary and can resemble computationally complex operations. Keep in mind\n that the estimation of Shapley values can become computationally expensive and requires a lot of memory. If the\n runtime is too slow, consider changing the default config.\n\n :param set_func: A set function that expects a binary vector as input which specifies which player is part of the\n subset.\n :param num_players: Total number of players.\n :param shapley_config: A config object for indicating the approximation method and other parameters. If None is\n given, a default config is used. For faster runtime or more accurate results, consider\n creating a custom config.\n :return: A numpy array representing the Shapley values for each player, i.e. there are as many Shapley values as\n num_players. The i-th entry belongs to the i-th player. 
Here, the set function defines which index belongs\n to which player and is responsible to keep it consistent.\n \"\"\"\n if shapley_config is None:\n shapley_config = ShapleyConfig()\n\n approximation_method = shapley_config.approximation_method\n if approximation_method == ShapleyApproximationMethods.AUTO:\n if num_players <= 5:\n approximation_method = ShapleyApproximationMethods.EXACT\n else:\n approximation_method = ShapleyApproximationMethods.EARLY_STOPPING\n\n if approximation_method == ShapleyApproximationMethods.EXACT:\n return _estimate_shapley_values_exact(set_func=set_func,\n num_players=num_players,\n n_jobs=shapley_config.n_jobs)\n elif approximation_method == ShapleyApproximationMethods.PERMUTATION:\n return _approximate_shapley_values_via_permutation_sampling(\n set_func=set_func,\n num_players=num_players,\n num_permutations=max(1, shapley_config.num_samples // num_players),\n n_jobs=shapley_config.n_jobs)\n elif approximation_method == ShapleyApproximationMethods.EARLY_STOPPING:\n return _approximate_shapley_values_via_early_stopping(\n set_func=set_func,\n num_players=num_players,\n max_runs=shapley_config.num_samples,\n min_percentage_change_threshold=shapley_config.min_percentage_change_threshold,\n n_jobs=shapley_config.n_jobs)\n elif approximation_method == ShapleyApproximationMethods.SUBSET_SAMPLING:\n return _approximate_shapley_values_via_least_squares_regression(\n set_func=set_func,\n num_players=num_players,\n use_subset_approximation=True,\n num_samples_for_approximation=shapley_config.num_samples,\n n_jobs=shapley_config.n_jobs)\n elif approximation_method == ShapleyApproximationMethods.EXACT_FAST:\n return _approximate_shapley_values_via_least_squares_regression(\n set_func=set_func,\n num_players=num_players,\n use_subset_approximation=False,\n num_samples_for_approximation=shapley_config.num_samples,\n n_jobs=shapley_config.n_jobs)\n else:\n raise ValueError(\"Unknown method for Shapley approximation!\")\n\n\ndef _estimate_shapley_values_exact(set_func: Callable[[np.ndarray], Union[float, np.ndarray]],\n num_players: int,\n n_jobs: int) -> np.ndarray:\n \"\"\" Following Eq. (2) in\n Janzing, D., Minorics, L., & Bloebaum, P. (2020).\n Feature relevance quantification in explainable AI: A causal problem.\n In International Conference on Artificial Intelligence and Statistics (pp. 2907-2916). PMLR. 
\"\"\"\n all_subsets = [tuple(subset) for subset in itertools.product([0, 1], repeat=num_players)]\n\n with Parallel(n_jobs=n_jobs) as parallel:\n subset_to_result_map = _evaluate_set_function(set_func, all_subsets, parallel)\n\n def compute_subset_weight(length: int) -> float:\n return 1 / (num_players * comb(num_players - 1, length))\n\n subset_weight_cache = {}\n\n shapley_values = [None] * num_players\n subsets_missing_one_player = np.array(list(itertools.product([0, 1], repeat=num_players - 1)))\n for player_index in range(num_players):\n subsets_with_player = [tuple(subset)\n for subset in np.insert(subsets_missing_one_player, player_index, 1, axis=1)]\n subsets_without_player = [tuple(subset)\n for subset in np.insert(subsets_missing_one_player, player_index, 0, axis=1)]\n\n for i in range(len(subsets_with_player)):\n subset_length = int(np.sum(subsets_without_player[i]))\n if subset_length not in subset_weight_cache:\n subset_weight_cache[subset_length] = compute_subset_weight(subset_length)\n\n weighted_diff = \\\n subset_weight_cache[subset_length] * (subset_to_result_map[subsets_with_player[i]]\n - subset_to_result_map[subsets_without_player[i]])\n # For estimating Shapley values for multiple samples (e.g. in feature relevance) and the number of samples\n # is unknown beforehand.\n if shapley_values[player_index] is None:\n shapley_values[player_index] = weighted_diff\n else:\n shapley_values[player_index] += weighted_diff\n\n return np.array(shapley_values).T\n\n\ndef _approximate_shapley_values_via_least_squares_regression(set_func: Callable[[np.ndarray],\n Union[float, np.ndarray]],\n num_players: int,\n use_subset_approximation: bool,\n num_samples_for_approximation: int,\n n_jobs: int,\n full_and_empty_subset_weight: float = 10 ** 20) \\\n -> np.ndarray:\n \"\"\" For more details about this approximation, see Section 4.1.1 in\n Janzing, D., Minorics, L., & Bloebaum, P. (2020).\n Feature relevance quantification in explainable AI: A causal problem.\n In International Conference on Artificial Intelligence and Statistics (pp. 2907-2916). PMLR. \"\"\"\n if not use_subset_approximation:\n all_subsets, weights = _create_subsets_and_weights_exact(num_players,\n full_and_empty_subset_weight)\n else:\n all_subsets, weights = _create_subsets_and_weights_approximation(num_players,\n full_and_empty_subset_weight,\n num_samples_for_approximation)\n\n def parallel_job(subset: np.ndarray, parallel_random_seed: int):\n set_random_seed(parallel_random_seed)\n\n return set_func(subset)\n\n with Parallel(n_jobs=n_jobs) as parallel:\n random_seeds = np.random.randint(np.iinfo(np.int32).max, size=len(all_subsets))\n set_function_results = parallel(delayed(parallel_job)(subset, random_seed)\n for subset, random_seed in\n tqdm(zip(all_subsets, random_seeds),\n desc=\"Estimate shapley values as least squares solution\",\n position=0, leave=True, disable=not config.show_progress_bars))\n\n return LinearRegression().fit(all_subsets, np.array(set_function_results), sample_weight=weights).coef_\n\n\ndef _approximate_shapley_values_via_permutation_sampling(\n set_func: Callable[[np.ndarray], Union[float, np.ndarray]],\n num_players: int,\n num_permutations: int,\n n_jobs: int) -> np.ndarray:\n \"\"\" For more details about this approximation, see\n Strumbelj, E., Kononenko, I. 
(2014).\n Explaining prediction models and individual predictions with feature contributions.\n In Knowledge and information systems, 41(3):647–665 \"\"\"\n full_subset_result, empty_subset_result = _estimate_full_and_emtpy_subset_results(set_func, num_players)\n\n subsets_to_evaluate = set()\n all_permutations = []\n for i in range(num_permutations):\n permutation = np.random.choice(num_players, num_players, replace=False)\n all_permutations.append(permutation)\n\n subsets_to_evaluate.update(_create_index_order_and_subset_tuples(permutation))\n\n with Parallel(n_jobs=n_jobs) as parallel:\n evaluated_subsets = _evaluate_set_function(set_func, subsets_to_evaluate, parallel)\n\n shapley_values = _estimate_shapley_values_of_permutation(all_permutations[0], evaluated_subsets,\n full_subset_result, empty_subset_result)\n for i in range(1, len(all_permutations)):\n shapley_values += _estimate_shapley_values_of_permutation(all_permutations[i], evaluated_subsets,\n full_subset_result, empty_subset_result)\n\n return shapley_values / len(all_permutations)\n\n\ndef _approximate_shapley_values_via_early_stopping(\n set_func: Callable[[np.ndarray], Union[float, np.ndarray]],\n num_players: int,\n max_runs: int,\n min_percentage_change_threshold: float,\n n_jobs: int,\n num_permutations_per_run: int = 5) -> np.ndarray:\n \"\"\" Combines the approximation method described in\n\n Strumbelj, E., Kononenko, I. (2014).\n Explaining prediction models and individual predictions with feature contributions.\n In Knowledge and information systems, 41(3):647–665\n\n with an early stopping criteria. This is, if the Shapley values change less than a certain threshold on average\n between two runs, then stop the estimation.\n \"\"\"\n full_subset_result, empty_subset_result = _estimate_full_and_emtpy_subset_results(set_func, num_players)\n\n shapley_values = None\n old_shap_proxy = np.zeros(num_players)\n evaluated_subsets = {}\n num_generated_permutations = 0\n run_counter = 0\n converged_run = 0\n\n if config.show_progress_bars:\n pbar = tqdm(total=1)\n\n with Parallel(n_jobs=n_jobs) as parallel:\n # The method stops if either the change between some consecutive runs is below the given threshold or the\n # maximum number of runs is reached.\n while True:\n run_counter += 1\n subsets_to_evaluate = set()\n\n # In each run, we create one random permutation of players. For instance, given 4 players, a permutation\n # could be [3,1,4,2].\n permutations = [np.random.choice(num_players, num_players, replace=False)\n for _ in range(num_permutations_per_run)]\n for permutation in permutations:\n num_generated_permutations += 1\n # Create all subsets belonging to the generated permutation. 
This is, if we have [3,1,4,2], then the\n # subsets are [3], [3,1], [3,1,4] [3,1,4,2].\n subsets_to_evaluate.update([subset_tuple for subset_tuple\n in _create_index_order_and_subset_tuples(permutation)\n if subset_tuple not in evaluated_subsets])\n\n # The result for each subset is cached such that if a subset that has already been evaluated appears again,\n # we can take this result directly.\n evaluated_subsets.update(_evaluate_set_function(set_func,\n subsets_to_evaluate,\n parallel,\n False))\n\n for permutation in permutations:\n # To improve the runtime, multiple permutations are evaluated in each run.\n if shapley_values is None:\n shapley_values = _estimate_shapley_values_of_permutation(permutation, evaluated_subsets,\n full_subset_result, empty_subset_result)\n else:\n shapley_values += _estimate_shapley_values_of_permutation(permutation, evaluated_subsets,\n full_subset_result, empty_subset_result)\n\n if run_counter > max_runs:\n break\n\n new_shap_proxy = np.array(shapley_values)\n new_shap_proxy[new_shap_proxy == 0] = EPS\n # The current Shapley values are the average of the estimated values, i.e. we need to divide by the number\n # of generated permutations here.\n new_shap_proxy /= num_generated_permutations\n\n if run_counter > 1:\n percentage_changes = 1 - new_shap_proxy / old_shap_proxy\n if config.show_progress_bars:\n pbar.set_description(f'Estimating Shapley Values. '\n f'Average change of Shapley values in run {run_counter} '\n f'({num_generated_permutations} evaluated permutations): '\n f'{np.mean(percentage_changes) * 100}%')\n\n if np.mean(percentage_changes) < min_percentage_change_threshold:\n # Here, the change between two runs is below the minimum threshold, but to reduce the likelihood\n # that this just happened by chance, we require that this happens at least for two runs in a row.\n converged_run += 1\n if converged_run >= 2:\n break\n else:\n converged_run = 0\n\n old_shap_proxy = new_shap_proxy\n\n if config.show_progress_bars:\n pbar.update(1)\n pbar.close()\n\n return shapley_values / num_generated_permutations\n\n\ndef _create_subsets_and_weights_exact(num_players: int, high_weight: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Creates all subsets and the exact weights of each subset. See Section 4.1.1. in\n\n Janzing, D., Minorics, L., & Bloebaum, P. (2020).\n Feature relevance quantification in explainable AI: A causal problem.\n In International Conference on Artificial Intelligence and Statistics (pp. 2907-2916). PMLR.\n\n for more details on this.\n\n :param num_players: Total number of players.\n :param high_weight: A 'high' weight for computational purposes. 
This is used to resemble 'infinity', but needs to be\n selected carefully to avoid numerical issues.\n :return: A tuple, where the first entry is a numpy array with all subsets and the second entry is an array with the\n corresponding weights to each subset.\n \"\"\"\n all_subsets = []\n\n num_iterations = int(np.ceil(num_players / 2))\n\n for i in range(num_iterations):\n # Create all (unique) subsets)\n all_subsets.extend(np.array([np.bincount(combs, minlength=num_players) for combs in\n itertools.combinations(range(num_players), i)]))\n\n all_subsets.extend(np.array([np.bincount(combs, minlength=num_players) for combs in\n itertools.combinations(range(num_players), num_players - i)]))\n\n if i == num_iterations - 1 and num_players % 2 == 0:\n all_subsets.extend(np.array([np.bincount(combs, minlength=num_players) for combs in\n itertools.combinations(range(num_players), i + 1)]))\n\n weights = np.zeros(len(all_subsets))\n\n for i, subset in enumerate(all_subsets):\n subset_size = np.sum(subset)\n if subset_size == num_players or subset_size == 0:\n # Assigning a 'high' weight, since this resembles \"infinity\".\n weights[i] = high_weight\n else:\n # The weight for a subset with a specific length (see paper mentioned in the docstring for more\n # information).\n weights[i] = (num_players - 1) / (\n scipy.special.binom(num_players, subset_size)\n * subset_size\n * (num_players - subset_size))\n\n return np.array(all_subsets, dtype=np.int), weights.astype(np.float)\n\n\ndef _create_subsets_and_weights_approximation(num_players: int, high_weight: float,\n num_subset_samples: int) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Randomly samples subsets and weights them based on the number of how often they appear.\n\n :param num_players: Total number of players.\n :param high_weight: A 'high' weight for computational purposes. 
This is used to resemble 'infinity', but needs to be\n selected carefully to avoid numerical issues.\n :param num_subset_samples: Number of subset samples.\n :return: A tuple, where the first entry is a numpy array with the sampled subsets and the second entry is an array\n with the corresponding weights to each subset.\n \"\"\"\n all_subsets = [np.zeros(num_players), np.ones(num_players)]\n weights = {tuple(all_subsets[0]): high_weight, tuple(all_subsets[1]): high_weight}\n\n probabilities_of_subset_length = np.zeros(num_players + 1)\n for i in range(1, num_players):\n probabilities_of_subset_length[i] = (num_players - 1) / (i * (num_players - i))\n\n probabilities_of_subset_length = probabilities_of_subset_length / np.sum(probabilities_of_subset_length)\n\n for i in range(num_subset_samples):\n subset_as_tuple = _convert_list_of_indices_to_binary_vector_as_tuple(\n np.random.choice(num_players,\n np.random.choice(num_players + 1, 1, p=probabilities_of_subset_length),\n replace=False), num_players)\n\n if subset_as_tuple not in weights:\n weights[subset_as_tuple] = 0\n all_subsets.append(np.array(subset_as_tuple))\n\n weights[subset_as_tuple] += 1\n\n weights = np.array([weights[tuple(x)] for x in all_subsets])\n\n return np.array(all_subsets, dtype=np.int), weights.astype(np.float)\n\n\ndef _convert_list_of_indices_to_binary_vector_as_tuple(list_of_indices: List[int], num_players: int) -> Tuple[int]:\n subset = np.zeros(num_players, dtype=np.int)\n subset[list_of_indices] = 1\n\n return tuple(subset)\n\n\ndef _evaluate_set_function(set_func: Callable[[np.ndarray], Union[float, np.ndarray]],\n evaluation_subsets: Union[Set[Tuple[int]], List[Tuple[int]]],\n parallel_context: Parallel,\n show_progressbar: bool = True) -> Dict[Tuple[int], Union[float, np.ndarray]]:\n def parallel_job(input_subset: Tuple[int], parallel_random_seed: int) -> Union[float, np.ndarray]:\n set_random_seed(parallel_random_seed)\n\n return set_func(np.array(input_subset))\n\n random_seeds = np.random.randint(np.iinfo(np.int32).max, size=len(evaluation_subsets))\n subset_results = parallel_context(delayed(parallel_job)(subset_to_evaluate, random_seed)\n for subset_to_evaluate, random_seed in\n tqdm(zip(evaluation_subsets, random_seeds),\n desc=\"Evaluate set function\",\n position=0,\n leave=True, disable=not config.show_progress_bars or not show_progressbar))\n\n subset_to_result_map = {}\n for (subset, result) in zip(evaluation_subsets, subset_results):\n subset_to_result_map[subset] = result\n\n return subset_to_result_map\n\n\ndef _estimate_full_and_emtpy_subset_results(set_func: Callable[[np.ndarray], Union[float, np.ndarray]],\n num_players: int) \\\n -> Tuple[Union[float, np.ndarray], Union[float, np.ndarray]]:\n return set_func(np.ones(num_players, dtype=np.int)), \\\n set_func(np.zeros(num_players, dtype=np.int))\n\n\ndef _create_index_order_and_subset_tuples(permutation: List[int]) -> List[Tuple[int]]:\n indices = []\n index_tuples = []\n\n for var in range(len(permutation) - 1):\n indices += [permutation[var]]\n index_tuples.append(_convert_list_of_indices_to_binary_vector_as_tuple(indices, len(permutation)))\n\n return index_tuples\n\n\ndef _estimate_shapley_values_of_permutation(permutation: List[int],\n evaluated_subsets: Dict[Tuple[int], Union[float, np.ndarray]],\n full_subset_result: Union[float, np.ndarray],\n empty_subset_result: Union[float, np.ndarray]) -> np.ndarray:\n current_variable_set = []\n shapley_values = [[]] * len(permutation)\n previous_result = empty_subset_result\n for n in 
range(len(permutation) - 1):\n current_variable_set += [permutation[n]]\n current_result = evaluated_subsets[\n _convert_list_of_indices_to_binary_vector_as_tuple(current_variable_set, len(permutation))]\n\n shapley_values[permutation[n]] = current_result - previous_result\n previous_result = current_result\n\n shapley_values[permutation[-1]] = full_subset_result - previous_result\n\n return np.array(shapley_values).T\n"
] |
[
[
"numpy.random.choice",
"scipy.special.binom",
"numpy.ones",
"scipy.special.comb",
"numpy.ceil",
"numpy.mean",
"numpy.iinfo",
"numpy.insert",
"sklearn.linear_model.LinearRegression",
"numpy.bincount",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] |
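The dowhy module above estimates Shapley values from a set function v over binary membership vectors, attributing v(S u {i}) - v(S) to player i. To make that definition concrete, here is a brute-force exact computation over all permutations for an invented 3-player game (the toy utility is made up for illustration and does not call dowhy):

    import itertools
    from math import factorial
    import numpy as np

    def set_func(subset):
        # Invented utility: additive weights 1, 2, 3 plus a bonus when players 0 and 1 co-occur.
        return float(np.dot(subset, [1.0, 2.0, 3.0])) + (1.0 if subset[0] and subset[1] else 0.0)

    num_players = 3
    shapley = np.zeros(num_players)
    for perm in itertools.permutations(range(num_players)):
        current = np.zeros(num_players, dtype=int)
        prev = set_func(current)
        for p in perm:                  # add players one by one, credit the marginal gain
            current[p] = 1
            val = set_func(current)
            shapley[p] += val - prev
            prev = val
    shapley /= factorial(num_players)
    print(shapley)  # [1.5 2.5 3. ] -- players 0 and 1 split the interaction bonus evenly

The values sum to v(full set) = 7.0, as Shapley efficiency requires; the module's EXACT method computes the same quantity via per-subset weights rather than permutations.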
cielavenir/scipy
|
[
"7532d864bbaee2a9a925d18a5bfc2f470643a363"
] |
[
"scipy/sparse/linalg/eigen/arpack/setup.py"
] |
[
"from __future__ import division, print_function, absolute_import\n\nfrom os.path import join\n\n\ndef configuration(parent_package='',top_path=None):\n from scipy._build_utils.system_info import get_info, NotFoundError\n from numpy.distutils.misc_util import Configuration\n from scipy._build_utils import (get_g77_abi_wrappers,\n gfortran_legacy_flag_hook)\n\n lapack_opt = get_info('lapack_opt')\n\n config = Configuration('arpack', parent_package, top_path)\n\n arpack_sources = [join('ARPACK','SRC', '*.f')]\n arpack_sources.extend([join('ARPACK','UTIL', '*.f')])\n\n arpack_sources += get_g77_abi_wrappers(lapack_opt)\n\n config.add_library('arpack_scipy', sources=arpack_sources,\n include_dirs=[join('ARPACK', 'SRC')],\n _pre_build_hook=gfortran_legacy_flag_hook)\n\n ext_sources = ['arpack.pyf.src']\n ext = config.add_extension('_arpack',\n sources=ext_sources,\n libraries=['arpack_scipy'],\n extra_info=lapack_opt,\n depends=arpack_sources,\n )\n ext._pre_build_hook = gfortran_legacy_flag_hook\n\n config.add_data_dir('tests')\n\n # Add license files\n config.add_data_files('ARPACK/COPYING')\n\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n"
] |
[
[
"scipy._build_utils.system_info.get_info",
"numpy.distutils.misc_util.Configuration",
"scipy._build_utils.get_g77_abi_wrappers"
]
] |
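This record is a numpy.distutils build script rather than library code. For orientation only, here is a skeleton in the same style; the package, extension, and source names are hypothetical, and numpy.distutils is deprecated (removed on recent Python/numpy), so treat this as a historical sketch:

    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('mypkg', parent_package, top_path)
        # add_extension compiles the listed sources into the extension module '_fast'.
        config.add_extension('_fast', sources=['fast.pyf.src'])
        config.add_data_dir('tests')
        return config

    if __name__ == '__main__':
        from numpy.distutils.core import setup
        setup(**configuration(top_path='').todict())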
RyuTake/PythonStudy
|
[
"7726f0cdaed2e65134027540d53ba77dc0ffe4cc"
] |
[
"5_2_clustering.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 29 13:47:57 2018\n\n@author: rtake\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom sklearn import cluster\nfrom sklearn import datasets\n\n#irisデータを読み込み\niris = datasets.load_iris()\ndata = iris['data']\n\n# 学習→クラスタの生成\nmodel = cluster.KMeans(n_clusters=3) #学習、クラスタを3つにわける\nmodel.fit(data) # 学習結果からクラスタを生成\n\n#学習結果のラベル取得\nlabels = model.labels_\n\n#グラフの描画\n#三つのクラスタのうち、ラベル0のクラスタをプロットする\nldata = data[labels == 0] \nplt.scatter(ldata[:,2], ldata[:,3], c='black', alpha= 0.3, s=100, marker=\"o\")\n\n#三つのクラスタのうち、ラベル1のクラスタをプロットする\nldata = data[labels == 1] \nplt.scatter(ldata[:,2], ldata[:,3], c='black', alpha= 0.3, s=100, marker=\"^\")\n\n#三つのクラスタのうち、ラベル2のクラスタをプロットする\nldata = data[labels == 2] \nplt.scatter(ldata[:,2], ldata[:,3], c='black', alpha= 0.3, s=100, marker=\"*\")\n\nplt.xlabel(iris['feature_names'][2])\nplt.ylabel(iris['feature_names'][3])\n\nplt.show()\n\n#%% クラスタリング結果を6つの図を一括で表示する\nMARKERS = [\"v\", \"^\", \"+\", \"x\", \"d\", \"p\",\"s\",\"1\",\"2\"]\n\n#指定されたインデックスのfeature値で散布図を作成する関数\n#defで関数定義\ndef scatter_by_features(feat_idx1, feat_idx2):\n for lbl in range(labels.max() + 1):\n clustered = data[labels == lbl]\n plt.scatter(clustered[:, feat_idx1], clustered[:, feat_idx2], c = 'black', alpha=0.3, s=100, marker = MARKERS[lbl], label='label{}'.format(lbl))\n \n plt.xlabel(iris['feature_names'][feat_idx1], fontsize='xx-large')\n plt.ylabel(iris['feature_names'][feat_idx2], fontsize='xx-large')\n \nplt.figure(figsize=(16,16))\n\n#feature \"sepal length\" & \"sepal width\"\nplt.subplot(3,2,1)\nscatter_by_features(0,1)\n\n#feature \"sepal length\" & \"petal length\"\nplt.subplot(3,2,2)\nscatter_by_features(0,2)\n\n#feature \"sepal length\" & \"petal width\"\nplt.subplot(3,2,3)\nscatter_by_features(0,3)\n\n#feature \"sepal width\" & \"petal length\"\nplt.subplot(3,2,4)\nscatter_by_features(1,2)\n\n#feature \"sepal width\" & \"petal width\"\nplt.subplot(3,2,5)\nscatter_by_features(1,3)\n\n#feature \"petal length\" & \"petal width\"\nplt.subplot(3,2,6)\nscatter_by_features(2,3)\n\nplt.tight_layout()\nplt.show()\n\n#%% こたえあわせ\nfrom sklearn import metrics\nprint(metrics.confusion_matrix(iris['target'], model.labels_))\niris['target_names']"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.scatter",
"sklearn.cluster.KMeans",
"matplotlib.pyplot.figure",
"sklearn.datasets.load_iris",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
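A condensed version of the clustering check at the end of the script above, with one caveat the original leaves implicit: k-means label integers are arbitrary, so the confusion matrix against iris['target'] is only meaningful up to a permutation of rows and columns. The random_state and n_init values here are chosen purely for reproducibility:

    from sklearn import cluster, datasets, metrics

    iris = datasets.load_iris()
    model = cluster.KMeans(n_clusters=3, n_init=10, random_state=0).fit(iris['data'])

    # Rows: true species; columns: (arbitrarily numbered) clusters.
    print(metrics.confusion_matrix(iris['target'], model.labels_))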
AndraIonescu/reproducing-schema-matching
|
[
"a20f03413064f50723ae6bfdbe9f773f67b7cd71"
] |
[
"algorithms/clustering/quantile_histogram/histogram.py"
] |
[
"from numpy import ndarray\nimport numpy as np\nimport scipy.stats as ss\n\n\nclass QuantileHistogram(object):\n \"\"\"\n A class used to represent an equi depth quantile histogram\n\n e.g. Quantile histogram with 256 buckets\n\n |------||------||------| ... |------|\n 1 2 3 ... 256\n\n\n Attributes\n ----------\n\n\n\n Methods\n -------\n create_histogram(column: list, quantiles: int)\n Creates an equi depth quantile histogram for the specified column with the specified amount of quantiles\n\n create_histogram_from_reference(column: list)\n Creates an equi depth quantile histogram for the specified column based on the reference histogram\n\n get_buckets()\n Returns the histogram's buckets\n\n chunks(lst: ndarray, n: int)\n Splits a list into n equal sized chunks\n\n get_sorted_ranks(column: list)\n A UNIX-like \"sort -n\" function that works for mixed columns that might contain both numbers and strings.\n The strings are sorted first in lexicographic order and then the numbers in numeric and then it returns the\n ranks of the sorted data\n\n copy_buckets_without_values()\n Copies the bucket boundaries but not the values\n\n add_value_to_bucket(value: float)\n Adds a value into a bucket\n\n normalize_buckets(column_size: int)\n Normalizes the frequencies inside the buckets based on the column size\n\n print_histogram()\n Prints the histogram\n\n flatten_histogram()\n Flattens the histogram by creating two lists with all the bucket values with their weights\n \"\"\"\n def __init__(self, name: str, ranks: ndarray, normalization: int, quantiles: int, reference_hist=None):\n \"\"\"\n Parameters\n ----------\n ranks : list\n the column's ranked data\n quantiles : int\n the number of quantiles\n reference_hist : QuantileHistogram, optional\n the reference histogram that provides the bucket boundaries\n \"\"\"\n self.bucket_boundaries = {}\n self.bucket_values = {}\n self.name = name\n self.normalization_factor = normalization\n self.quantiles = quantiles\n self.dist_matrix = self.calc_dist_matrix()\n\n if reference_hist is None:\n self.add_buckets(ranks.min(), ss.mstats.mquantiles(ranks, np.array(list(range(1, quantiles + 1))) / quantiles))\n self.add_values(ranks)\n else:\n self.bucket_boundaries = reference_hist.bucket_boundaries\n self.add_values(ranks)\n\n @property\n def get_values(self):\n return np.array(list(self.bucket_values.values()))\n\n @property\n def is_empty(self):\n return np.sum(self.get_values) == 0\n\n def add_buckets(self, min_val, bb):\n self.bucket_boundaries[0] = (min_val, bb[0])\n i = 0\n while i < len(bb) - 1:\n self.bucket_boundaries[i+1] = (bb[i], bb[i+1])\n i = i + 1\n # self.bucket_boundaries[i] = (self.bucket_boundaries[i-1][1], math.inf)\n\n def add_values(self, values, norm=True):\n for i in range(len(self.bucket_boundaries.values())):\n self.bucket_values[i] = 0.0\n for value in values:\n idx = self.bucket_binary_search(value)\n if idx != -1:\n self.bucket_values[idx] = self.bucket_values[idx] + 1.0\n if norm:\n self.normalize_values()\n\n def normalize_values(self):\n self.bucket_values = {k: v / self.normalization_factor for k, v in self.bucket_values.items()}\n\n def bucket_binary_search(self, x):\n lelf = 0\n right = self.quantiles - 1\n while lelf <= right:\n mid = lelf + (right - lelf) // 2\n if self.bucket_boundaries[mid][0] <= x <= self.bucket_boundaries[mid][1]:\n return mid\n elif self.bucket_boundaries[mid][1] < x:\n lelf = mid + 1\n else:\n right = mid - 1\n return -1\n\n def calc_dist_matrix(self):\n q = np.array(list(range(1, self.quantiles + 1))) 
/ self.quantiles\n dist = []\n for i in q:\n temp = []\n for j in q:\n temp.append(abs(i - j))\n dist.append(temp)\n return np.array(dist)\n"
] |
[
[
"numpy.array",
"numpy.sum"
]
] |
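The QuantileHistogram constructor above derives equi-depth bucket boundaries with scipy's mquantiles and assigns values via a hand-rolled binary search. A compact sketch of both steps on invented rank data, where numpy's searchsorted plays the role of bucket_binary_search:

    import numpy as np
    from scipy.stats import mstats

    ranks = np.arange(1, 101, dtype=float)   # toy rank data
    quantiles = 4
    boundaries = mstats.mquantiles(ranks, np.arange(1, quantiles + 1) / quantiles)
    print(boundaries)                         # ~[25.45 50.5 75.55 100.] -- equi-depth cut points

    # Bucket lookup: index of the first boundary >= value.
    print(np.searchsorted(boundaries, 60.0))  # 2, i.e. the third bucket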
mitchellgordon95/mesh-transformer-jax
|
[
"13b095791ba239ce6ec5e0330eaece758ab21dd0"
] |
[
"train.py"
] |
[
"import argparse\nimport json\nimport time\n\nimport numpy as np\nimport wandb\nfrom tqdm import tqdm\n\nfrom mesh_transformer.build_model import build_model\nfrom lm_eval import evaluator, tasks\nfrom tasks.eval_harness import EvalHarnessAdaptor\nfrom tfrecord_loader import TFRecordNewInputs\nimport multiprocessing\n\n\ndef parse_args():\n # Parse command line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--tpu\", type=str, help=\"Name of TPU to train on.\")\n parser.add_argument(\"--tpu_region\", type=str, help=\"Region of TPU to train on.\")\n parser.add_argument(\"--preemptible\", action=\"store_true\")\n\n parser.add_argument(\"--config\", type=str, default=None, help=\"Config file location\")\n\n parser.add_argument(\"--new\", action=\"store_true\", help=\"If set, deletes previous checkpoint, if it exists, and \"\n \"starts a new training run\")\n\n parser.add_argument(\"--version\", type=int, default=1, help=\"Choose which model version to use\")\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n # huggingface tokenizers gets very angry if you fork\n multiprocessing.set_start_method(\"spawn\")\n\n args = parse_args()\n params = json.load(open(args.config))\n\n if args.new:\n print(f\"Starting experiment {params['name']} from scratch! \"\n f\"all data in gs://{params['bucket']}/{params['model_dir']}/ will be deleted\")\n input(\"Hit enter to continue\")\n\n tpu_name = args.tpu\n region = args.tpu_region\n preemptible = args.preemptible\n clean_start = args.new\n\n gradient_accumulation_steps = params.get(\"gradient_accumulation_steps\", 1)\n per_replica_batch = params[\"per_replica_batch\"]\n tpu_size = params[\"tpu_size\"]\n cores_per_replica = params[\"cores_per_replica\"]\n\n bucket = params[\"bucket\"]\n model_dir = params[\"model_dir\"]\n layers = params[\"layers\"]\n d_model = params[\"d_model\"]\n n_heads = params[\"n_heads\"]\n n_vocab = params[\"n_vocab\"]\n seq = params[\"seq\"]\n norm = params[\"norm\"]\n\n val_batches = params[\"val_batches\"]\n val_every = params[\"val_every\"]\n ckpt_every = params[\"ckpt_every\"]\n keep_every = params[\"keep_every\"]\n eval_tasks = params[\"eval_harness_tasks\"]\n total_steps = params[\"total_steps\"]\n\n pe = params[\"pe\"]\n assert pe in [\"fixed\", \"rotary\", \"t5\"]\n\n t = build_model(params, tpu_name, region, preemptible, version=args.version)\n\n try:\n # t.save(0, bucket, model_dir, init=True, overwrite=clean_start)\n step = 0\n train_load_restore = None\n except Exception as e:\n print(f\"Save failed with error {e}, trying to load instead...\", e)\n step, aux = t.load(bucket, model_dir)\n train_load_restore = aux.get(\"train_loader\", None)\n\n if train_load_restore is None:\n print(\"Failed to restore train loader state\")\n\n train_dataset = TFRecordNewInputs(f\"data/{params['train_set']}\",\n batch_size=(\n gradient_accumulation_steps,\n per_replica_batch * tpu_size // cores_per_replica),\n sample_size=params['seq'],\n restore_state=train_load_restore)\n\n global_val_batch = per_replica_batch * tpu_size // cores_per_replica\n\n val_sets = {}\n\n for k, v in params['val_set'].items():\n val_sets[k] = TFRecordNewInputs(f\"data/{v}\",\n batch_size=(global_val_batch,),\n sample_size=seq)\n\n # use dynamic seq length unless pe is fixed\n adaptor = EvalHarnessAdaptor(t, seq, global_val_batch * 4, shrink=pe != \"fixed\")\n\n start = time.time()\n t.train(train_dataset.get_samples())\n print(f\"Train fn compiled in {time.time() - start:.06}s\")\n\n start = time.time()\n for 
val_set in val_sets.values():\n t.eval(val_set.get_samples())\n print(f\"Eval fn compiled in {time.time() - start:.06}s\")\n\n wandb.init(project='mesh-transformer-jax', entity=\"eleutherai\", name=params[\"name\"], config=params)\n\n eval_task_dict = tasks.get_task_dict(eval_tasks)\n\n while True:\n loss, last_loss = t.train(train_dataset.get_samples())\n wandb.log({'train/loss': loss, 'train/last_loss': last_loss}, step)\n\n if (step % ckpt_every == 0 and step) or step == total_steps:\n t.save(step, bucket, model_dir,\n aux={\"train_loader\": train_dataset.get_state()},\n init=False,\n delete_old=step % keep_every != 0)\n\n if step == total_steps:\n print(\"training completed!\")\n exit()\n\n if step % 100 == 0:\n print(f\"step {step} done\")\n\n if step % val_every == 0:\n for name, val_set in val_sets.items():\n val_loss = []\n for i, _ in tqdm(zip(val_set.sample_once(), range(val_batches)),\n desc=f\"validation for step {step}, set {name}\",\n total=val_batches):\n val_loss.append(t.eval(i))\n val_loss = np.array(val_loss).mean()\n print(f\"validation loss for step {step}, set {name}: {val_loss}\")\n\n wandb.log({f'val/loss_{name}': float(val_loss)}, step)\n\n results = evaluator.evaluate(adaptor, eval_task_dict, False, 0, None)\n\n flat_results = {}\n\n for task_name, task_res in results[\"results\"].items():\n version = results[\"versions\"][task_name]\n for metric_name, metric_res in task_res.items():\n flat_results[f\"{task_name}-v{version}/{metric_name}\"] = float(metric_res)\n\n dumped = json.dumps(results, indent=2)\n print(f\"step {step} val results: {dumped}\")\n wandb.log(flat_results, step)\n step += 1\n"
] |
[
[
"numpy.array"
]
] |
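The training script above derives its batch shape from TPU topology: tpu_size // cores_per_replica data-parallel replicas, each consuming per_replica_batch sequences, times gradient_accumulation_steps micro-batches per optimizer step. The arithmetic, with invented numbers (the real values come from the JSON config):

    # All values below are made up for illustration.
    gradient_accumulation_steps = 16
    per_replica_batch = 8
    tpu_size = 32           # total TPU cores
    cores_per_replica = 8   # cores one model replica spans

    replicas = tpu_size // cores_per_replica     # 4 data-parallel replicas
    global_batch = per_replica_batch * replicas  # 32 sequences per micro-batch
    print((gradient_accumulation_steps, global_batch))  # (16, 32) -- the batch_size tuple passed to TFRecordNewInputs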
StrayBird-ATSH/gluon-nlp
|
[
"5dc6b9c9fab9e99b155554a50466c514b879ea84",
"5dc6b9c9fab9e99b155554a50466c514b879ea84"
] |
[
"scripts/bert/data/create_pretraining_data.py",
"src/gluonnlp/model/transformer.py"
] |
[
"# Copyright 2018 The Google AI Language Team Authors and DMLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Create masked LM/next sentence masked_lm examples for BERT.\"\"\"\n\n\nimport argparse\nimport logging\nimport io\nimport os\nimport glob\nimport collections\nimport warnings\nimport random\nimport time\nfrom multiprocessing import Pool\nimport numpy as np\nimport gluonnlp as nlp\nfrom gluonnlp.data import BERTTokenizer\n\n\nclass TrainingInstance:\n \"\"\"A single training instance (sentence pair).\"\"\"\n\n def __init__(self, tokens, segment_ids, masked_lm_positions,\n masked_lm_labels, is_random_next, vocab):\n self.tokens = tokens\n self.segment_ids = segment_ids\n self.is_random_next = is_random_next\n self.masked_lm_positions = masked_lm_positions\n self.masked_lm_labels = masked_lm_labels\n self.vocab = vocab\n\n def __str__(self):\n tks = self.vocab.to_tokens(self.tokens)\n mask_tks = self.vocab.to_tokens(self.masked_lm_labels)\n s = ''\n s += 'tokens: %s\\n' % (' '.join(tks))\n s += 'segment_ids: %s\\n' % (' '.join(\n [str(x) for x in self.segment_ids]))\n s += 'is_random_next: %s\\n' % self.is_random_next\n s += 'masked_lm_positions: %s\\n' % (' '.join(\n [str(x) for x in self.masked_lm_positions]))\n s += 'masked_lm_labels: %s\\n' % (' '.join(mask_tks))\n s += '\\n'\n return s\n\n def __repr__(self):\n return self.__str__()\n\ndef transform(instance, max_seq_length):\n \"\"\"Transform instance to inputs for MLM and NSP.\"\"\"\n input_ids = instance.tokens\n assert len(input_ids) <= max_seq_length\n segment_ids = instance.segment_ids\n masked_lm_positions = instance.masked_lm_positions\n valid_lengths = len(input_ids)\n\n masked_lm_ids = instance.masked_lm_labels\n masked_lm_weights = [1.0] * len(masked_lm_ids)\n\n next_sentence_label = 1 if instance.is_random_next else 0\n\n features = {}\n features['input_ids'] = input_ids\n features['segment_ids'] = segment_ids\n features['masked_lm_positions'] = masked_lm_positions\n features['masked_lm_ids'] = masked_lm_ids\n features['masked_lm_weights'] = masked_lm_weights\n features['next_sentence_labels'] = [next_sentence_label]\n features['valid_lengths'] = [valid_lengths]\n return features\n\ndef print_example(instance, features):\n logging.debug('*** Example Instance ***')\n logging.debug('\\n%s', instance)\n\n for feature_name in features.keys():\n feature = features[feature_name]\n logging.debug('Generated %s: %s', feature_name, feature)\n\ndef write_to_files_np(features, tokenizer, max_seq_length,\n max_predictions_per_seq, output_files):\n # pylint: disable=unused-argument\n \"\"\"Write to numpy files from `TrainingInstance`s.\"\"\"\n next_sentence_labels = []\n valid_lengths = []\n\n assert len(output_files) == 1, 'numpy format only support single output file'\n output_file = output_files[0]\n (input_ids, segment_ids, masked_lm_positions, masked_lm_ids,\n masked_lm_weights, next_sentence_labels, valid_lengths) = features\n total_written = len(next_sentence_labels)\n\n # store variable length numpy array object 
directly.\n outputs = collections.OrderedDict()\n outputs['input_ids'] = np.array(input_ids, dtype=object)\n outputs['segment_ids'] = np.array(segment_ids, dtype=object)\n outputs['masked_lm_positions'] = np.array(masked_lm_positions, dtype=object)\n outputs['masked_lm_ids'] = np.array(masked_lm_ids, dtype=object)\n outputs['masked_lm_weights'] = np.array(masked_lm_weights, dtype=object)\n outputs['next_sentence_labels'] = np.array(next_sentence_labels, dtype='int32')\n outputs['valid_lengths'] = np.array(valid_lengths, dtype='int32')\n\n np.savez_compressed(output_file, **outputs)\n logging.info('Wrote %d total instances', total_written)\n\ndef tokenize_lines_fn(x):\n \"\"\"Worker function to tokenize lines based on the tokenizer, and perform vocabulary lookup.\"\"\"\n lines, tokenizer, vocab = x\n results = []\n for line in lines:\n if not line:\n break\n line = line.strip()\n # Empty lines are used as document delimiters\n if not line:\n results.append([])\n else:\n tokens = vocab[tokenizer(line)]\n if tokens:\n results.append(tokens)\n return results\n\ndef convert_to_npz(instances, max_seq_length):\n \"\"\"Create masked language model and next sentence prediction samples as numpy arrays.\"\"\"\n input_ids = []\n segment_ids = []\n masked_lm_positions = []\n masked_lm_ids = []\n masked_lm_weights = []\n next_sentence_labels = []\n valid_lengths = []\n\n for inst_index, instance in enumerate(instances):\n features = transform(instance, max_seq_length)\n input_id = features['input_ids']\n segment_id = features['segment_ids']\n masked_lm_position = features['masked_lm_positions']\n masked_lm_id = features['masked_lm_ids']\n masked_lm_weight = features['masked_lm_weights']\n next_sentence_label = features['next_sentence_labels'][0]\n valid_length = features['valid_lengths'][0]\n\n input_ids.append(np.ascontiguousarray(input_id, dtype='int32'))\n segment_ids.append(np.ascontiguousarray(segment_id, dtype='int32'))\n masked_lm_positions.append(np.ascontiguousarray(masked_lm_position, dtype='int32'))\n masked_lm_ids.append(np.ascontiguousarray(masked_lm_id, dtype='int32'))\n masked_lm_weights.append(np.ascontiguousarray(masked_lm_weight, dtype='float32'))\n next_sentence_labels.append(next_sentence_label)\n valid_lengths.append(valid_length)\n # debugging information\n if inst_index < 1:\n print_example(instance, features)\n return input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights,\\\n next_sentence_labels, segment_ids, valid_lengths\n\ndef create_training_instances(x):\n \"\"\"Create `TrainingInstance`s from raw text.\n\n The expected input file format is the following:\n\n (1) One sentence per line. These should ideally be actual sentences, not\n entire paragraphs or arbitrary spans of text. (Because we use the\n sentence boundaries for the \"next sentence prediction\" task).\n (2) Blank lines between documents. 
Document boundaries are needed so\n that the \"next sentence prediction\" task doesn't span between documents.\n\n The function expect arguments packed in a tuple as described below.\n\n Parameters\n ----------\n input_files : list of str\n List of paths to input text files.\n tokenizer : BERTTokenizer\n The BERT tokenizer\n max_seq_length : int\n The hard limit of maximum sequence length of sentence pairs\n dupe_factor : int\n Duplication factor.\n short_seq_prob : float\n The probability of sampling sequences shorter than the max_seq_length.\n masked_lm_prob : float\n The probability of replacing texts with masks/random words/original words.\n max_predictions_per_seq : int\n The hard limit of the number of predictions for masked words\n whole_word_mask : bool\n Whether to do masking for whole words\n vocab : BERTVocab\n The BERTVocab\n nworker : int\n The number of processes to help processing texts in parallel\n worker_pool : multiprocessing.Pool\n Must be provided if nworker > 1. The caller is responsible for the destruction of\n the worker pool.\n output_file : str or None\n Path to the output file. If None, the result is not serialized. If provided,\n results are stored in the order of (input_ids, segment_ids, masked_lm_positions,\n masked_lm_ids, masked_lm_weights, next_sentence_labels, valid_lengths).\n\n Returns\n -------\n A tuple of np.ndarray : input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights\n next_sentence_labels, segment_ids, valid_lengths\n \"\"\"\n (input_files, tokenizer, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, whole_word_mask, vocab,\n dupe_factor, nworker, worker_pool, output_file) = x\n\n time_start = time.time()\n if nworker > 1:\n assert worker_pool is not None\n\n all_documents = [[]]\n\n for input_file in input_files:\n logging.debug('*** Tokenizing file %s***', input_file)\n with io.open(input_file, 'r', encoding='utf-8') as reader:\n lines = reader.readlines()\n num_lines = len(lines)\n num_lines_per_worker = (num_lines + nworker - 1) // nworker\n process_args = []\n\n # tokenize in parallel\n for worker_idx in range(nworker):\n start = worker_idx * num_lines_per_worker\n end = min((worker_idx + 1) * num_lines_per_worker, num_lines)\n process_args.append((lines[start:end], tokenizer, vocab))\n if worker_pool:\n tokenized_results = worker_pool.map(tokenize_lines_fn, process_args)\n else:\n tokenized_results = [tokenize_lines_fn(process_args[0])]\n\n for tokenized_result in tokenized_results:\n for line in tokenized_result:\n if not line:\n if all_documents[-1]:\n all_documents.append([])\n else:\n all_documents[-1].append(line)\n\n # remove the empty document if any\n all_documents = [x for x in all_documents if x]\n random.shuffle(all_documents)\n\n # generate training instances\n instances = []\n if worker_pool:\n process_args = []\n for document_index in range(len(all_documents)):\n process_args.append((all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, whole_word_mask,\n vocab, tokenizer))\n for _ in range(dupe_factor):\n instances_results = worker_pool.map(create_instances_from_document, process_args)\n for instances_result in instances_results:\n instances.extend(instances_result)\n random.shuffle(instances)\n npz_instances = worker_pool.apply(convert_to_npz, (instances, max_seq_length))\n else:\n for _ in range(dupe_factor):\n for document_index in range(len(all_documents)):\n instances.extend(\n create_instances_from_document(\n (all_documents, 
document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, whole_word_mask,\n vocab, tokenizer)))\n random.shuffle(instances)\n npz_instances = convert_to_npz(instances, max_seq_length)\n\n (input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights,\n next_sentence_labels, segment_ids, valid_lengths) = npz_instances\n\n # write output to files. Used when pre-generating files\n if output_file:\n features = (input_ids, segment_ids, masked_lm_positions, masked_lm_ids,\n masked_lm_weights, next_sentence_labels, valid_lengths)\n logging.debug('*** Writing to output file %s ***', output_file)\n write_to_files_np(features, tokenizer, max_seq_length,\n max_predictions_per_seq, [output_file])\n features = None\n else:\n features = (input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights,\n next_sentence_labels, segment_ids, valid_lengths)\n time_end = time.time()\n logging.debug('Process %d files took %.1f s', len(input_files), time_end - time_start)\n return features\n\ndef create_instances_from_document(x):\n \"\"\"Creates `TrainingInstance`s for a single document.\"\"\"\n (all_documents, document_index, max_seq_length, short_seq_prob,\n masked_lm_prob, max_predictions_per_seq, whole_word_mask, vocab, tokenizer) = x\n document = all_documents[document_index]\n _MASK_TOKEN = vocab[vocab.mask_token]\n _CLS_TOKEN = vocab[vocab.cls_token]\n _SEP_TOKEN = vocab[vocab.sep_token]\n\n # Account for [CLS], [SEP], [SEP]\n max_num_tokens = max_seq_length - 3\n\n # According to the original tensorflow implementation:\n # We *usually* want to fill up the entire sequence since we are padding\n # to `max_seq_length` anyways, so short sequences are generally wasted\n # computation. However, we *sometimes*\n # (i.e., short_seq_prob == 0.1, 10% of the time) want to use shorter\n # sequences to minimize the mismatch between pre-training and fine-tuning.\n # The `target_seq_length` is just a rough target however, whereas\n # `max_seq_length` is a hard limit.\n target_seq_length = max_num_tokens\n if random.random() < short_seq_prob:\n target_seq_length = random.randint(2, max_num_tokens)\n\n # We DON'T just concatenate all of the tokens from a document into a long\n # sequence and choose an arbitrary split point because this would make the\n # next sentence prediction task too easy. 
Instead, we split the input into\n # segments \"A\" and \"B\" based on the actual \"sentences\" provided by the user\n # input.\n instances = []\n current_chunk = []\n current_length = 0\n i = 0\n while i < len(document): # pylint: disable=R1702\n segment = document[i]\n current_chunk.append(segment)\n current_length += len(segment)\n if i == len(document) - 1 or current_length >= target_seq_length:\n if current_chunk:\n # `a_end` is how many segments from `current_chunk` go into the `A`\n # (first) sentence.\n a_end = 1\n if len(current_chunk) >= 2:\n a_end = random.randint(1, len(current_chunk) - 1)\n\n tokens_a = []\n for j in range(a_end):\n tokens_a.extend(current_chunk[j])\n\n tokens_b = []\n # Random next\n is_random_next = False\n if len(current_chunk) == 1 or random.random() < 0.5:\n is_random_next = True\n target_b_length = target_seq_length - len(tokens_a)\n\n # randomly choose a document other than itself\n random_document_index = random.randint(0, len(all_documents) - 2)\n if random_document_index == document_index:\n random_document_index = len(all_documents) - 1\n\n random_document = all_documents[random_document_index]\n random_start = random.randint(0, len(random_document) - 1)\n for j in range(random_start, len(random_document)):\n tokens_b.extend(random_document[j])\n if len(tokens_b) >= target_b_length:\n break\n # We didn't actually use these segments so we 'put them back' so\n # they don't go to waste.\n num_unused_segments = len(current_chunk) - a_end\n i -= num_unused_segments\n # Actual next\n else:\n is_random_next = False\n for j in range(a_end, len(current_chunk)):\n tokens_b.extend(current_chunk[j])\n truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)\n\n assert len(tokens_a) >= 1\n assert len(tokens_b) >= 1\n\n tokens = []\n segment_ids = []\n tokens.append(_CLS_TOKEN)\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(_SEP_TOKEN)\n segment_ids.append(0)\n\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(_SEP_TOKEN)\n segment_ids.append(1)\n\n (tokens, masked_lm_positions,\n masked_lm_labels) = create_masked_lm_predictions(\n tokens, masked_lm_prob, max_predictions_per_seq,\n whole_word_mask, vocab, tokenizer,\n _MASK_TOKEN, _CLS_TOKEN, _SEP_TOKEN)\n instance = TrainingInstance(\n tokens=tokens,\n segment_ids=segment_ids,\n is_random_next=is_random_next,\n masked_lm_positions=masked_lm_positions,\n masked_lm_labels=masked_lm_labels,\n vocab=vocab)\n instances.append(instance)\n current_chunk = []\n current_length = 0\n i += 1\n\n return instances\n\n\nMaskedLmInstance = collections.namedtuple('MaskedLmInstance',\n ['index', 'label'])\n\n\ndef create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq,\n whole_word_mask, vocab, tokenizer,\n _MASK_TOKEN, _CLS_TOKEN, _SEP_TOKEN):\n \"\"\"Creates the predictions for the masked LM objective.\"\"\"\n cand_indexes = []\n for (i, token) in enumerate(tokens):\n if token in [_CLS_TOKEN, _SEP_TOKEN]:\n continue\n # Whole Word Masking means that if we mask all of the subwords\n # corresponding to an original word. When a word has been split into\n # subwords, the first token does not have any marker and any subsequence\n # tokens are prefixed with ##. 
So whenever we see the ## token, we\n # append it to the previous set of word indexes.\n #\n # Note that Whole Word Masking does *not* change the training code\n # at all -- we still predict each subword independently, softmaxed\n # over the entire vocabulary.\n if whole_word_mask and len(cand_indexes) >= 1 and \\\n not tokenizer.is_first_subword(vocab.idx_to_token[token]):\n cand_indexes[-1].append(i)\n else:\n cand_indexes.append([i])\n\n random.shuffle(cand_indexes)\n\n output_tokens = list(tokens)\n\n num_to_predict = min(max_predictions_per_seq,\n max(1, int(round(len(tokens) * masked_lm_prob))))\n\n masked_lms = []\n covered_indexes = set()\n for index_set in cand_indexes:\n if len(masked_lms) >= num_to_predict:\n break\n # If adding a whole-word mask would exceed the maximum number of\n # predictions, then just skip this candidate.\n if len(masked_lms) + len(index_set) > num_to_predict:\n continue\n is_any_index_covered = False\n for index in index_set:\n if index in covered_indexes:\n is_any_index_covered = True\n break\n if is_any_index_covered:\n continue\n for index in index_set:\n covered_indexes.add(index)\n masked_token = None\n # 80% of the time, replace with [MASK]\n if random.random() < 0.8:\n masked_token = _MASK_TOKEN\n else:\n # 10% of the time, keep original\n if random.random() < 0.5:\n masked_token = tokens[index]\n # 10% of the time, replace with random word\n else:\n # generate a random word in [0, vocab_size - 1]\n masked_token = random.randint(0, len(vocab) - 1)\n\n output_tokens[index] = masked_token\n\n masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))\n assert len(masked_lms) <= num_to_predict\n masked_lms = sorted(masked_lms, key=lambda x: x.index)\n\n masked_lm_positions = []\n masked_lm_labels = []\n for p in masked_lms:\n masked_lm_positions.append(p.index)\n masked_lm_labels.append(p.label)\n\n return (output_tokens, masked_lm_positions, masked_lm_labels)\n\n\ndef truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):\n \"\"\"Truncates a pair of sequences to a maximum sequence length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_num_tokens:\n break\n\n trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b\n assert len(trunc_tokens) >= 1\n\n # We want to sometimes truncate from the front and sometimes from the\n # back to add more randomness and avoid biases.\n if random.random() < 0.5:\n del trunc_tokens[0]\n else:\n trunc_tokens.pop()\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n time_start = time.time()\n\n # random seed\n random.seed(args.random_seed)\n\n # create output dir\n output_dir = os.path.expanduser(args.output_dir)\n nlp.utils.mkdir(output_dir)\n\n # vocabulary and tokenizer\n if args.sentencepiece:\n logging.info('loading vocab file from sentence piece model: %s', args.sentencepiece)\n if args.dataset_name:\n warnings.warn('Both --dataset_name and --sentencepiece are provided. 
'\n 'The vocabulary will be loaded based on --sentencepiece.')\n vocab = nlp.vocab.BERTVocab.from_sentencepiece(args.sentencepiece)\n tokenizer = nlp.data.BERTSPTokenizer(args.sentencepiece, vocab, num_best=args.sp_nbest,\n alpha=args.sp_alpha, lower=not args.cased)\n else:\n logging.info('loading vocab file from pre-defined dataset: %s', args.dataset_name)\n vocab = nlp.data.utils._load_pretrained_vocab(args.dataset_name, root=output_dir,\n cls=nlp.vocab.BERTVocab)\n tokenizer = BERTTokenizer(vocab=vocab, lower='uncased' in args.dataset_name)\n\n # count the number of input files\n input_files = []\n for input_pattern in args.input_file.split(','):\n input_files.extend(glob.glob(os.path.expanduser(input_pattern)))\n for input_file in input_files:\n logging.info('\\t%s', input_file)\n num_inputs = len(input_files)\n num_outputs = min(args.num_outputs, len(input_files))\n logging.info('*** Reading from %d input files ***', num_inputs)\n\n # calculate the number of splits\n file_splits = []\n split_size = (num_inputs + num_outputs - 1) // num_outputs\n for i in range(num_outputs):\n split_start = i * split_size\n split_end = min(num_inputs, (i + 1) * split_size)\n file_splits.append(input_files[split_start:split_end])\n\n # prepare workload\n count = 0\n process_args = []\n\n for i, file_split in enumerate(file_splits):\n output_file = os.path.join(output_dir, 'part-{}.npz'.format(str(i).zfill(3)))\n count += len(file_split)\n process_args.append((file_split, tokenizer, args.max_seq_length, args.short_seq_prob,\n args.masked_lm_prob, args.max_predictions_per_seq,\n args.whole_word_mask,\n vocab, args.dupe_factor, 1, None, output_file))\n\n # sanity check\n assert count == len(input_files)\n\n # dispatch to workers\n nworker = args.num_workers\n if nworker > 1:\n pool = Pool(nworker)\n pool.map(create_training_instances, process_args)\n else:\n for process_arg in process_args:\n create_training_instances(process_arg)\n\n time_end = time.time()\n logging.info('Time cost=%.1f', time_end - time_start)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Pre-training data generator for BERT',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n '--input_file',\n type=str,\n required=True,\n help='Input files, separated by comma. For example, \"~/data/*.txt\"')\n\n parser.add_argument(\n '--output_dir',\n type=str,\n required=True,\n help='Output directory.')\n\n parser.add_argument(\n '--dataset_name',\n type=str,\n default=None,\n choices=['book_corpus_wiki_en_uncased', 'book_corpus_wiki_en_cased',\n 'wiki_multilingual_uncased', 'wiki_multilingual_cased', 'wiki_cn_cased'],\n help='The dataset name for the vocab file BERT model was trained on. For example, '\n '\"book_corpus_wiki_en_uncased\"')\n\n parser.add_argument(\n '--sentencepiece',\n type=str,\n default=None,\n help='Path to the sentencepiece .model file for both tokenization and vocab.')\n\n parser.add_argument(\n '--cased',\n action='store_true',\n help='Effective only if --sentencepiece is set')\n\n parser.add_argument('--sp_nbest', type=int, default=0,\n help='Number of best candidates for sampling subwords with sentencepiece. 
')\n\n parser.add_argument('--sp_alpha', type=float, default=1.0,\n help='Inverse temperature for probability rescaling for sentencepiece '\n 'unigram sampling')\n\n parser.add_argument(\n '--whole_word_mask',\n action='store_true',\n help='Whether to use whole word masking rather than per-subword masking.')\n\n parser.add_argument(\n '--max_seq_length', type=int, default=512, help='Maximum sequence length.')\n\n parser.add_argument(\n '--max_predictions_per_seq',\n type=int,\n default=80,\n help='Maximum number of masked LM predictions per sequence. ')\n\n parser.add_argument(\n '--random_seed',\n type=int,\n default=12345,\n help='Random seed for data generation.')\n\n parser.add_argument(\n '--dupe_factor',\n type=int,\n default=5,\n help='Number of times to duplicate the input data (with different masks).')\n\n parser.add_argument(\n '--masked_lm_prob',\n type=float,\n default=0.15,\n help='Masked LM probability.')\n\n parser.add_argument(\n '--short_seq_prob',\n type=float,\n default=0.1,\n help='Probability of creating sequences which are shorter than the '\n 'maximum length. ')\n\n parser.add_argument(\n '--verbose',\n action='store_true',\n help='Print debug information')\n\n parser.add_argument(\n '--num_workers',\n type=int,\n default=8,\n help='Number of workers for parallel processing, where each generates an output file.')\n\n parser.add_argument(\n '--num_outputs',\n type=int,\n default=1,\n help='Number of desired output files, where each is processed independently by a worker.')\n\n args = parser.parse_args()\n logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.INFO)\n logging.info(args)\n main()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=too-many-lines\n\"\"\"Encoder and decoder usded in sequence-to-sequence learning.\"\"\"\n\n__all__ = ['TransformerEncoder', 'PositionwiseFFN', 'TransformerEncoderCell',\n 'transformer_en_de_512']\n\nimport math\nimport os\n\nimport numpy as np\nimport mxnet as mx\nfrom mxnet import cpu, gluon\nfrom mxnet.gluon import nn\nfrom mxnet.gluon.block import HybridBlock\nfrom mxnet.gluon.model_zoo import model_store\n\nfrom ..base import get_home_dir\nfrom ..utils.parallel import Parallelizable\nfrom .block import GELU\nfrom .seq2seq_encoder_decoder import (Seq2SeqDecoder, Seq2SeqEncoder,\n Seq2SeqOneStepDecoder)\nfrom .translation import NMTModel\nfrom .utils import _load_pretrained_params, _load_vocab\nfrom .attention_cell import _get_attention_cell\n\ndef _position_encoding_init(max_length, dim):\n \"\"\"Init the sinusoid position encoding table \"\"\"\n position_enc = np.arange(max_length).reshape((-1, 1)) \\\n / (np.power(10000, (2. / dim) * np.arange(dim).reshape((1, -1))))\n # Apply the cosine to even columns and sin to odds.\n position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) # dim 2i\n position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) # dim 2i+1\n return position_enc\n\n\n###############################################################################\n# ENCODER #\n###############################################################################\n\nclass PositionwiseFFN(HybridBlock):\n \"\"\"Positionwise Feed-Forward Neural Network.\n\n Parameters\n ----------\n units : int\n Number of units for the output\n hidden_size : int\n Number of units in the hidden layer of position-wise feed-forward networks\n dropout : float\n Dropout probability for the output\n use_residual : bool\n Add residual connection between the input and the output\n ffn1_dropout : bool, default False\n If True, apply dropout both after the first and second Positionwise\n Feed-Forward Neural Network layers. 
If False, only apply dropout after\n the second.\n activation : str, default 'relu'\n Activation function\n layer_norm_eps : float, default 1e-5\n Epsilon parameter passed to for mxnet.gluon.nn.LayerNorm\n weight_initializer : str or Initializer\n Initializer for the input weights matrix, used for the linear\n transformation of the inputs.\n bias_initializer : str or Initializer\n Initializer for the bias vector.\n prefix : str, default None\n Prefix for name of `Block`s\n (and name of weight if params is `None`).\n params : Parameter or None\n Container for weight sharing between cells.\n Created if `None`.\n \"\"\"\n\n def __init__(self, *, units=512, hidden_size=2048, dropout=0.0, use_residual=True,\n ffn1_dropout=False, activation='relu', layer_norm_eps=1e-5,\n weight_initializer=None, bias_initializer='zeros', prefix=None, params=None):\n super().__init__(prefix=prefix, params=params)\n self._use_residual = use_residual\n self._dropout = dropout\n self._ffn1_dropout = ffn1_dropout\n with self.name_scope():\n self.ffn_1 = nn.Dense(units=hidden_size, flatten=False,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer,\n prefix='ffn_1_')\n self.activation = self._get_activation(activation) if activation else None\n self.ffn_2 = nn.Dense(units=units, flatten=False,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer,\n prefix='ffn_2_')\n if dropout:\n self.dropout_layer = nn.Dropout(rate=dropout)\n self.layer_norm = nn.LayerNorm(in_channels=units, epsilon=layer_norm_eps)\n\n def _get_activation(self, act):\n \"\"\"Get activation block based on the name. \"\"\"\n if isinstance(act, str):\n if act.lower() == 'gelu':\n return GELU()\n elif act.lower() == 'approx_gelu':\n return GELU(approximate=True)\n else:\n return gluon.nn.Activation(act)\n assert isinstance(act, gluon.Block)\n return act\n\n def hybrid_forward(self, F, inputs): # pylint: disable=arguments-differ\n \"\"\"Position-wise encoding of the inputs.\n\n Parameters\n ----------\n inputs : Symbol or NDArray\n Input sequence. 
Shape (batch_size, length, C_in)\n\n Returns\n -------\n outputs : Symbol or NDArray\n Shape (batch_size, length, C_out)\n \"\"\"\n outputs = self.ffn_1(inputs)\n if self.activation:\n outputs = self.activation(outputs)\n if self._dropout and self._ffn1_dropout:\n outputs = self.dropout_layer(outputs)\n outputs = self.ffn_2(outputs)\n if self._dropout:\n outputs = self.dropout_layer(outputs)\n if self._use_residual:\n outputs = outputs + inputs\n outputs = self.layer_norm(outputs)\n return outputs\n\n\nclass TransformerEncoderCell(HybridBlock):\n \"\"\"Structure of the Transformer Encoder Cell.\n\n Parameters\n ----------\n attention_cell : AttentionCell or str, default 'multi_head'\n Arguments of the attention cell.\n Can be 'multi_head', 'scaled_luong', 'scaled_dot', 'dot', 'cosine', 'normed_mlp', 'mlp'\n units : int\n Number of units for the output\n hidden_size : int\n number of units in the hidden layer of position-wise feed-forward networks\n num_heads : int\n Number of heads in multi-head attention\n scaled : bool\n Whether to scale the softmax input by the sqrt of the input dimension\n in multi-head attention\n dropout : float\n use_residual : bool\n output_attention: bool\n Whether to output the attention weights\n attention_use_bias : bool, default False\n Whether to use bias when projecting the query/key/values in the attention cell.\n attention_proj_use_bias : bool, default False\n Whether to use bias when projecting the output of the attention cell.\n weight_initializer : str or Initializer\n Initializer for the input weights matrix, used for the linear\n transformation of the inputs.\n bias_initializer : str or Initializer\n Initializer for the bias vector.\n prefix : str, default None\n Prefix for name of `Block`s. (and name of weight if params is `None`).\n params : Parameter or None\n Container for weight sharing between cells. Created if `None`.\n activation : str, default None\n Activation methods in PositionwiseFFN\n layer_norm_eps : float, default 1e-5\n Epsilon for layer_norm\n\n Inputs:\n - **inputs** : input sequence. Shape (batch_size, length, C_in)\n - **mask** : mask for inputs. 
Shape (batch_size, length, length)\n\n Outputs:\n - **outputs**: output tensor of the transformer encoder cell.\n Shape (batch_size, length, C_out)\n - **additional_outputs**: the additional output of all the transformer encoder cell.\n \"\"\"\n\n def __init__(self, *, attention_cell='multi_head', units=128, hidden_size=512, num_heads=4,\n scaled=True, dropout=0.0, use_residual=True, output_attention=False,\n attention_proj_use_bias=False, attention_use_bias=False, weight_initializer=None,\n bias_initializer='zeros', prefix=None, params=None, activation='relu',\n layer_norm_eps=1e-5):\n super().__init__(prefix=prefix, params=params)\n self._dropout = dropout\n self._use_residual = use_residual\n self._output_attention = output_attention\n with self.name_scope():\n if dropout:\n self.dropout_layer = nn.Dropout(rate=dropout)\n self.attention_cell = _get_attention_cell(attention_cell, units=units,\n num_heads=num_heads, scaled=scaled,\n dropout=dropout, use_bias=attention_use_bias)\n self.proj = nn.Dense(units=units, flatten=False, use_bias=attention_proj_use_bias,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer, prefix='proj_')\n self.ffn = PositionwiseFFN(units=units, hidden_size=hidden_size, dropout=dropout,\n use_residual=use_residual,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer, activation=activation,\n layer_norm_eps=layer_norm_eps)\n self.layer_norm = nn.LayerNorm(in_channels=units, epsilon=layer_norm_eps)\n\n\n def hybrid_forward(self, F, inputs, mask=None): # pylint: disable=arguments-differ\n \"\"\"Transformer Encoder Attention Cell.\n\n Parameters\n ----------\n inputs : Symbol or NDArray\n Input sequence. Shape (batch_size, length, C_in)\n mask : Symbol or NDArray or None\n Mask for inputs. Shape (batch_size, length, length)\n\n Returns\n -------\n encoder_cell_outputs: list\n Outputs of the encoder cell. Contains:\n\n - outputs of the transformer encoder cell. Shape (batch_size, length, C_out)\n - additional_outputs of all the transformer encoder cell\n \"\"\"\n outputs, attention_weights = self.attention_cell(inputs, inputs, inputs, mask)\n outputs = self.proj(outputs)\n if self._dropout:\n outputs = self.dropout_layer(outputs)\n if self._use_residual:\n outputs = outputs + inputs\n outputs = self.layer_norm(outputs)\n outputs = self.ffn(outputs)\n additional_outputs = []\n if self._output_attention:\n additional_outputs.append(attention_weights)\n return outputs, additional_outputs\n\nclass TransformerEncoder(HybridBlock, Seq2SeqEncoder):\n \"\"\"Structure of the Transformer Encoder.\n\n Parameters\n ----------\n attention_cell : AttentionCell or str, default 'multi_head'\n Arguments of the attention cell.\n Can be 'multi_head', 'scaled_luong', 'scaled_dot', 'dot', 'cosine', 'normed_mlp', 'mlp'\n num_layers : int\n Number of attention layers.\n units : int\n Number of units for the output.\n hidden_size : int\n number of units in the hidden layer of position-wise feed-forward networks\n max_length : int\n Maximum length of the input sequence\n num_heads : int\n Number of heads in multi-head attention\n scaled : bool\n Whether to scale the softmax input by the sqrt of the input dimension\n in multi-head attention\n scale_embed : bool, default True\n Whether to scale the input embeddings by the sqrt of the `units`.\n norm_inputs : bool, default True\n Whether to normalize the input embeddings with LayerNorm. 
If dropout is\n enabled, normalization happens after dropout is applied to inputs.\n dropout : float\n Dropout probability of the attention probabilities.\n use_residual : bool\n Whether to use residual connection.\n output_attention: bool, default False\n Whether to output the attention weights\n output_all_encodings: bool, default False\n Whether to output encodings of all encoder's cells, or only the last one\n weight_initializer : str or Initializer\n Initializer for the input weights matrix, used for the linear\n transformation of the inputs.\n bias_initializer : str or Initializer\n Initializer for the bias vector.\n prefix : str, default None.\n Prefix for name of `Block`s. (and name of weight if params is `None`).\n params : Parameter or None\n Container for weight sharing between cells. Created if `None`.\n\n Inputs:\n - **inputs** : input sequence of shape (batch_size, length, C_in)\n - **states** : list of tensors for initial states and masks.\n - **valid_length** : valid lengths of each sequence. Usually used when part of sequence\n has been padded. Shape is (batch_size, )\n\n Outputs:\n - **outputs** : the output of the encoder. Shape is (batch_size, length, C_out)\n - **additional_outputs** : list of tensors.\n Either be an empty list or contains the attention weights in this step.\n The attention weights will have shape (batch_size, length, mem_length) or\n (batch_size, num_heads, length, mem_length)\n \"\"\"\n\n def __init__(self, *, attention_cell='multi_head', num_layers=2, units=512, hidden_size=2048,\n max_length=50, num_heads=4, scaled=True, scale_embed=True, norm_inputs=True,\n dropout=0.0, use_residual=True, output_attention=False, output_all_encodings=False,\n weight_initializer=None, bias_initializer='zeros', prefix=None, params=None):\n super().__init__(prefix=prefix, params=params)\n assert units % num_heads == 0,\\\n 'In TransformerEncoder, The units should be divided exactly ' \\\n 'by the number of heads. Received units={}, num_heads={}' \\\n .format(units, num_heads)\n self._max_length = max_length\n self._units = units\n self._output_attention = output_attention\n self._output_all_encodings = output_all_encodings\n self._dropout = dropout\n self._scale_embed = scale_embed\n self._norm_inputs = norm_inputs\n\n with self.name_scope():\n if dropout:\n self.dropout_layer = nn.Dropout(rate=dropout)\n if self._norm_inputs:\n self.layer_norm = nn.LayerNorm(in_channels=units, epsilon=1e-5)\n self.position_weight = self.params.get_constant(\n 'const', _position_encoding_init(max_length, units))\n self.transformer_cells = nn.HybridSequential()\n for i in range(num_layers):\n cell = TransformerEncoderCell(\n units=units, hidden_size=hidden_size, num_heads=num_heads,\n attention_cell=attention_cell, weight_initializer=weight_initializer,\n bias_initializer=bias_initializer, dropout=dropout, use_residual=use_residual,\n scaled=scaled, output_attention=output_attention, prefix='transformer%d_' % i)\n self.transformer_cells.add(cell)\n\n def __call__(self, inputs, states=None, valid_length=None): #pylint: disable=arguments-differ\n \"\"\"Encode the inputs given the states and valid sequence length.\n\n Parameters\n ----------\n inputs : NDArray or Symbol\n Input sequence. Shape (batch_size, length, C_in)\n states : list of NDArrays or Symbols\n Initial states. The list of initial states and masks\n valid_length : NDArray or Symbol\n Valid lengths of each sequence. This is usually used when part of sequence has\n been padded. 
Shape (batch_size,)\n Returns\n -------\n encoder_outputs: list\n Outputs of the encoder. Contains:\n\n - outputs of the transformer encoder. Shape (batch_size, length, C_out)\n - additional_outputs of all the transformer encoder\n \"\"\"\n return super().__call__(inputs, states, valid_length)\n\n def hybrid_forward(self, F, inputs, states=None, valid_length=None, position_weight=None):\n # pylint: disable=arguments-differ\n \"\"\"Encode the inputs given the states and valid sequence length.\n\n Parameters\n ----------\n inputs : NDArray or Symbol\n Input sequence. Shape (batch_size, length, C_in)\n states : list of NDArrays or Symbols\n Initial states. The list of initial states and masks\n valid_length : NDArray or Symbol\n Valid lengths of each sequence. This is usually used when part of sequence has\n been padded. Shape (batch_size,)\n position_weight : NDArray or Symbol\n The weight of positional encoding. Shape (max_len, C_in).\n\n Returns\n -------\n outputs : NDArray or Symbol, or List[NDArray] or List[Symbol]\n If output_all_encodings flag is False, then the output of the last encoder.\n If output_all_encodings flag is True, then the list of all outputs of all encoders.\n In both cases, shape of the tensor(s) is/are (batch_size, length, C_out)\n additional_outputs : list\n Either be an empty list or contains the attention weights in this step.\n The attention weights will have shape (batch_size, length, length) or\n (batch_size, num_heads, length, length)\n\n \"\"\"\n steps = F.contrib.arange_like(inputs, axis=1)\n if valid_length is not None:\n ones = F.ones_like(steps)\n mask = F.broadcast_lesser(F.reshape(steps, shape=(1, -1)),\n F.reshape(valid_length, shape=(-1, 1)))\n mask = F.broadcast_mul(F.expand_dims(mask, axis=1),\n F.broadcast_mul(ones, F.reshape(ones, shape=(-1, 1))))\n if states is None:\n states = [mask]\n else:\n states.append(mask)\n else:\n mask = None\n\n if states is None:\n states = [steps]\n else:\n states.append(steps)\n\n if self._scale_embed:\n inputs = inputs * math.sqrt(self._units)\n # Positional encoding\n positional_embed = F.Embedding(steps, position_weight, self._max_length, self._units)\n inputs = F.broadcast_add(inputs, F.expand_dims(positional_embed, axis=0))\n\n if self._dropout:\n inputs = self.dropout_layer(inputs)\n\n if self._norm_inputs:\n inputs = self.layer_norm(inputs)\n\n all_encodings_outputs = []\n additional_outputs = []\n for cell in self.transformer_cells:\n outputs, attention_weights = cell(inputs, mask)\n inputs = outputs\n if self._output_all_encodings:\n if valid_length is not None:\n outputs = F.SequenceMask(outputs, sequence_length=valid_length,\n use_sequence_length=True, axis=1)\n all_encodings_outputs.append(outputs)\n\n if self._output_attention:\n additional_outputs.append(attention_weights)\n\n if valid_length is not None and not self._output_all_encodings:\n # if self._output_all_encodings, SequenceMask is already applied above\n outputs = F.SequenceMask(outputs, sequence_length=valid_length,\n use_sequence_length=True, axis=1)\n\n if self._output_all_encodings:\n return all_encodings_outputs, additional_outputs\n return outputs, additional_outputs\n\n###############################################################################\n# DECODER #\n###############################################################################\n\nclass TransformerDecoderCell(HybridBlock):\n \"\"\"Structure of the Transformer Decoder Cell.\n\n Parameters\n ----------\n attention_cell : AttentionCell or str, default 'multi_head'\n Arguments 
of the attention cell.\n Can be 'multi_head', 'scaled_luong', 'scaled_dot', 'dot', 'cosine', 'normed_mlp', 'mlp'\n units : int\n Number of units for the output\n hidden_size : int\n number of units in the hidden layer of position-wise feed-forward networks\n num_heads : int\n Number of heads in multi-head attention\n scaled : bool\n Whether to scale the softmax input by the sqrt of the input dimension\n in multi-head attention\n dropout : float\n Dropout probability.\n use_residual : bool\n Whether to use residual connection.\n output_attention: bool\n Whether to output the attention weights\n weight_initializer : str or Initializer\n Initializer for the input weights matrix, used for the linear\n transformation of the inputs.\n bias_initializer : str or Initializer\n Initializer for the bias vector.\n prefix : str, default None\n Prefix for name of `Block`s\n (and name of weight if params is `None`).\n params : Parameter or None\n Container for weight sharing between cells.\n Created if `None`.\n \"\"\"\n def __init__(self, attention_cell='multi_head', units=128,\n hidden_size=512, num_heads=4, scaled=True,\n dropout=0.0, use_residual=True, output_attention=False,\n weight_initializer=None, bias_initializer='zeros',\n prefix=None, params=None):\n super(TransformerDecoderCell, self).__init__(prefix=prefix, params=params)\n self._units = units\n self._num_heads = num_heads\n self._dropout = dropout\n self._use_residual = use_residual\n self._output_attention = output_attention\n self._scaled = scaled\n with self.name_scope():\n if dropout:\n self.dropout_layer = nn.Dropout(rate=dropout)\n self.attention_cell_in = _get_attention_cell(attention_cell,\n units=units,\n num_heads=num_heads,\n scaled=scaled,\n dropout=dropout)\n self.attention_cell_inter = _get_attention_cell(attention_cell,\n units=units,\n num_heads=num_heads,\n scaled=scaled,\n dropout=dropout)\n self.proj_in = nn.Dense(units=units, flatten=False,\n use_bias=False,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer,\n prefix='proj_in_')\n self.proj_inter = nn.Dense(units=units, flatten=False,\n use_bias=False,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer,\n prefix='proj_inter_')\n self.ffn = PositionwiseFFN(hidden_size=hidden_size,\n units=units,\n use_residual=use_residual,\n dropout=dropout,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer)\n\n self.layer_norm_in = nn.LayerNorm()\n self.layer_norm_inter = nn.LayerNorm()\n\n def hybrid_forward(self, F, inputs, mem_value, mask=None, mem_mask=None): #pylint: disable=unused-argument\n # pylint: disable=arguments-differ\n \"\"\"Transformer Decoder Attention Cell.\n\n Parameters\n ----------\n inputs : Symbol or NDArray\n Input sequence. Shape (batch_size, length, C_in)\n mem_value : Symbol or NDArrays\n Memory value, i.e. output of the encoder. Shape (batch_size, mem_length, C_in)\n mask : Symbol or NDArray or None\n Mask for inputs. Shape (batch_size, length, length)\n mem_mask : Symbol or NDArray or None\n Mask for mem_value. Shape (batch_size, length, mem_length)\n\n Returns\n -------\n decoder_cell_outputs: list\n Outputs of the decoder cell. Contains:\n\n - outputs of the transformer decoder cell. 
Shape (batch_size, length, C_out)\n - additional_outputs of all the transformer decoder cell\n \"\"\"\n outputs, attention_in_outputs =\\\n self.attention_cell_in(inputs, inputs, inputs, mask)\n outputs = self.proj_in(outputs)\n if self._dropout:\n outputs = self.dropout_layer(outputs)\n if self._use_residual:\n outputs = outputs + inputs\n outputs = self.layer_norm_in(outputs)\n inputs = outputs\n outputs, attention_inter_outputs = \\\n self.attention_cell_inter(inputs, mem_value, mem_value, mem_mask)\n outputs = self.proj_inter(outputs)\n if self._dropout:\n outputs = self.dropout_layer(outputs)\n if self._use_residual:\n outputs = outputs + inputs\n outputs = self.layer_norm_inter(outputs)\n outputs = self.ffn(outputs)\n additional_outputs = []\n if self._output_attention:\n additional_outputs.append(attention_in_outputs)\n additional_outputs.append(attention_inter_outputs)\n return outputs, additional_outputs\n\n\nclass _BaseTransformerDecoder(HybridBlock):\n def __init__(self, attention_cell='multi_head', num_layers=2, units=128, hidden_size=2048,\n max_length=50, num_heads=4, scaled=True, scale_embed=True, norm_inputs=True,\n dropout=0.0, use_residual=True, output_attention=False, weight_initializer=None,\n bias_initializer='zeros', prefix=None, params=None):\n super().__init__(prefix=prefix, params=params)\n assert units % num_heads == 0, 'In TransformerDecoder, the units should be divided ' \\\n 'exactly by the number of heads. Received units={}, ' \\\n 'num_heads={}'.format(units, num_heads)\n self._num_layers = num_layers\n self._units = units\n self._hidden_size = hidden_size\n self._num_states = num_heads\n self._max_length = max_length\n self._dropout = dropout\n self._use_residual = use_residual\n self._output_attention = output_attention\n self._scaled = scaled\n self._scale_embed = scale_embed\n self._norm_inputs = norm_inputs\n with self.name_scope():\n if dropout:\n self.dropout_layer = nn.Dropout(rate=dropout)\n if self._norm_inputs:\n self.layer_norm = nn.LayerNorm()\n encoding = _position_encoding_init(max_length, units)\n self.position_weight = self.params.get_constant('const', encoding.astype(np.float32))\n self.transformer_cells = nn.HybridSequential()\n for i in range(num_layers):\n self.transformer_cells.add(\n TransformerDecoderCell(units=units, hidden_size=hidden_size,\n num_heads=num_heads, attention_cell=attention_cell,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer, dropout=dropout,\n scaled=scaled, use_residual=use_residual,\n output_attention=output_attention,\n prefix='transformer%d_' % i))\n\n def init_state_from_encoder(self, encoder_outputs, encoder_valid_length=None):\n \"\"\"Initialize the state from the encoder outputs.\n\n Parameters\n ----------\n encoder_outputs : list\n encoder_valid_length : NDArray or None\n\n Returns\n -------\n decoder_states : list\n The decoder states, includes:\n\n - mem_value : NDArray\n - mem_masks : NDArray or None\n \"\"\"\n mem_value = encoder_outputs\n decoder_states = [mem_value]\n mem_length = mem_value.shape[1]\n if encoder_valid_length is not None:\n dtype = encoder_valid_length.dtype\n ctx = encoder_valid_length.context\n mem_masks = mx.nd.broadcast_lesser(\n mx.nd.arange(mem_length, ctx=ctx, dtype=dtype).reshape((1, -1)),\n encoder_valid_length.reshape((-1, 1)))\n decoder_states.append(mem_masks)\n else:\n decoder_states.append(None)\n return decoder_states\n\n def hybrid_forward(self, F, inputs, states, valid_length=None, position_weight=None):\n #pylint: 
disable=arguments-differ\n \"\"\"Decode the decoder inputs. This function is only used for training.\n\n Parameters\n ----------\n inputs : NDArray, Shape (batch_size, length, C_in)\n states : list of NDArrays or None\n Initial states. The list of decoder states\n valid_length : NDArray or None\n Valid lengths of each sequence. This is usually used when part of sequence has\n been padded. Shape (batch_size,)\n\n Returns\n -------\n output : NDArray, Shape (batch_size, length, C_out)\n states : list\n The decoder states:\n - mem_value : NDArray\n - mem_masks : NDArray or None\n additional_outputs : list of list\n Either be an empty list or contains the attention weights in this step.\n The attention weights will have shape (batch_size, length, mem_length) or\n (batch_size, num_heads, length, mem_length)\n \"\"\"\n\n length_array = F.contrib.arange_like(inputs, axis=1)\n mask = F.broadcast_lesser_equal(length_array.reshape((1, -1)),\n length_array.reshape((-1, 1)))\n if valid_length is not None:\n batch_mask = F.broadcast_lesser(length_array.reshape((1, -1)),\n valid_length.reshape((-1, 1)))\n batch_mask = F.expand_dims(batch_mask, -1)\n mask = F.broadcast_mul(batch_mask, F.expand_dims(mask, 0))\n else:\n mask = F.expand_dims(mask, axis=0)\n mask = F.broadcast_like(mask, inputs, lhs_axes=(0, ), rhs_axes=(0, ))\n\n mem_value, mem_mask = states\n if mem_mask is not None:\n mem_mask = F.expand_dims(mem_mask, axis=1)\n mem_mask = F.broadcast_like(mem_mask, inputs, lhs_axes=(1, ), rhs_axes=(1, ))\n\n if self._scale_embed:\n inputs = inputs * math.sqrt(self._units)\n\n # Positional Encoding\n steps = F.contrib.arange_like(inputs, axis=1)\n positional_embed = F.Embedding(steps, position_weight, self._max_length, self._units)\n inputs = F.broadcast_add(inputs, F.expand_dims(positional_embed, axis=0))\n\n if self._dropout:\n inputs = self.dropout_layer(inputs)\n\n if self._norm_inputs:\n inputs = self.layer_norm(inputs)\n\n additional_outputs = []\n attention_weights_l = []\n outputs = inputs\n for cell in self.transformer_cells:\n outputs, attention_weights = cell(outputs, mem_value, mask, mem_mask)\n if self._output_attention:\n attention_weights_l.append(attention_weights)\n if self._output_attention:\n additional_outputs.extend(attention_weights_l)\n\n if valid_length is not None:\n outputs = F.SequenceMask(outputs, sequence_length=valid_length,\n use_sequence_length=True, axis=1)\n return outputs, states, additional_outputs\n\n\nclass TransformerDecoder(_BaseTransformerDecoder, Seq2SeqDecoder):\n \"\"\"Transformer Decoder.\n\n Multi-step ahead decoder for use during training with teacher forcing.\n\n Parameters\n ----------\n attention_cell : AttentionCell or str, default 'multi_head'\n Arguments of the attention cell.\n Can be 'multi_head', 'scaled_luong', 'scaled_dot', 'dot', 'cosine', 'normed_mlp', 'mlp'\n num_layers : int\n Number of attention layers.\n units : int\n Number of units for the output.\n hidden_size : int\n number of units in the hidden layer of position-wise feed-forward networks\n max_length : int\n Maximum length of the input sequence. This is used for constructing position encoding\n num_heads : int\n Number of heads in multi-head attention\n scaled : bool\n Whether to scale the softmax input by the sqrt of the input dimension\n in multi-head attention\n scale_embed : bool, default True\n Whether to scale the input embeddings by the sqrt of the `units`.\n norm_inputs : bool, default True\n Whether to normalize the input embeddings with LayerNorm. 
If dropout is\n enabled, normalization happens after dropout is applied to inputs.\n dropout : float\n Dropout probability.\n use_residual : bool\n Whether to use residual connection.\n output_attention: bool\n Whether to output the attention weights\n weight_initializer : str or Initializer\n Initializer for the input weights matrix, used for the linear\n transformation of the inputs.\n bias_initializer : str or Initializer\n Initializer for the bias vector.\n prefix : str, default 'rnn_'\n Prefix for name of `Block`s\n (and name of weight if params is `None`).\n params : Parameter or None\n Container for weight sharing between cells.\n Created if `None`.\n \"\"\"\n\n\nclass TransformerOneStepDecoder(_BaseTransformerDecoder, Seq2SeqOneStepDecoder):\n \"\"\"Transformer Decoder.\n\n One-step ahead decoder for use during inference.\n\n Parameters\n ----------\n attention_cell : AttentionCell or str, default 'multi_head'\n Arguments of the attention cell.\n Can be 'multi_head', 'scaled_luong', 'scaled_dot', 'dot', 'cosine', 'normed_mlp', 'mlp'\n num_layers : int\n Number of attention layers.\n units : int\n Number of units for the output.\n hidden_size : int\n number of units in the hidden layer of position-wise feed-forward networks\n max_length : int\n Maximum length of the input sequence. This is used for constructing position encoding\n num_heads : int\n Number of heads in multi-head attention\n scaled : bool\n Whether to scale the softmax input by the sqrt of the input dimension\n in multi-head attention\n scale_embed : bool, default True\n Whether to scale the input embeddings by the sqrt of the `units`.\n norm_inputs : bool, default True\n Whether to normalize the input embeddings with LayerNorm. If dropout is\n enabled, normalization happens after dropout is applied to inputs.\n dropout : float\n Dropout probability.\n use_residual : bool\n Whether to use residual connection.\n output_attention: bool\n Whether to output the attention weights\n weight_initializer : str or Initializer\n Initializer for the input weights matrix, used for the linear\n transformation of the inputs.\n bias_initializer : str or Initializer\n Initializer for the bias vector.\n prefix : str, default 'rnn_'\n Prefix for name of `Block`s\n (and name of weight if params is `None`).\n params : Parameter or None\n Container for weight sharing between cells.\n Created if `None`.\n \"\"\"\n\n def forward(self, step_input, states): # pylint: disable=arguments-differ\n # We implement forward, as the number of states changes between the\n # first and later calls of the one-step ahead Transformer decoder. This\n # is due to the lack of numpy shape semantics. Once we enable numpy\n # shape semantic in the GluonNLP code-base, the number of states should\n # stay constant, but the first state element will be an array of shape\n # (batch_size, 0, C_in) at the first call.\n if len(states) == 3: # step_input from prior call is included\n last_embeds, _, _ = states\n inputs = mx.nd.concat(last_embeds, mx.nd.expand_dims(step_input, axis=1), dim=1)\n states = states[1:]\n else:\n inputs = mx.nd.expand_dims(step_input, axis=1)\n return super().forward(inputs, states)\n\n def hybrid_forward(self, F, inputs, states, position_weight):\n # pylint: disable=arguments-differ\n \"\"\"One-step-ahead decoding of the Transformer decoder.\n\n Parameters\n ----------\n step_input : NDArray, Shape (batch_size, C_in)\n states : list of NDArray\n\n Returns\n -------\n step_output : NDArray\n The output of the decoder. 
Shape is (batch_size, C_out)\n new_states: list\n Includes\n - last_embeds : NDArray or None\n - mem_value : NDArray\n - mem_masks : NDArray, optional\n\n step_additional_outputs : list of list\n Either be an empty list or contains the attention weights in this step.\n The attention weights will have shape (batch_size, length, mem_length) or\n (batch_size, num_heads, length, mem_length)\n \"\"\"\n outputs, states, additional_outputs = super().hybrid_forward(\n F, inputs, states, valid_length=None, position_weight=position_weight)\n\n # Append inputs to states: They are needed in the next one-step ahead decoding step\n new_states = [inputs] + states\n # Only return one-step ahead\n step_output = F.slice_axis(outputs, axis=1, begin=-1, end=None).reshape((0, -1))\n\n return step_output, new_states, additional_outputs\n\n\n\n###############################################################################\n# MODEL API #\n###############################################################################\n\nmodel_store._model_sha1.update(\n {name: checksum for checksum, name in [\n ('e25287c5a924b7025e08d626f02626d5fa3af2d1', 'transformer_en_de_512_WMT2014'),\n ]})\n\ndef get_transformer_encoder_decoder(num_layers=2,\n num_heads=8, scaled=True,\n units=512, hidden_size=2048, dropout=0.0, use_residual=True,\n max_src_length=50, max_tgt_length=50,\n weight_initializer=None, bias_initializer='zeros',\n prefix='transformer_', params=None):\n \"\"\"Build a pair of Parallel Transformer encoder/decoder\n\n Parameters\n ----------\n num_layers : int\n num_heads : int\n scaled : bool\n units : int\n hidden_size : int\n dropout : float\n use_residual : bool\n max_src_length : int\n max_tgt_length : int\n weight_initializer : mx.init.Initializer or None\n bias_initializer : mx.init.Initializer or None\n prefix : str, default 'transformer_'\n Prefix for name of `Block`s.\n params : Parameter or None\n Container for weight sharing between layers.\n Created if `None`.\n\n Returns\n -------\n encoder : TransformerEncoder\n decoder : TransformerDecoder\n one_step_ahead_decoder : TransformerOneStepDecoder\n \"\"\"\n encoder = TransformerEncoder(\n num_layers=num_layers, num_heads=num_heads, max_length=max_src_length, units=units,\n hidden_size=hidden_size, dropout=dropout, scaled=scaled, use_residual=use_residual,\n weight_initializer=weight_initializer, bias_initializer=bias_initializer,\n prefix=prefix + 'enc_', params=params)\n decoder = TransformerDecoder(\n num_layers=num_layers, num_heads=num_heads, max_length=max_tgt_length, units=units,\n hidden_size=hidden_size, dropout=dropout, scaled=scaled, use_residual=use_residual,\n weight_initializer=weight_initializer, bias_initializer=bias_initializer,\n prefix=prefix + 'dec_', params=params)\n one_step_ahead_decoder = TransformerOneStepDecoder(\n num_layers=num_layers, num_heads=num_heads, max_length=max_tgt_length, units=units,\n hidden_size=hidden_size, dropout=dropout, scaled=scaled, use_residual=use_residual,\n weight_initializer=weight_initializer, bias_initializer=bias_initializer,\n prefix=prefix + 'dec_', params=decoder.collect_params())\n return encoder, decoder, one_step_ahead_decoder\n\n\ndef _get_transformer_model(model_cls, model_name, dataset_name, src_vocab, tgt_vocab, encoder,\n decoder, one_step_ahead_decoder, share_embed, embed_size, tie_weights,\n embed_initializer, pretrained, ctx, root, **kwargs):\n src_vocab = _load_vocab(dataset_name + '_src', src_vocab, root)\n tgt_vocab = _load_vocab(dataset_name + '_tgt', tgt_vocab, root)\n 
kwargs['encoder'] = encoder\n kwargs['decoder'] = decoder\n kwargs['one_step_ahead_decoder'] = one_step_ahead_decoder\n kwargs['src_vocab'] = src_vocab\n kwargs['tgt_vocab'] = tgt_vocab\n kwargs['share_embed'] = share_embed\n kwargs['embed_size'] = embed_size\n kwargs['tie_weights'] = tie_weights\n kwargs['embed_initializer'] = embed_initializer\n # XXX the existing model is trained with prefix 'transformer_'\n net = model_cls(prefix='transformer_', **kwargs)\n if pretrained:\n _load_pretrained_params(net, model_name, dataset_name, root, ctx)\n return net, src_vocab, tgt_vocab\n\n\ndef transformer_en_de_512(dataset_name=None, src_vocab=None, tgt_vocab=None, pretrained=False,\n ctx=cpu(), root=os.path.join(get_home_dir(), 'models'), **kwargs):\n r\"\"\"Transformer pretrained model.\n\n Embedding size is 400, and hidden layer size is 1150.\n\n Parameters\n ----------\n dataset_name : str or None, default None\n src_vocab : gluonnlp.Vocab or None, default None\n tgt_vocab : gluonnlp.Vocab or None, default None\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '$MXNET_HOME/models'\n Location for keeping the model parameters.\n MXNET_HOME defaults to '~/.mxnet'.\n\n Returns\n -------\n gluon.Block, gluonnlp.Vocab, gluonnlp.Vocab\n \"\"\"\n predefined_args = {'num_units': 512,\n 'hidden_size': 2048,\n 'dropout': 0.1,\n 'epsilon': 0.1,\n 'num_layers': 6,\n 'num_heads': 8,\n 'scaled': True,\n 'share_embed': True,\n 'embed_size': 512,\n 'tie_weights': True,\n 'embed_initializer': None}\n mutable_args = frozenset(['num_units', 'hidden_size', 'dropout', 'epsilon', 'num_layers',\n 'num_heads', 'scaled'])\n assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \\\n 'Cannot override predefined model settings.'\n predefined_args.update(kwargs)\n encoder, decoder, one_step_ahead_decoder = get_transformer_encoder_decoder(\n units=predefined_args['num_units'], hidden_size=predefined_args['hidden_size'],\n dropout=predefined_args['dropout'], num_layers=predefined_args['num_layers'],\n num_heads=predefined_args['num_heads'], max_src_length=530, max_tgt_length=549,\n scaled=predefined_args['scaled'])\n return _get_transformer_model(NMTModel, 'transformer_en_de_512', dataset_name, src_vocab,\n tgt_vocab, encoder, decoder, one_step_ahead_decoder,\n predefined_args['share_embed'], predefined_args['embed_size'],\n predefined_args['tie_weights'],\n predefined_args['embed_initializer'], pretrained, ctx, root)\n\n\nclass ParallelTransformer(Parallelizable):\n \"\"\"Data parallel transformer.\n\n Parameters\n ----------\n model : Block\n The transformer model.\n label_smoothing: Block\n The block to perform label smoothing.\n loss_function : Block\n The loss function to optimizer.\n rescale_loss : float\n The scale to which the loss is rescaled to avoid gradient explosion.\n \"\"\"\n def __init__(self, model, label_smoothing, loss_function, rescale_loss):\n self._model = model\n self._label_smoothing = label_smoothing\n self._loss = loss_function\n self._rescale_loss = rescale_loss\n\n def forward_backward(self, x):\n \"\"\"Perform forward and backward computation for a batch of src seq and dst seq\"\"\"\n (src_seq, tgt_seq, src_valid_length, tgt_valid_length), batch_size = x\n with mx.autograd.record():\n out, _ = self._model(src_seq, tgt_seq[:, :-1],\n src_valid_length, tgt_valid_length - 1)\n smoothed_label = self._label_smoothing(tgt_seq[:, 1:])\n ls 
= self._loss(out, smoothed_label, tgt_valid_length - 1).sum()\n ls = (ls * (tgt_seq.shape[1] - 1)) / batch_size / self._rescale_loss\n ls.backward()\n return ls\n"
] |
[
[
"numpy.ascontiguousarray",
"numpy.savez_compressed",
"numpy.array"
],
[
"numpy.arange",
"numpy.cos",
"numpy.sin"
]
] |
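A minimal usage sketch for the entry above, assuming the standard GluonNLP import path and that the pretrained weights are published under the dataset name 'WMT2014' (both are assumptions, not stated in the snippet):

    import mxnet as mx
    from gluonnlp.model.transformer import transformer_en_de_512  # assumed import path

    # Fetch the 6-layer, 512-unit English-German transformer with its vocabularies.
    model, src_vocab, tgt_vocab = transformer_en_de_512(
        dataset_name='WMT2014', pretrained=True, ctx=mx.cpu())
    print(len(src_vocab), len(tgt_vocab))

The returned Block can then be wrapped in the ParallelTransformer class above for data-parallel training.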
red-cheese/icsic-vae-for-bridges
|
[
"640d529c41501db193170c0415ed6c1fb2d61a41"
] |
[
"methods/vae.py"
] |
[
"\n\nimport rec_proba\n\nfrom keras import backend as K\nfrom keras import Model\nfrom keras import initializers\nfrom keras.layers import Dense, Input, Lambda, LSTM, RepeatVector, Reshape\nimport os\nimport numpy as np\nimport pickle\n\n\nBATCH_SIZE = 64\nEPOCHS = 50\nLATENT_DIM = 2\nSTDDEV_INIT = 0.01\nKERNEL_INIT = initializers.RandomNormal(stddev=STDDEV_INIT)\nKERNEL_REG = 'l2'\n\n\nclass _VAEBase:\n \"\"\"\n Variational Auto-Encoder (Kingma and Welling, 2013).\n \"\"\"\n\n _NAME = None\n _MODEL_NAME = None\n _MODEL_FILE_PATTERN = './models/{}.model'\n\n def __init__(self, input_dim, suffix=None):\n self._name = self._MODEL_NAME\n if suffix:\n self._name += '_{}'.format(suffix)\n self._file = self._MODEL_FILE_PATTERN.format(self._name)\n self._input_dim = input_dim\n self._vae, self._encoder = self._build_model()\n\n def _build_model(self):\n raise NotImplementedError\n\n def fit(self, train_data, shuffle=True):\n print('*** VAE: Training ***')\n\n if os.path.isfile(self._file):\n print('Loading model from', self._file)\n self._vae.load_weights(self._file)\n else:\n print('Fitting')\n self._vae.fit(train_data,\n nb_epoch=EPOCHS,\n batch_size=BATCH_SIZE,\n shuffle=shuffle,\n verbose=1)\n print('Saving model to', self._file)\n self._vae.save_weights(self._file)\n\n print('*** VAE: Training completed ***')\n print()\n\n def predict(self, test_data, batch_size=BATCH_SIZE):\n output_mus, output_logsigmas = self._vae.predict(test_data, batch_size=batch_size)\n return output_mus, output_logsigmas\n\n\nclass DenseVAE(_VAEBase):\n \"\"\"\n Variational Auto-Encoder with Dense networks as its encoder and decoder.\n \"\"\"\n\n _NAME = 'dense'\n _MODEL_NAME = 'vae_epochs{e}_batch{b}_ldim{ld}'.format(e=EPOCHS, b=BATCH_SIZE, ld=LATENT_DIM)\n\n def _build_model(self):\n # Encoding.\n x = Input(shape=(self._input_dim,))\n encoder_h = Dense(40, activation='relu', kernel_initializer=KERNEL_INIT, kernel_regularizer=KERNEL_REG)(x)\n z_mean = Dense(LATENT_DIM, kernel_initializer=KERNEL_INIT, kernel_regularizer=KERNEL_REG)(encoder_h)\n z_log_sigma = Dense(LATENT_DIM, kernel_initializer=KERNEL_INIT, kernel_regularizer=KERNEL_REG)(encoder_h)\n\n def sampling(args):\n z_mean, z_log_sigma = args\n # 1 sample per 1 data point.\n epsilon = K.random_normal(shape=(BATCH_SIZE, LATENT_DIM),\n mean=0., stddev=1.)\n return z_mean + K.exp(z_log_sigma) * epsilon\n\n # Sampling from latent space.\n z = Lambda(sampling, output_shape=(LATENT_DIM,))([z_mean, z_log_sigma])\n\n # Decoding samples.\n decoder_h = Dense(40, activation='relu', kernel_initializer=KERNEL_INIT, kernel_regularizer=KERNEL_REG)(z)\n x_decoded_mean = Dense(self._input_dim, kernel_initializer=KERNEL_INIT, kernel_regularizer=KERNEL_REG)(decoder_h)\n # Assume diagonal predicted covariance matrix.\n x_decoded_log_sigma_2 = Dense(self._input_dim, kernel_initializer=KERNEL_INIT, kernel_regularizer=KERNEL_REG)(decoder_h)\n\n # End-to-end VAE.\n vae = Model(x, [x_decoded_mean, x_decoded_log_sigma_2])\n # Save the encoder part separately as we will need it later.\n encoder = Model(x, [z_mean, z_log_sigma])\n\n # Loss: -ELBO.\n reconstruction_loss = -K.sum(\n # x_decoded_log_sigma_2 - matrix of shape (batch_size, input dim).\n -(0.5 * np.log(2 * np.pi) + 0.5 * x_decoded_log_sigma_2)\n # x, x_decoded_mean - matrices of shape (batch_size, input dim).\n - 0.5 * (K.square(x - x_decoded_mean) / K.exp(x_decoded_log_sigma_2)),\n axis=1\n )\n kl_loss = 1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n # Mean 
over batch elements.\n vae_loss = K.mean(reconstruction_loss + kl_loss)\n vae.add_loss(vae_loss)\n vae.compile(optimizer='adam')\n\n return vae, encoder\n\n\nclass RNNVAE(_VAEBase):\n \"\"\"\n Variational Auto-Encoder with RNNs as its encoder and decoder.\n \"\"\"\n\n _NAME = 'rnn'\n _MODEL_NAME = 'rnnvae_epochs{e}_batch{b}_ldim{ld}'.format(e=EPOCHS, b=BATCH_SIZE, ld=LATENT_DIM)\n\n def _build_model(self):\n timesteps = 1\n features = self._input_dim // timesteps\n\n # Encoding.\n x = Input(shape=(self._input_dim,))\n x_reshaped = Reshape((timesteps, features))(x)\n encoder_h = LSTM(40, return_sequences=True)(x_reshaped)\n z_mean = LSTM(LATENT_DIM, activation=None)(encoder_h)\n z_log_sigma = LSTM(LATENT_DIM, activation=None)(encoder_h)\n\n def sampling(args):\n z_mean, z_log_sigma = args\n # 1 sample per 1 data point.\n epsilon = K.random_normal(shape=(BATCH_SIZE, LATENT_DIM),\n mean=0., stddev=1.)\n return z_mean + K.exp(z_log_sigma) * epsilon\n\n # Sampling from latent space.\n z = Lambda(sampling, output_shape=(LATENT_DIM,))([z_mean, z_log_sigma])\n\n # Decoding samples.\n decoder_h = LSTM(40, return_sequences=True)\n # Don't return sequences as the original input did not have the\n # timesteps axis.\n decoder_mean = LSTM(features, activation=None)\n decoder_std = LSTM(features, activation=None)\n\n h_decoded = RepeatVector(timesteps)(z)\n h_decoded = decoder_h(h_decoded)\n\n x_decoded_mean = decoder_mean(h_decoded)\n x_decoded_log_sigma_2 = decoder_std(h_decoded)\n\n # End-to-end VAE.\n vae = Model(x, [x_decoded_mean, x_decoded_log_sigma_2])\n # Save the encoder part separately as we will need it later.\n encoder = Model(x, [z_mean, z_log_sigma])\n\n # Loss: -ELBO.\n reconstruction_loss = -K.sum(\n # x_decoded_log_sigma_2 - matrix of shape (batch_size, input dim).\n -(0.5 * np.log(2 * np.pi) + 0.5 * x_decoded_log_sigma_2)\n # x, x_decoded_mean - matrices of shape (batch_size, input dim).\n - 0.5 * (K.square(x - x_decoded_mean) / K.exp(x_decoded_log_sigma_2)),\n axis=1\n )\n kl_loss = 1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma)\n kl_loss = K.sum(kl_loss, axis=-1)\n kl_loss *= -0.5\n # Mean over batch elements.\n vae_loss = K.mean(reconstruction_loss + kl_loss)\n vae.add_loss(vae_loss)\n vae.compile(optimizer='adam')\n\n vae.summary()\n\n return vae, encoder\n\n\nclass VAEClassifier:\n def __init__(self, vae_class, input_dim, suffix=None,\n # Set empirically, based on dataset 1.\n # -130 is for the DenseVAE.\n recproba_threshold=-130):\n assert vae_class in (DenseVAE, RNNVAE)\n self._vae_class = vae_class\n print('*** VAEClassifier: VAE class: {} ***'.format(vae_class))\n self._vae = vae_class(input_dim=input_dim, suffix=suffix)\n self._recproba_threshold = recproba_threshold\n\n def fit(self, train_data, shuffle=True,\n dump_latent=False, dump_latent_true_labels=None):\n print('*** VAEClassifier: Training the model ***')\n if self._vae_class is RNNVAE:\n assert not shuffle\n self._vae.fit(train_data, shuffle=shuffle)\n print('*** VAEClassifier: Training completed ***')\n print()\n\n output_mus, output_logsigmas_2 = self._vae.predict(train_data, batch_size=BATCH_SIZE)\n recprobas = rec_proba.rec_proba(train_data, output_mus, output_logsigmas_2)\n recproba_mean = np.mean(recprobas)\n recproba_std = np.std(recprobas)\n print('* Rec proba mean:', recproba_mean)\n print('* Rec proba std:', recproba_std)\n print('* VAE rec proba threshold:', self._recproba_threshold)\n\n # Save to plot the latent space, marked event/no event.\n if dump_latent:\n assert dump_latent_true_labels is 
not None # Ground truth (manual) labels.\n z_mus, z_logsigmas = self._vae._encoder.predict(train_data, batch_size=BATCH_SIZE)\n print('Dumping VAE latent space')\n with open('figure8_{}.pkl'.format(self._vae._NAME), 'wb') as f:\n pickle.dump((z_mus, z_logsigmas, dump_latent_true_labels), f)\n\n def predict(self, test_data):\n mus, logsigmas = self._vae.predict(test_data)\n rec_probas = rec_proba.rec_proba(test_data, mus, logsigmas)\n preds = np.where(rec_probas < self._recproba_threshold, 1., 0.)\n return preds\n"
] |
[
[
"numpy.std",
"numpy.mean",
"numpy.where",
"numpy.log"
]
] |
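A sketch of driving the anomaly detector above, assuming vae.py is importable and a ./models/ cache directory exists. Note two constraints visible in the code: the sampling layer hard-codes BATCH_SIZE, so row counts should be multiples of 64, and fit() passes the Keras 1 argument nb_epoch, which newer Keras versions spell epochs:

    import numpy as np
    from methods.vae import DenseVAE, VAEClassifier  # assumed import path

    train = np.random.randn(6400, 100).astype('float32')  # placeholder sensor windows
    test = np.random.randn(640, 100).astype('float32')

    clf = VAEClassifier(DenseVAE, input_dim=100, recproba_threshold=-130)
    clf.fit(train)             # trains the VAE, or reloads cached weights if present
    preds = clf.predict(test)  # 1.0 = reconstruction probability below threshold (event)
    print(preds.mean())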
patdring/CarND-Term1-Behavioral-Cloning-P3
|
[
"3c2dc0327c7bd829d5c19d42c9eed299bb974f7a"
] |
[
"drive.py"
] |
[
"import argparse\nimport base64\nfrom datetime import datetime\nimport os\nimport shutil\nimport tensorflow as tf\nimport numpy as np\nimport socketio\nimport eventlet\nimport eventlet.wsgi\nfrom PIL import Image\nfrom flask import Flask\nfrom io import BytesIO\n\nfrom keras.models import load_model\nimport h5py\nfrom keras import __version__ as keras_version\n\nsio = socketio.Server()\napp = Flask(__name__)\nmodel = None\nprev_image_array = None\n\n\nclass SimplePIController:\n def __init__(self, Kp, Ki):\n self.Kp = Kp\n self.Ki = Ki\n self.set_point = 0.\n self.error = 0.\n self.integral = 0.\n\n def set_desired(self, desired):\n self.set_point = desired\n\n def update(self, measurement):\n # proportional error\n self.error = self.set_point - measurement\n\n # integral error\n self.integral += self.error\n\n return self.Kp * self.error + self.Ki * self.integral\n\n\ncontroller = SimplePIController(0.1, 0.002)\nset_speed = 9\ncontroller.set_desired(set_speed)\n\n\n@sio.on('telemetry')\ndef telemetry(sid, data):\n if data:\n # The current steering angle of the car\n steering_angle = data[\"steering_angle\"]\n # The current throttle of the car\n throttle = data[\"throttle\"]\n # The current speed of the car\n speed = data[\"speed\"]\n # The current image from the center camera of the car\n imgString = data[\"image\"]\n image = Image.open(BytesIO(base64.b64decode(imgString)))\n image_array = np.asarray(image)\n steering_angle = float(model.predict(image_array[None, :, :, :], batch_size=1))\n\n throttle = controller.update(float(speed))\n\n print(steering_angle, throttle)\n send_control(steering_angle, throttle)\n\n # save frame\n if args.image_folder != '':\n timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]\n image_filename = os.path.join(args.image_folder, timestamp)\n image.save('{}.jpg'.format(image_filename))\n else:\n # NOTE: DON'T EDIT THIS.\n sio.emit('manual', data={}, skip_sid=True)\n\n\n@sio.on('connect')\ndef connect(sid, environ):\n print(\"connect \", sid)\n send_control(0, 0)\n\n\ndef send_control(steering_angle, throttle):\n sio.emit(\n \"steer\",\n data={\n 'steering_angle': steering_angle.__str__(),\n 'throttle': throttle.__str__()\n },\n skip_sid=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Remote Driving')\n parser.add_argument(\n 'model',\n type=str,\n help='Path to model h5 file. Model should be on the same path.'\n )\n parser.add_argument(\n 'image_folder',\n type=str,\n nargs='?',\n default='',\n help='Path to image folder. This is where the images from the run will be saved.'\n )\n args = parser.parse_args()\n\n # check that model Keras version is same as local Keras version\n f = h5py.File(args.model, mode='r')\n model_version = f.attrs.get('keras_version')\n keras_version = str(keras_version).encode('utf8')\n\n if model_version != keras_version:\n print('You are using Keras version ', keras_version,\n ', but the model was built using ', model_version)\n\n model = load_model(args.model)\n\n if args.image_folder != '':\n print(\"Creating image folder at {}\".format(args.image_folder))\n if not os.path.exists(args.image_folder):\n os.makedirs(args.image_folder)\n else:\n shutil.rmtree(args.image_folder)\n os.makedirs(args.image_folder)\n print(\"RECORDING THIS RUN ...\")\n else:\n print(\"NOT RECORDING THIS RUN ...\")\n\n # wrap Flask application with engineio's middleware\n app = socketio.Middleware(sio, app)\n\n # deploy as an eventlet WSGI server\n eventlet.wsgi.server(eventlet.listen(('', 4567)), app)\n"
] |
[
[
"numpy.asarray"
]
] |
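The PI controller in drive.py is self-contained and easy to exercise outside the simulator; a minimal closed-loop sketch (the first-order plant below is an assumption purely for illustration):

    class SimplePIController:  # condensed from drive.py above
        def __init__(self, Kp, Ki):
            self.Kp, self.Ki = Kp, Ki
            self.set_point = 0.
            self.integral = 0.

        def set_desired(self, desired):
            self.set_point = desired

        def update(self, measurement):
            error = self.set_point - measurement  # proportional error
            self.integral += error                # accumulated integral error
            return self.Kp * error + self.Ki * self.integral

    controller = SimplePIController(0.1, 0.002)
    controller.set_desired(9)
    speed = 0.0
    for _ in range(200):
        throttle = controller.update(speed)
        speed += 0.5 * throttle  # toy plant: speed responds linearly to throttle
    print(round(speed, 2))       # settles near the desired speed of 9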
jinyiliu/plancklens
|
[
"ae9a77efd7eceb1662acd037e31f2bbe8754dfc3"
] |
[
"plancklens/filt/filt_simple.py"
] |
[
"\"\"\"simple CMB filtering module.\n\nThis module collects a couple of fast (non-iterative) filtering methods.\n\n\"\"\"\nfrom __future__ import print_function\n\nimport healpy as hp\nimport numpy as np\nimport pickle as pk\nimport os\n\nfrom plancklens.helpers import mpi\nfrom plancklens import utils\n\nclass library_sepTP(object):\n \"\"\"Template class for CMB inverse-variance and Wiener-filtering library.\n\n This is suitable whenever the temperature and polarization maps are independently filtered.\n\n Args:\n lib_dir (str): directory where hashes and filtered maps will be cached.\n sim_lib : simulation library instance. *sim_lib* must have *get_sim_tmap* and *get_sim_pmap* methods.\n cl_weights: CMB spectra, used to compute the Wiener-filtered CMB from the inverse variance filtered maps.\n\n \"\"\"\n def __init__(self, lib_dir, sim_lib, cl_weights, soltn_lib=None, cache=True):\n\n\n self.lib_dir = lib_dir\n self.sim_lib = sim_lib\n self.cl = cl_weights\n self.soltn_lib = soltn_lib\n self.cache = cache\n fn_hash = os.path.join(lib_dir, 'filt_hash.pk')\n if mpi.rank == 0:\n if not os.path.exists(lib_dir):\n os.makedirs(lib_dir)\n if not os.path.exists(fn_hash):\n pk.dump(self.hashdict(), open(fn_hash, 'wb'), protocol=2)\n mpi.barrier()\n utils.hash_check(pk.load(open(fn_hash, 'rb')), self.hashdict())\n\n def hashdict(self):\n assert 0, 'override this'\n\n def get_fmask(self):\n assert 0, 'override this'\n\n def _apply_ivf_t(self, tmap, soltn=None):\n assert 0, 'override this'\n\n def _apply_ivf_p(self, pmap, soltn=None):\n assert 0, 'override this'\n\n def get_ftl(self):\n \"\"\"Isotropic approximation to temperature inverse variance filtering.\n\n :math:`F^{T}_\\ell = (C_\\ell^{TT} + N^{T}_\\ell / b_\\ell^2)^{-1}`\n\n \"\"\"\n assert 0, 'override this'\n\n def get_fel(self):\n \"\"\"Isotropic approximation to E-polarization inverse variance filtering.\n\n :math:`F^{E}_\\ell = (C_\\ell^{EE} + N^{E}_\\ell / b_\\ell^2)^{-1}`\n\n \"\"\"\n assert 0, 'override this'\n\n def get_fbl(self):\n \"\"\"Isotropic approximation to B-polarization inverse variance filtering.\n\n :math:`F^{B}_\\ell = (C_\\ell^{BB} + N^{B}_\\ell / b_\\ell^2)^{-1}`\n\n\n \"\"\"\n assert 0, 'override this'\n\n def get_tal(self, a):\n assert 0, 'override this'\n\n def get_sim_tlm(self, idx):\n \"\"\"Returns an inverse-filtered temperature simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n inverse-filtered temperature healpy alm array\n\n \"\"\"\n tfname = os.path.join(self.lib_dir, 'sim_%04d_tlm.fits'%idx if idx >= 0 else 'dat_tlm.fits')\n if not os.path.exists(tfname):\n tlm = self._apply_ivf_t(self.sim_lib.get_sim_tmap(idx), soltn=None if self.soltn_lib is None else self.soltn_lib.get_sim_tmliklm(idx))\n if self.cache: hp.write_alm(tfname, tlm)\n return tlm\n return hp.read_alm(tfname)\n\n def get_sim_elm(self, idx):\n \"\"\"Returns an inverse-filtered E-polarization simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n inverse-filtered E-polarization healpy alm array\n\n \"\"\"\n tfname = os.path.join(self.lib_dir, 'sim_%04d_elm.fits'%idx if idx >= 0 else 'dat_elm.fits')\n if not os.path.exists(tfname):\n if self.soltn_lib is None:\n soltn = None\n else:\n soltn = np.array([self.soltn_lib.get_sim_emliklm(idx), self.soltn_lib.get_sim_bmliklm(idx)])\n elm, blm = self._apply_ivf_p(self.sim_lib.get_sim_pmap(idx), soltn=soltn)\n if self.cache:\n hp.write_alm(tfname, elm)\n hp.write_alm(os.path.join(self.lib_dir, 'sim_%04d_blm.fits'%idx if idx >= 0 else 'dat_blm.fits'), blm)\n return elm\n else:\n return 
hp.read_alm(tfname)\n\n def get_sim_blm(self, idx):\n \"\"\"Returns an inverse-filtered B-polarization simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n inverse-filtered B-polarization healpy alm array\n\n \"\"\"\n tfname = os.path.join(self.lib_dir, 'sim_%04d_blm.fits'%idx if idx >= 0 else 'dat_blm.fits')\n if not os.path.exists(tfname):\n if self.soltn_lib is None:\n soltn = None\n else:\n soltn = np.array([self.soltn_lib.get_sim_emliklm(idx), self.soltn_lib.get_sim_bmliklm(idx)])\n elm, blm = self._apply_ivf_p(self.sim_lib.get_sim_pmap(idx), soltn=soltn)\n if self.cache:\n hp.write_alm(tfname, blm)\n hp.write_alm(os.path.join(self.lib_dir, 'sim_%04d_elm.fits'%idx if idx >= 0 else 'dat_elm.fits'), elm)\n return blm\n else:\n return hp.read_alm(tfname)\n\n def get_sim_tmliklm(self, idx):\n \"\"\"Returns a Wiener-filtered temperature simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n Wiener-filtered temperature healpy alm array\n\n \"\"\"\n return hp.almxfl(self.get_sim_tlm(idx), self.cl['tt'])\n\n def get_sim_emliklm(self, idx):\n \"\"\"Returns a Wiener-filtered E-polarization simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n Wiener-filtered E-polarization healpy alm array\n\n \"\"\"\n return hp.almxfl(self.get_sim_elm(idx), self.cl['ee'])\n\n def get_sim_bmliklm(self, idx):\n \"\"\"Returns a Wiener-filtered B-polarization simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n Wiener-filtered B-polarization healpy alm array\n\n \"\"\"\n return hp.almxfl(self.get_sim_blm(idx), self.cl['bb'])\n\n\n\nclass library_jTP(object):\n \"\"\"Template class for CMB inverse-variance and Wiener-filtering library.\n\n This one is suitable whenever the temperature and polarization maps are jointly filtered.\n\n Args:\n lib_dir (str): directory where hashes and filtered maps will be cached.\n sim_lib : simulation library instance. *sim_lib* must have *get_sim_tmap* and *get_sim_pmap* methods.\n cl_weights: CMB spectra, used to compute the Wiener-filtered CMB from the inverse variance filtered maps.\n\n \"\"\"\n def __init__(self, lib_dir, sim_lib, cl_weights, soltn_lib=None, cache=True):\n\n assert np.all([k in cl_weights.keys() for k in ['tt', 'ee', 'bb']])\n self.lib_dir = lib_dir\n self.sim_lib = sim_lib\n self.cl = cl_weights\n self.soltn_lib = soltn_lib\n self.cache = cache\n fn_hash = os.path.join(lib_dir, 'filt_hash.pk')\n if mpi.rank == 0:\n if not os.path.exists(lib_dir):\n os.makedirs(lib_dir)\n if not os.path.exists(fn_hash):\n pk.dump(self.hashdict(), open(fn_hash, 'wb'), protocol=2)\n mpi.barrier()\n utils.hash_check(pk.load(open(fn_hash, 'rb')), self.hashdict())\n\n def hashdict(self):\n assert 0, 'override this'\n\n def get_fmask(self):\n assert 0, 'override this'\n\n def _apply_ivf(self, tqumap, soltn=None):\n assert 0, 'override this'\n\n\n def get_fal(self):\n \"\"\"Isotropic matrix approximation to inverse variance filtering\n\n :math:`F_\\ell \\sim (C_\\ell + N_\\ell / b_\\ell^2)^{-1}`\n\n Output is dictionary with the usual 'tt', 'ee', 'te', 'bb', ... 
keys.\n\n \"\"\"\n assert 0, 'override this'\n\n def _get_alms(self, a, idx):\n assert a in ['t', 'e', 'b']\n tfname = os.path.join(self.lib_dir, 'sim_%04d_tlm.fits' % idx if idx >= 0 else 'dat_tlm.fits')\n fname = tfname.replace('tlm.fits', a + 'lm.fits')\n if not os.path.exists(fname):\n T = self.sim_lib.get_sim_tmap(idx)\n Q, U = self.sim_lib.get_sim_pmap(idx)\n if self.soltn_lib is None:\n soltn = None\n else:\n tlm = self.soltn_lib.get_sim_tmliklm(idx)\n elm = self.soltn_lib.get_sim_emliklm(idx)\n blm = self.soltn_lib.get_sim_bmliklm(idx)\n soltn = (tlm, elm, blm)\n tlm, elm, blm = self._apply_ivf([T, Q, U], soltn=soltn)\n if self.cache:\n hp.write_alm(tfname.replace('tlm.fits', 'tlm.fits'), tlm)\n hp.write_alm(tfname.replace('tlm.fits', 'elm.fits'), elm)\n hp.write_alm(tfname.replace('tlm.fits', 'blm.fits'), blm)\n return hp.read_alm(fname)\n\n def get_sim_tlm(self, idx):\n \"\"\"Returns an inverse-filtered temperature simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n inverse-filtered temperature healpy alm array\n\n \"\"\"\n return self._get_alms('t', idx)\n\n def get_sim_elm(self, idx):\n \"\"\"Returns an inverse-filtered E-polarization simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n inverse-filtered E-polarization healpy alm array\n\n \"\"\"\n return self._get_alms('e', idx)\n\n\n def get_sim_blm(self, idx):\n \"\"\"Returns an inverse-filtered B-polarization simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n inverse-filtered B-polarization healpy alm array\n\n \"\"\"\n return self._get_alms('b', idx)\n\n\n def get_sim_tmliklm(self, idx):\n \"\"\"Returns a Wiener-filtered temperature simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n Wiener-filtered temperature healpy alm array\n\n \"\"\"\n ret = hp.almxfl(self.get_sim_tlm(idx), self.cl['tt'])\n for k in ['te', 'tb']:\n cl = self.cl.get(k[0] + k[1], self.cl.get(k[1] + k[0], None))\n if cl is not None:\n ret += hp.almxfl(self._get_alms(k[1], idx), cl)\n return ret\n\n def get_sim_emliklm(self, idx):\n \"\"\"Returns a Wiener-filtered E-polarization simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n Wiener-filtered E-polarization healpy alm array\n\n \"\"\"\n ret = hp.almxfl(self.get_sim_elm(idx), self.cl['ee'])\n for k in ['et', 'eb']:\n cl = self.cl.get(k[0] + k[1], self.cl.get(k[1] + k[0], None))\n if cl is not None:\n ret += hp.almxfl(self._get_alms(k[1], idx), cl)\n return ret\n\n def get_sim_bmliklm(self, idx):\n \"\"\"Returns a Wiener-filtered B-polarization simulation.\n\n Args:\n idx: simulation index\n\n Returns:\n Wiener-filtered B-polarization healpy alm array\n\n \"\"\"\n ret = hp.almxfl(self.get_sim_blm(idx), self.cl['bb'])\n for k in ['bt', 'be']:\n cl = self.cl.get(k[0] + k[1], self.cl.get(k[1] + k[0], None))\n if cl is not None:\n ret += hp.almxfl(self._get_alms(k[1], idx), cl)\n return ret\n\n\nclass library_fullsky_sepTP(library_sepTP):\n \"\"\"Full-sky isotropic filtering instance.\n\n Args:\n lib_dir: directory where hashes and filtered maps will be cached.\n sim_lib: simulation library instance to inverse-filter\n nside: healpix resolution of the simulation library\n cl_len : CMB spectra, used to compute the Wiener-filtered CMB from the inverse variance filtered maps.\n transf : fiducial transfer function of the CMB maps. (if dict, then must have keys 't' 'e' and 'b' for individual transfer functions)\n ftl (1d-array): isotropic filtering array for temperature (filtered tlm's are ftl * tlm of the data)\n fel (1d-array): isotropic filtering array for E-pol. 
(filtered elm's are fel * elm of the data)\n fbl (1d-array): isotropic filtering array for B-po. (filtered blm's are fbl * blm of the data)\n cache: filtered alm's will be cached if set.\n\n \"\"\"\n def __init__(self, lib_dir, sim_lib, nside, transf:np.ndarray or dict, cl_len, ftl, fel, fbl, cache=False):\n\n transfd = transf if isinstance(transf, dict) else {'t': transf, 'e': transf, 'b': transf}\n assert 't' in transfd.keys() and 'e' in transfd.keys() and 'b' in transfd.keys()\n\n self.sim_lib = sim_lib\n self.ftl = ftl\n self.fel = fel\n self.fbl = fbl\n self.lmax_fl = np.max([len(ftl), len(fel), len(fbl)]) - 1\n self.nside = nside\n self.transf = transfd\n\n super(library_fullsky_sepTP, self).__init__(lib_dir, sim_lib, cl_len, cache=cache)\n\n def hashdict(self):\n return {'sim_lib':self.sim_lib.hashdict(), 'transf': utils.clhash(self.transf['t']),\n 'cl_len': {k: utils.clhash(self.cl[k]) for k in ['tt', 'ee', 'bb']},\n 'ftl': utils.clhash(self.ftl), 'fel': utils.clhash(self.fel), 'fbl': utils.clhash(self.fbl)}\n\n def get_fmask(self):\n return np.ones(hp.nside2npix(self.nside), dtype=float)\n\n def get_tal(self, a):\n assert (a.lower() in ['t', 'e', 'b'])\n return utils.cli(self.transf[a.lower()])\n\n def get_ftl(self):\n return np.copy(self.ftl)\n\n def get_fel(self):\n return np.copy(self.fel)\n\n def get_fbl(self):\n return np.copy(self.fbl)\n\n def _apply_ivf_t(self, tmap, soltn=None):\n assert len(tmap) == hp.nside2npix(self.nside), (hp.npix2nside(tmap.size), self.nside)\n alm = hp.map2alm(tmap, lmax=self.lmax_fl, iter=0)\n return hp.almxfl(alm, self.get_ftl() * utils.cli(self.transf['t'][:len(self.ftl)]))\n\n def _apply_ivf_p(self, pmap, soltn=None):\n assert len(pmap[0]) == hp.nside2npix(self.nside) and len(pmap[0]) == len(pmap[1])\n elm, blm = hp.map2alm_spin([m for m in pmap], 2, lmax=self.lmax_fl)\n elm = hp.almxfl(elm, self.get_fel() * utils.cli(self.transf['e'][:len(self.fel)]))\n blm = hp.almxfl(blm, self.get_fbl() * utils.cli(self.transf['b'][:len(self.fbl)]))\n return elm, blm\n\nclass library_fullsky_alms_sepTP(library_sepTP):\n \"\"\"Full-sky isotropic filtering instance, but with harmonic space inputs\n\n Args:\n lib_dir: directory where hashes and filtered maps will be cached.\n sim_lib: simulation library instance to inverse-filter\n cl_len : CMB spectra, used to compute the Wiener-filtered CMB from the inverse variance filtered maps.\n transf : fiducial transfer function of the CMB maps. (if dict, then must have keys 't' 'e' and 'b' for individual transfer functions)\n ftl (1d-array): isotropic filtering array for temperature (filtered tlm's are ftl * tlm of the data)\n fel (1d-array): isotropic filtering array for E-pol. (filtered elm's are fel * elm of the data)\n fbl (1d-array): isotropic filtering array for B-po. 
(filtered blm's are fbl * blm of the data)\n cache: filtered alm's will be cached if set.\n\n \"\"\"\n def __init__(self, lib_dir, sim_lib, transf:np.ndarray or dict, cl_len, ftl, fel, fbl, cache=False):\n\n transfd = transf if isinstance(transf, dict) else {'t': transf, 'e': transf, 'b': transf}\n assert 't' in transfd.keys() and 'e' in transfd.keys() and 'b' in transfd.keys()\n\n self.sim_lib = sim_lib\n self.ftl = ftl\n self.fel = fel\n self.fbl = fbl\n self.lmax_fl = np.max([len(ftl), len(fel), len(fbl)]) - 1\n self.transf = transfd\n\n super(library_fullsky_alms_sepTP, self).__init__(lib_dir, sim_lib, cl_len, cache=cache)\n\n def hashdict(self):\n return {'sim_lib':self.sim_lib.hashdict(), 'transf': utils.clhash(self.transf['t']),\n 'cl_len': {k: utils.clhash(self.cl[k]) for k in ['tt', 'ee', 'bb']},\n 'ftl': utils.clhash(self.ftl), 'fel': utils.clhash(self.fel), 'fbl': utils.clhash(self.fbl)}\n\n def get_fmask(self):\n return np.array([1.]) # For compatibility purposes only...\n\n def get_tal(self, a):\n assert (a.lower() in ['t', 'e', 'b'])\n return utils.cli(self.transf[a.lower()])\n\n def get_ftl(self):\n return np.copy(self.ftl)\n\n def get_fel(self):\n return np.copy(self.fel)\n\n def get_fbl(self):\n return np.copy(self.fbl)\n\n def _apply_ivf_t(self, tlm, soltn=None):\n return hp.almxfl(tlm, self.get_ftl() * utils.cli(self.transf['t'][:len(self.ftl)]))\n\n def _apply_ivf_p(self, eblm, soltn=None):\n elm = hp.almxfl(eblm[0], self.get_fel() * utils.cli(self.transf['e'][:len(self.fel)]))\n blm = hp.almxfl(eblm[1], self.get_fbl() * utils.cli(self.transf['b'][:len(self.fbl)]))\n return elm, blm\n\n\nclass library_apo_sepTP(library_sepTP):\n \"\"\"\n Library to perform inverse variance filtering on the sim_lib library using simple mask apo and isotropic filtering.\n\n Args:\n lib_dir: directory where hashes and filtered maps will be cached.\n sim_lib: simulation library instance to inverse-filter\n apomask_path : path of the (presumably apodized) mask\n cl_len : CMB spectra, used to compute the Wiener-filtered CMB from the inverse variance filtered maps.\n transf : fiducial transfer function of the CMB maps.\n ftl (1d-array): isotropic filtering array for temperature (filtered tlm's are ftl * tlm of the data)\n fel (1d-array): isotropic filtering array for E-pol. (filtered elm's are fel * elm of the data)\n fbl (1d-array): isotropic filtering array for B-po. 
(filtered blm's are fbl * blm of the data)\n cache: filtered alm's will be cached if set.\n\n \"\"\"\n def __init__(self, lib_dir, sim_lib, apomask_path, cl_len, transf, ftl, fel, fbl, cache=False):\n assert len(transf) >= np.max([len(ftl), len(fel), len(fbl)])\n assert np.all([k in cl_len.keys() for k in ['tt', 'ee', 'bb']])\n assert os.path.exists(apomask_path)\n\n self.ftl = ftl\n self.fel = fel\n self.fbl = fbl\n self.transf = transf\n self.lmax_fl = np.max([len(ftl), len(fel), len(fbl)]) - 1\n self.apomask_path = apomask_path\n self.nside = hp.npix2nside(hp.read_map(apomask_path).size)\n super(library_apo_sepTP, self).__init__(lib_dir, sim_lib, cl_len, cache=cache)\n\n def hashdict(self):\n return {'sim_lib':self.sim_lib.hashdict(),\n 'apomask': self.apomask_path, 'transf': utils.clhash(self.transf),\n 'cl_len': {k: utils.clhash(self.cl[k]) for k in ['tt', 'ee', 'bb']},\n 'ftl': utils.clhash(self.ftl), 'fel': utils.clhash(self.fel), 'fbl': utils.clhash(self.fbl)}\n\n def get_fmask(self):\n return hp.read_map(self.apomask_path)\n\n def get_tal(self, a):\n assert (a.lower() in ['t', 'e', 'b'])\n return utils.cli(self.transf)\n\n def get_ftl(self):\n return np.copy(self.ftl)\n\n def get_fel(self):\n return np.copy(self.fel)\n\n def get_fbl(self):\n return np.copy(self.fbl)\n\n def _apply_ivf_t(self, tmap, soltn=None):\n assert len(tmap) == hp.nside2npix(self.nside), (hp.npix2nside(tmap.size), self.nside)\n alm = hp.map2alm(tmap * self.get_fmask(), lmax=self.lmax_fl, iter=0)\n return hp.almxfl(alm, self.get_ftl() * utils.cli(self.transf[:len(self.ftl)]))\n\n def _apply_ivf_p(self, pmap, soltn=None):\n assert len(pmap[0]) == hp.nside2npix(self.nside) and len(pmap[0]) == len(pmap[1])\n elm, blm = hp.map2alm_spin([m * self.get_fmask() for m in pmap], 2, lmax=self.lmax_fl)\n elm = hp.almxfl(elm, self.get_fel() * utils.cli(self.transf[:len(self.fel)]))\n blm = hp.almxfl(blm, self.get_fbl() * utils.cli(self.transf[:len(self.fbl)]))\n return elm, blm"
] |
[
[
"numpy.copy",
"numpy.array"
]
] |
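The isotropic filters that library_fullsky_sepTP expects follow the docstring formula F_l = (C_l + N_l / b_l^2)^(-1); a sketch of building ftl, where the beam width, noise level, and placeholder spectrum are all assumptions:

    import numpy as np
    import healpy as hp

    lmax = 2048
    bl = hp.gauss_beam(np.radians(5. / 60.), lmax=lmax)  # assumed 5-arcmin Gaussian beam
    nlev_t = 35.  # assumed white-noise level in muK-arcmin
    nl = (nlev_t * np.pi / 180. / 60.) ** 2 * np.ones(lmax + 1)
    cl_tt = 1e3 / np.arange(1, lmax + 2) ** 2  # placeholder lensed TT spectrum
    ftl = 1. / (cl_tt + nl / bl ** 2)  # F_l^T, matching the get_ftl docstring
    ftl[:2] = 0.  # common convention: exclude monopole and dipole

fel and fbl are built the same way from the EE and BB spectra.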
kanji95/pytorch
|
[
"b8e6144e0a328fe49067ae7d18d5582c8f1a61b0"
] |
[
"torch/testing/_internal/common_modules.py"
] |
[
"import torch\nfrom copy import deepcopy\nfrom functools import wraps, partial\nfrom itertools import chain\nfrom torch.testing import floating_types\nfrom torch.testing._internal.common_device_type import (\n _TestParametrizer, _dtype_test_suffix, _update_param_kwargs, skipIf)\nfrom torch.testing._internal.common_nn import nllloss_reference\nfrom torch.testing._internal.common_utils import make_tensor\nfrom types import ModuleType\nfrom typing import List, Tuple, Type, Set, Dict\n\n\n# List of all namespaces containing modules to test.\nMODULE_NAMESPACES: List[ModuleType] = [\n torch.nn.modules,\n torch.nn.qat.modules,\n torch.nn.quantizable.modules,\n torch.nn.quantized.modules,\n]\n\n# Modules that shouldn't be tested for one reason or another.\nMODULES_TO_SKIP: Set[Type] = {\n torch.nn.Module, # abstract base class\n torch.nn.Container, # deprecated\n torch.nn.NLLLoss2d, # deprecated\n torch.nn.quantized.modules._ConvNd, # abstract base class\n torch.nn.quantized.MaxPool2d, # aliases to nn.MaxPool2d\n}\n\n# List of all module classes to test.\nMODULE_CLASSES: List[Type] = list(chain(*[\n [getattr(namespace, module_name) for module_name in namespace.__all__] # type: ignore[attr-defined]\n for namespace in MODULE_NAMESPACES]))\nMODULE_CLASSES = [cls for cls in MODULE_CLASSES if cls not in MODULES_TO_SKIP]\n\n# Dict of module class -> common name. Useful for making test names more intuitive.\n# Example: torch.nn.modules.linear.Linear -> \"nn.Linear\"\nMODULE_CLASS_NAMES: Dict[Type, str] = {}\nfor namespace in MODULE_NAMESPACES:\n for module_name in namespace.__all__: # type: ignore[attr-defined]\n module_cls = getattr(namespace, module_name)\n namespace_name = namespace.__name__.replace('torch.', '').replace('.modules', '')\n MODULE_CLASS_NAMES[module_cls] = f'{namespace_name}.{module_name}'\n\n\nclass modules(_TestParametrizer):\n \"\"\" PROTOTYPE: Decorator for specifying a list of modules over which to run a test. 
\"\"\"\n def __init__(self, module_info_list):\n self.module_info_list = module_info_list\n\n def _parametrize_test(self, test, generic_cls, device_cls):\n for module_info in self.module_info_list:\n # TODO: Factor some of this out since it's similar to OpInfo.\n for dtype in floating_types():\n # Construct the test name.\n test_name = '{}_{}_{}{}'.format(test.__name__,\n module_info.name.replace('.', '_'),\n device_cls.device_type,\n _dtype_test_suffix(dtype))\n\n # Construct parameter kwargs to pass to the test.\n param_kwargs = {'module_info': module_info}\n _update_param_kwargs(param_kwargs, 'dtype', dtype)\n\n try:\n active_decorators = []\n if module_info.should_skip(generic_cls.__name__, test.__name__, device_cls.device_type, dtype):\n active_decorators.append(skipIf(True, \"Skipped!\"))\n\n if module_info.decorators is not None:\n for decorator in module_info.decorators:\n # Can't use isinstance as it would cause a circular import\n if decorator.__class__.__name__ == 'DecorateInfo':\n if decorator.is_active(generic_cls.__name__, test.__name__,\n device_cls.device_type, dtype):\n active_decorators += decorator.decorators\n else:\n active_decorators.append(decorator)\n\n @wraps(test)\n def test_wrapper(*args, **kwargs):\n return test(*args, **kwargs)\n\n for decorator in active_decorators:\n test_wrapper = decorator(test_wrapper)\n\n yield (test_wrapper, test_name, param_kwargs)\n except Exception as ex:\n # Provides an error message for debugging before rethrowing the exception\n print(\"Failed to instantiate {0} for module {1}!\".format(test_name, module_info.name))\n raise ex\n\n\ndef formatted_module_name(module_cls):\n \"\"\" Returns the common name of the module class formatted for use in test names. \"\"\"\n return MODULE_CLASS_NAMES[module_cls].replace('.', '_')\n\n\nclass FunctionInput(object):\n \"\"\" Contains args and kwargs to pass as input to a function. \"\"\"\n __slots__ = ['args', 'kwargs']\n\n def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n\nclass ModuleInput(object):\n \"\"\" Contains args / kwargs for module instantiation + forward pass. \"\"\"\n __slots__ = ['constructor_input', 'forward_input', 'desc', 'reference_fn']\n\n def __init__(self, constructor_input, forward_input=None, desc='', reference_fn=None):\n self.constructor_input = constructor_input # Inputs to pass during construction\n self.forward_input = forward_input # Inputs to pass to forward()\n self.desc = desc # Description for this set of inputs\n self.reference_fn = reference_fn # Reference with signature: reference_fn(module, parameters, *args, **kwargs)\n\n if reference_fn is not None:\n\n @wraps(reference_fn)\n def copy_reference_fn(m, *args, **kwargs):\n # Copy inputs to avoid undesired side effects from calling the reference.\n args, kwargs = deepcopy(args), deepcopy(kwargs)\n\n # Note that module parameters are passed in for convenience.\n return reference_fn(m, list(m.parameters()), *args, **kwargs)\n\n self.reference_fn = copy_reference_fn\n\n\nclass ModuleInfo(object):\n \"\"\" Module information to be used in testing. 
\"\"\"\n\n def __init__(self,\n module_cls, # Class object for the module under test\n *,\n module_inputs_func, # Function to generate module inputs\n skips=(), # Indicates which tests to skip\n decorators=None, # Additional decorators to apply to generated tests\n ):\n self.module_cls = module_cls\n self.module_inputs_func = module_inputs_func\n self.skips = skips\n self.decorators = decorators\n\n def should_skip(self, cls_name, test_name, device_type, dtype):\n return any(si.is_active(cls_name, test_name, device_type, dtype) for si in self.skips)\n\n @property\n def name(self):\n return formatted_module_name(self.module_cls)\n\n\ndef module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n module_inputs = [\n ModuleInput(constructor_input=FunctionInput(10, 8),\n forward_input=FunctionInput(make_input((4, 10))),\n reference_fn=lambda m, p, i: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8)),\n ModuleInput(constructor_input=FunctionInput(10, 8, bias=False),\n forward_input=FunctionInput(make_input((4, 10))),\n desc='no_bias',\n reference_fn=lambda m, p, i: torch.mm(i, p[0].t())),\n ModuleInput(constructor_input=FunctionInput(3, 5),\n forward_input=FunctionInput(make_input(3)),\n desc='no_batch_dim',\n reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1])\n ]\n\n return module_inputs\n\n\ndef module_inputs_torch_nn_NLLLoss(module_info, device, dtype, requires_grad, **kwargs):\n make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)\n\n cases: List[Tuple[str, dict]] = [\n ('', {}),\n ('ignore_index', {'ignore_index': 2}),\n ('weights', {'weight': make_input(10)}),\n ('weights_ignore_index', {'weight': make_input(10), 'ignore_index': 2}),\n ('weights_ignore_index_neg', {'weight': make_input(10), 'ignore_index': -1})\n ]\n module_inputs = []\n for desc, constructor_kwargs in cases:\n\n def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs):\n return nllloss_reference(i, t, **constructor_kwargs)\n\n module_inputs.append(\n ModuleInput(constructor_input=FunctionInput(**constructor_kwargs),\n forward_input=FunctionInput(make_input((15, 10)).log_softmax(dim=1),\n torch.empty(15, device=device).uniform_().mul(10).floor().long()),\n desc=desc,\n reference_fn=reference_fn)\n )\n\n return module_inputs\n\n\n# Database of ModuleInfo entries in alphabetical order.\nmodule_db: List[ModuleInfo] = [\n ModuleInfo(torch.nn.Linear,\n module_inputs_func=module_inputs_torch_nn_Linear),\n ModuleInfo(torch.nn.NLLLoss,\n module_inputs_func=module_inputs_torch_nn_NLLLoss)\n]\n"
] |
[
[
"torch.testing._internal.common_device_type._dtype_test_suffix",
"torch.empty",
"torch.testing._internal.common_device_type.skipIf",
"torch.testing._internal.common_nn.nllloss_reference",
"torch.testing._internal.common_device_type._update_param_kwargs",
"torch.testing.floating_types"
]
] |
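A sketch of how the @modules decorator above is applied inside a device-generic test class; this mirrors the prototype's intended pattern, and the test body is illustrative only:

    from torch.testing._internal.common_device_type import instantiate_device_type_tests
    from torch.testing._internal.common_modules import module_db, modules
    from torch.testing._internal.common_utils import TestCase, run_tests

    class TestModule(TestCase):
        @modules(module_db)
        def test_forward(self, device, dtype, module_info):
            # One test instance is generated per (module, device, dtype) combination.
            for module_input in module_info.module_inputs_func(
                    module_info, device, dtype, requires_grad=False):
                if module_input.forward_input is None:
                    continue
                ctor = module_input.constructor_input
                m = module_info.module_cls(*ctor.args, **ctor.kwargs).to(device).to(dtype)
                fwd = module_input.forward_input
                m(*fwd.args, **fwd.kwargs)

    instantiate_device_type_tests(TestModule, globals())

    if __name__ == '__main__':
        run_tests()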
Woodenonez/multimodal_motion_prediction
|
[
"e1c799626a2b99780afe63b64e29042cb8043dd3"
] |
[
"misc_mixture_density_network.py"
] |
[
"import sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn as nn\nimport torch.tensor as ts\nimport torch.optim as optim\n\nimport mdn_base\nfrom mdn_base import MDN_Module as MDN # classic_MDN_Module just use exp for sigma\n# from mdn_base import classic_MDN_Module as MDN # classic_MDN_Module just use exp for sigma\n\n\nclass Mixture_Density_Network(nn.Module):\n \"\"\" \n \"\"\"\n def __init__(self, data_shape, labels_shape, num_gaus, layer_param=None, verbose=True):\n super(Mixture_Density_Network, self).__init__()\n self.data_shape = data_shape\n self.labels_shape = labels_shape\n self.is_mdn = (num_gaus is not None)\n self.vb = verbose\n\n self.dim_input = data_shape[1]\n self.dim_prob = labels_shape[1]\n self.num_gaus = num_gaus\n self.Loss = []\n self.Val_loss= []\n\n if layer_param is not None:\n self.layer_param = layer_param\n else:\n self.layer_param = [256, 64, 32, 8] # beta: 64,32,8; betaP: 256+beta; One: [512, 256, 64, 64, 32]\n\n def build_Network(self):\n self.gen_Model()\n self.gen_Optimizer(self.model.parameters())\n if self.vb:\n print(self.model)\n\n def gen_Model(self):\n lp = self.layer_param\n self.model = nn.Sequential(\n nn.Linear(self.dim_input, lp[0]), \n nn.ReLU()\n )\n for i in range(len(lp)-1):\n self.model.add_module('Linear'+str(i+1), nn.Linear(lp[i], lp[i+1]))\n self.model.add_module('BN'+str(i+1), nn.BatchNorm1d(lp[i+1], affine=False))\n self.model.add_module('ReLU'+str(i+1), nn.ReLU())\n if self.is_mdn:\n self.model.add_module('MDN', MDN(lp[-1], self.dim_prob, self.num_gaus)) # dim_fea, dim_prob, num_gaus\n else:\n self.model.add_module('Linear'+str(len(lp)), nn.Linear(lp[-1], self.dim_prob)) # dim_fea, dim_prob\n return self.model\n\n def gen_Optimizer(self, parameters):\n self.optimizer = optim.Adam(parameters, lr=1e-3)\n return self.optimizer\n\n def predict(self, data):\n alp, mu, sigma = self.model(data)\n return alp, mu, sigma\n\n def validate(self, data, labels):\n if self.is_mdn:\n alp, mu, sigma = self.model(data)\n loss = mdn_base.loss_NLL(alp, mu, sigma, labels)\n else:\n mse_loss = nn.MSELoss()\n loss = mse_loss(self.model(data), labels)\n return loss\n\n def train_batch(self, batch, label):\n self.model.zero_grad() # clear the gradient buffer for updating\n loss = self.validate(batch, label)\n loss.backward()\n self.optimizer.step()\n return loss\n\n def train(self, data_handler, batch_size, epoch):\n print('\\nTraining...')\n dh = data_handler\n data_val, labels_val, _, _ = dh.split_train_val()\n n = int(dh.return_num_data()*(1-dh.val_p))\n cnt = 0\n for ep in range(epoch):\n for _ in range(int(n/batch_size)+1):\n cnt += batch_size\n batch, label = dh.return_batch(batch_size=batch_size)\n loss = self.train_batch(batch, label) # train here\n val_loss = self.validate(ts(data_val).float(), ts(labels_val).float())\n self.Loss.append(loss.item())\n self.Val_loss.append(val_loss.item())\n assert(~np.isnan(loss.item())),(\"Loss goes to NaN!\")\n if (cnt%2000 == 0) & (self.vb):\n print(\"\\rLoss/Val_loss: {}/{}, {}k/{}k, Epoch {}/{} \".format(\n round(loss.item(),4), round(val_loss.item(),4), cnt/1000, n/1000, ep+1, epoch), end='')\n cnt = 0\n print()\n print('\\nTraining Complete!')\n\n\n def plot_history_loss(self):\n plt.plot(self.Loss, label='loss')\n plt.plot(self.Val_loss, label='val_loss')\n plt.xlabel('#batch')\n plt.legend()\n plt.show()\n"
] |
[
[
"torch.optim.Adam",
"matplotlib.pyplot.legend",
"torch.nn.BatchNorm1d",
"torch.tensor",
"matplotlib.pyplot.plot",
"torch.nn.Linear",
"matplotlib.pyplot.xlabel",
"torch.nn.ReLU",
"matplotlib.pyplot.show",
"torch.nn.MSELoss"
]
] |
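A sketch of driving the network above without the project's data handler (the shapes and the number of mixture components below are assumptions); note that the file's module-level import torch.tensor as ts no longer resolves on recent PyTorch releases and may need replacing with plain torch.tensor:

    import torch
    from misc_mixture_density_network import Mixture_Density_Network  # assumed importable

    net = Mixture_Density_Network(data_shape=(1000, 6), labels_shape=(1000, 2), num_gaus=5)
    net.build_Network()

    net.model.eval()  # freeze BatchNorm running statistics for a single inference pass
    alp, mu, sigma = net.predict(torch.randn(64, 6))  # mixture weights, means, spreads
    print(alp.shape, mu.shape, sigma.shape)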
tnwei/gradio
|
[
"472c1a9372f7c75040665083eb759f60f18906fc"
] |
[
"gradio/outputs.py"
] |
[
"\"\"\"\nThis module defines various classes that can serve as the `output` to an interface. Each class must inherit from\n`OutputComponent`, and each class must define a path to its template. All of the subclasses of `OutputComponent` are\nautomatically added to a registry, which allows them to be easily referenced in other parts of the code.\n\"\"\"\n\nfrom gradio.component import Component\nimport numpy as np\nimport json\nfrom gradio import processing_utils\nimport datetime\nimport operator\nfrom numbers import Number\nimport warnings\nimport tempfile\nimport scipy\nimport os\nimport pandas as pd\nimport PIL\nfrom types import ModuleType\n\nclass OutputComponent(Component):\n \"\"\"\n Output Component. All output components subclass this.\n \"\"\"\n\n def postprocess(self, y):\n \"\"\"\n Any postprocessing needed to be performed on function output.\n \"\"\"\n return y\n\n\n\nclass Textbox(OutputComponent):\n '''\n Component creates a textbox to render output text or number.\n Output type: Union[str, float, int]\n Demos: hello_world.py, sentence_builder.py\n '''\n\n def __init__(self, type=\"auto\", label=None):\n '''\n Parameters:\n type (str): Type of value to be passed to component. \"str\" expects a string, \"number\" expects a float value, \"auto\" detects return type.\n label (str): component name in interface.\n '''\n self.type = type\n super().__init__(label)\n\n def get_template_context(self):\n return {\n **super().get_template_context()\n }\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"text\": {\"type\": \"str\"},\n \"textbox\": {\"type\": \"str\"},\n \"number\": {\"type\": \"number\"},\n }\n\n def postprocess(self, y):\n if self.type == \"str\" or self.type == \"auto\":\n return str(y)\n elif self.type == \"number\":\n return y\n else:\n raise ValueError(\"Unknown type: \" + self.type + \". Please choose from: 'str', 'number'\")\n\n\nclass Label(OutputComponent):\n '''\n Component outputs a classification label, along with confidence scores of top categories if provided. Confidence scores are represented as a dictionary mapping labels to scores between 0 and 1.\n Output type: Union[Dict[str, float], str, int, float]\n Demos: image_classifier.py, main_note.py, titanic_survival.py\n '''\n\n CONFIDENCES_KEY = \"confidences\"\n\n def __init__(self, num_top_classes=None, type=\"auto\", label=None):\n '''\n Parameters:\n num_top_classes (int): number of most confident classes to show.\n type (str): Type of value to be passed to component. 
\"value\" expects a single out label, \"confidences\" expects a dictionary mapping labels to confidence scores, \"auto\" detects return type.\n label (str): component name in interface.\n '''\n self.num_top_classes = num_top_classes\n self.type = type\n super().__init__(label)\n\n def postprocess(self, y):\n if self.type == \"label\" or (self.type == \"auto\" and (isinstance(y, str) or isinstance(y, Number))):\n return {\"label\": str(y)}\n elif self.type == \"confidences\" or (self.type == \"auto\" and isinstance(y, dict)):\n sorted_pred = sorted(\n y.items(),\n key=operator.itemgetter(1),\n reverse=True\n )\n if self.num_top_classes is not None:\n sorted_pred = sorted_pred[:self.num_top_classes]\n return {\n \"label\": sorted_pred[0][0],\n \"confidences\": [\n {\n \"label\": pred[0],\n \"confidence\": pred[1]\n } for pred in sorted_pred\n ]\n }\n else:\n raise ValueError(\"The `Label` output interface expects one of: a string label, or an int label, a \"\n \"float label, or a dictionary whose keys are labels and values are confidences.\")\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"label\": {},\n }\n\n def save_flagged(self, dir, label, data, encryption_key):\n \"\"\"\n Returns: (Union[str, Dict[str, number]]): Either a string representing the main category label, or a dictionary with category keys mapping to confidence levels.\n \"\"\"\n if \"confidences\" in data:\n return json.dumps({example[\"label\"]: example[\"confidence\"] for example in data[\"confidences\"]})\n else:\n return data[\"label\"]\n \n def restore_flagged(self, data):\n try:\n data = json.loads(data)\n return data\n except:\n return data\n\nclass Image(OutputComponent):\n '''\n Component displays an output image. \n Output type: Union[numpy.array, PIL.Image, str, matplotlib.pyplot, Tuple[Union[numpy.array, PIL.Image, str], List[Tuple[str, float, float, float, float]]]]\n Demos: image_mod.py, webcam.py\n '''\n\n def __init__(self, type=\"auto\", labeled_segments=False, plot=False, label=None):\n '''\n Parameters:\n type (str): Type of value to be passed to component. \"numpy\" expects a numpy array with shape (width, height, 3), \"pil\" expects a PIL image object, \"file\" expects a file path to the saved image, \"plot\" expects a matplotlib.pyplot object, \"auto\" detects return type.\n labeled_segments (bool): If True, expects a two-element tuple to be returned. The first element of the tuple is the image of format specified by type. The second element is a list of tuples, where each tuple represents a labeled segment within the image. The first element of the tuple is the string label of the segment, followed by 4 floats that represent the left-x, top-y, right-x, and bottom-y coordinates of the bounding box.\n plot (bool): DEPRECATED. Whether to expect a plot to be returned by the function.\n label (str): component name in interface.\n '''\n self.labeled_segments = labeled_segments\n if plot:\n warnings.warn(\"The 'plot' parameter has been deprecated. 
Set parameter 'type' to 'plot' instead.\", DeprecationWarning)\n self.type = \"plot\"\n else:\n self.type = type\n super().__init__(label)\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"image\": {},\n \"segmented_image\": {\"labeled_segments\": True},\n \"plot\": {\"type\": \"plot\"},\n \"pil\": {\"type\": \"pil\"}\n }\n\n def postprocess(self, y):\n if self.labeled_segments:\n y, coordinates = y\n else:\n coordinates = []\n if self.type == \"auto\":\n if isinstance(y, np.ndarray):\n dtype = \"numpy\"\n elif isinstance(y, PIL.Image.Image):\n dtype = \"pil\"\n elif isinstance(y, str):\n dtype = \"file\"\n elif isinstance(y, ModuleType):\n dtype = \"plot\"\n else:\n raise ValueError(\"Unknown type. Please choose from: 'numpy', 'pil', 'file', 'plot'.\")\n else:\n dtype = self.type\n if dtype in [\"numpy\", \"pil\"]:\n if dtype == \"pil\":\n y = np.array(y)\n out_y = processing_utils.encode_array_to_base64(y)\n elif dtype == \"file\":\n out_y = processing_utils.encode_file_to_base64(y)\n elif dtype == \"plot\":\n out_y = processing_utils.encode_plot_to_base64(y)\n else:\n raise ValueError(\"Unknown type: \" + dtype + \". Please choose from: 'numpy', 'pil', 'file', 'plot'.\")\n return out_y, coordinates\n\n def save_flagged(self, dir, label, data, encryption_key):\n \"\"\"\n Returns: (str) path to image file\n \"\"\"\n return self.save_flagged_file(dir, label, data[0], encryption_key)\n\n\nclass Video(OutputComponent):\n '''\n Used for video output. \n Output type: filepath\n Demos: video_flip.py\n '''\n\n def __init__(self, label=None):\n '''\n Parameters:\n label (str): component name in interface.\n '''\n super().__init__(label)\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"video\": {},\n }\n\n def postprocess(self, y):\n return processing_utils.encode_file_to_base64(y, type=\"video\")\n\n def save_flagged(self, dir, label, data, encryption_key):\n \"\"\"\n Returns: (str) path to image file\n \"\"\"\n return self.save_flagged_file(dir, label, data, encryption_key)\n\n\nclass KeyValues(OutputComponent):\n '''\n Component displays a table representing values for multiple fields. 
\n Output type: Union[Dict, List[Tuple[str, Union[str, int, float]]]]\n Demos: text_analysis.py\n '''\n\n def __init__(self, label=None):\n '''\n Parameters:\n label (str): component name in interface.\n '''\n super().__init__(label)\n\n def postprocess(self, y):\n if isinstance(y, dict):\n return list(y.items())\n elif isinstance(y, list):\n return y\n else:\n raise ValueError(\"The `KeyValues` output interface expects an output that is a dictionary whose keys are \"\n \"labels and values are corresponding values.\")\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"key_values\": {},\n }\n \n def save_flagged(self, dir, label, data, encryption_key):\n return json.dumps(data)\n\n def restore_flagged(self, data):\n return json.loads(data)\n\n\nclass HighlightedText(OutputComponent):\n '''\n Component creates text that contains spans that are highlighted by category or numerical value.\n Output is represent as a list of Tuple pairs, where the first element represents the span of text represented by the tuple, and the second element represents the category or value of the text.\n Output type: List[Tuple[str, Union[float, str]]]\n Demos: diff_texts.py, text_analysis.py\n '''\n\n def __init__(self, color_map=None, label=None):\n '''\n Parameters:\n color_map (Dict[str, str]): Map between category and respective colors\n label (str): component name in interface.\n '''\n self.color_map = color_map\n super().__init__(label)\n\n def get_template_context(self):\n return {\n \"color_map\": self.color_map,\n **super().get_template_context()\n }\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"highlight\": {},\n }\n\n def postprocess(self, y):\n return y\n\n def save_flagged(self, dir, label, data, encryption_key):\n return json.dumps(data)\n\n def restore_flagged(self, data):\n return json.loads(data)\n\n\nclass Audio(OutputComponent):\n '''\n Creates an audio player that plays the output audio.\n Output type: Union[Tuple[int, numpy.array], str]\n Demos: generate_tone.py, reverse_audio.py\n '''\n\n def __init__(self, type=\"auto\", label=None):\n '''\n Parameters:\n type (str): Type of value to be passed to component. \"numpy\" returns a 2-set tuple with an integer sample_rate and the data numpy.array of shape (samples, 2), \"file\" returns a temporary file path to the saved wav audio file, \"auto\" detects return type.\n label (str): component name in interface.\n '''\n self.type = type\n super().__init__(label)\n\n def get_template_context(self):\n return {\n **super().get_template_context()\n }\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"audio\": {},\n }\n\n def postprocess(self, y):\n if self.type in [\"numpy\", \"file\", \"auto\"]:\n if self.type == \"numpy\" or (self.type == \"auto\" and isinstance(y, tuple)):\n file = tempfile.NamedTemporaryFile(delete=False)\n scipy.io.wavfile.write(file, y[0], y[1]) \n y = file.name\n return processing_utils.encode_file_to_base64(y, type=\"audio\", ext=\"wav\")\n else:\n raise ValueError(\"Unknown type: \" + self.type + \". Please choose from: 'numpy', 'file'.\")\n\n def save_flagged(self, dir, label, data, encryption_key):\n \"\"\"\n Returns: (str) path to audio file\n \"\"\"\n return self.save_flagged_file(dir, label, data, encryption_key)\n\n\nclass JSON(OutputComponent):\n '''\n Used for JSON output. Expects a JSON string or a Python object that is JSON serializable. 
\n Output type: Union[str, Any]\n Demos: zip_to_json.py\n '''\n\n def __init__(self, label=None):\n '''\n Parameters:\n label (str): component name in interface.\n '''\n super().__init__(label)\n\n def postprocess(self, y):\n if isinstance(y, str):\n return json.dumps(y)\n else:\n return y\n\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"json\": {},\n }\n\n def save_flagged(self, dir, label, data, encryption_key):\n return json.dumps(data)\n\n def restore_flagged(self, data):\n return json.loads(data)\n\n\nclass HTML(OutputComponent):\n '''\n Used for HTML output. Expects an HTML valid string. \n Output type: str\n Demos: text_analysis.py\n '''\n\n def __init__(self, label=None):\n '''\n Parameters:\n label (str): component name in interface.\n '''\n super().__init__(label)\n\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"html\": {},\n }\n\n\nclass File(OutputComponent):\n '''\n Used for file output. \n Output type: Union[file-like, str]\n Demos: zip_two_files.py\n '''\n\n def __init__(self, label=None):\n '''\n Parameters:\n label (str): component name in interface.\n '''\n super().__init__(label)\n\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"file\": {},\n }\n\n def postprocess(self, y):\n return {\n \"name\": os.path.basename(y),\n \"size\": os.path.getsize(y), \n \"data\": processing_utils.encode_file_to_base64(y, header=False)\n }\n\n def save_flagged(self, dir, label, data, encryption_key):\n \"\"\"\n Returns: (str) path to image file\n \"\"\"\n return self.save_flagged_file(dir, label, data[\"data\"], encryption_key)\n\n\nclass Dataframe(OutputComponent):\n \"\"\"\n Component displays 2D output through a spreadsheet interface.\n Output type: Union[pandas.DataFrame, numpy.array, List[Union[str, float]], List[List[Union[str, float]]]]\n Demos: filter_records.py, matrix_transpose.py, fraud_detector.py\n \"\"\"\n\n def __init__(self, headers=None, max_rows=20, max_cols=None, overflow_row_behaviour=\"paginate\", type=\"auto\", label=None):\n '''\n Parameters:\n headers (List[str]): Header names to dataframe. Only applicable if type is \"numpy\" or \"array\".\n max_rows (int): Maximum number of rows to display at once. Set to None for infinite. \n max_cols (int): Maximum number of columns to display at once. Set to None for infinite.\n overflow_row_behaviour (str): If set to \"paginate\", will create pages for overflow rows. If set to \"show_ends\", will show initial and final rows and truncate middle rows. \n type (str): Type of value to be passed to component. 
\"pandas\" for pandas dataframe, \"numpy\" for numpy array, or \"array\" for Python array, \"auto\" detects return type.\n label (str): component name in interface.\n '''\n self.headers = headers\n self.max_rows = max_rows\n self.max_cols = max_cols\n self.overflow_row_behaviour = overflow_row_behaviour\n self.type = type\n super().__init__(label)\n\n\n def get_template_context(self):\n return {\n \"headers\": self.headers,\n \"max_rows\": self.max_rows,\n \"max_cols\": self.max_cols,\n \"overflow_row_behaviour\": self.overflow_row_behaviour,\n **super().get_template_context()\n }\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"dataframe\": {},\n \"numpy\": {\"type\": \"numpy\"},\n \"matrix\": {\"type\": \"array\"},\n \"list\": {\"type\": \"array\"},\n }\n\n def postprocess(self, y):\n if self.type == \"auto\":\n if isinstance(y, pd.core.frame.DataFrame):\n dtype = \"pandas\"\n elif isinstance(y, np.ndarray):\n dtype = \"numpy\"\n elif isinstance(y, list):\n dtype = \"array\"\n else:\n dtype = self.type\n if dtype == \"pandas\":\n return {\"headers\": list(y.columns), \"data\": y.values.tolist()}\n elif dtype in (\"numpy\", \"array\"):\n if dtype == \"numpy\":\n y = y.tolist()\n if len(y) == 0 or not isinstance(y[0], list):\n y = [y]\n return {\"data\": y} \n else:\n raise ValueError(\"Unknown type: \" + self.type + \". Please choose from: 'pandas', 'numpy', 'array'.\")\n\n def save_flagged(self, dir, label, data, encryption_key):\n \"\"\"\n Returns: (List[List[Union[str, float]]]) 2D array\n \"\"\"\n return json.dumps(data[\"data\"])\n\n def restore_flagged(self, data):\n return json.loads(data)\n\n\nclass Carousel(OutputComponent):\n \"\"\"\n Component displays a set of output components that can be scrolled through.\n Output type: List[List[Any]]\n Demos: disease_report.py\n \"\"\"\n\n def __init__(self, components, label=None):\n '''\n Parameters:\n components (Union[List[OutputComponent], OutputComponent]): Classes of component(s) that will be scrolled through.\n label (str): component name in interface.\n '''\n if not isinstance(components, list):\n components = [components]\n self.components = [get_output_instance(component) for component in components]\n super().__init__(label)\n\n\n def get_template_context(self):\n return {\n \"components\": [component.get_template_context() for component in self.components],\n **super().get_template_context()\n }\n\n def postprocess(self, y):\n if isinstance(y, list):\n if len(y) != 0 and not isinstance(y[0], list):\n y = [[z] for z in y]\n output = []\n for row in y:\n output_row = []\n for i, cell in enumerate(row):\n output_row.append(self.components[i].postprocess(cell))\n output.append(output_row)\n return output\n else:\n raise ValueError(\"Unknown type. 
Please provide a list for the Carousel.\")\n\n def save_flagged(self, dir, label, data, encryption_key):\n return json.dumps([\n [\n component.save_flagged(dir, f\"{label}_{j}\", data[i][j], encryption_key)\n for j, component in enumerate(self.components)\n ] for i, sample in enumerate(data)])\n\n\n\ndef get_output_instance(iface):\n if isinstance(iface, str):\n shortcut = OutputComponent.get_all_shortcut_implementations()[iface]\n return shortcut[0](**shortcut[1])\n elif isinstance(iface, OutputComponent):\n return iface\n else:\n raise ValueError(\n \"Output interface must be of type `str` or \"\n \"`OutputComponent`\"\n )\n\nclass Timeseries(OutputComponent):\n \"\"\"\n Component accepts pandas.DataFrame.\n Output type: pandas.DataFrame\n Demos: fraud_detector.py\n \"\"\"\n\n def __init__(self, x=None, y=None, label=None):\n \"\"\"\n Parameters:\n x (str): Column name of x (time) series. None if csv has no headers, in which case first column is x series.\n y (Union[str, List[str]]): Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series.\n label (str): component name in interface.\n \"\"\"\n self.x = x\n if isinstance(y, str):\n y = [y]\n self.y = y\n super().__init__(label)\n\n def get_template_context(self):\n return {\n \"x\": self.x,\n \"y\": self.y,\n **super().get_template_context()\n }\n\n @classmethod\n def get_shortcut_implementations(cls):\n return {\n \"timeseries\": {},\n }\n\n def postprocess(self, y):\n return {\n \"headers\": y.columns.values.tolist(),\n \"data\": y.values.tolist()\n\n }\n\n\n def save_flagged(self, dir, label, data, encryption_key):\n \"\"\"\n Returns: (List[List[Union[str, float]]]) 2D array\n \"\"\"\n return json.dumps(data)\n\n def restore_flagged(self, data):\n return json.loads(data)\n\n"
] |
[
[
"numpy.array",
"scipy.io.wavfile.write"
]
] |
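A minimal standalone sketch (not part of the dataset record above) of the type-dispatch idea in the gradio Dataframe.postprocess method shown in that record: with type "auto" it inspects the function's return value and normalizes pandas, numpy, and plain-list outputs into one dict payload. The helper name dataframe_postprocess is hypothetical; only the dispatch logic is taken from the record.

import numpy as np
import pandas as pd

def dataframe_postprocess(y, dtype="auto"):  # hypothetical standalone replica
    # "auto" inspects the return value and picks the matching branch.
    if dtype == "auto":
        if isinstance(y, pd.DataFrame):
            dtype = "pandas"
        elif isinstance(y, np.ndarray):
            dtype = "numpy"
        elif isinstance(y, list):
            dtype = "array"
    if dtype == "pandas":
        # Column names travel separately from the cell values.
        return {"headers": list(y.columns), "data": y.values.tolist()}
    if dtype in ("numpy", "array"):
        if dtype == "numpy":
            y = y.tolist()
        if len(y) == 0 or not isinstance(y[0], list):
            y = [y]  # promote a 1-D result to a single-row 2-D table
        return {"data": y}
    raise ValueError("Unknown type: " + str(dtype))

print(dataframe_postprocess(np.arange(4).reshape(2, 2)))
# -> {'data': [[0, 1], [2, 3]]}
print(dataframe_postprocess(pd.DataFrame({"a": [1], "b": [2]})))
# -> {'headers': ['a', 'b'], 'data': [[1, 2]]}

The single payload shape is what lets the frontend render every output kind (pandas, numpy, list-of-lists) with one spreadsheet component.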
sjmluo/myfuzzbench
|
[
"d953a65feab096aa958a338dadbeb746602684da"
] |
[
"analysis/plotting.py"
] |
[
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Plotting functions.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport numpy as np\nimport Orange\nimport seaborn as sns\n\nfrom analysis import data_utils\nfrom common import experiment_utils\n\n_DEFAULT_TICKS_COUNT = 12\n_DEFAULT_LABEL_ROTATION = 30\n\n\ndef _formatted_hour_min(seconds):\n \"\"\"Turns |seconds| seconds into %H:%m format.\n\n We don't use to_datetime() or to_timedelta(), because we want to\n show hours larger than 23, e.g.: 24h:00m.\n \"\"\"\n time_string = ''\n hours = int(seconds / 60 / 60)\n minutes = int(seconds / 60) % 60\n if hours:\n time_string += '%dh' % hours\n if minutes:\n if hours:\n time_string += ':'\n time_string += '%dm' % minutes\n return time_string\n\n\ndef _formatted_title(benchmark_snapshot_df):\n \"\"\"Return a formatted title with time and trial count.\"\"\"\n benchmark_name = benchmark_snapshot_df.benchmark.unique()[0]\n stats_string = benchmark_name\n stats_string += ' ('\n\n snapshot_time = benchmark_snapshot_df.time.unique()[0]\n stats_string += _formatted_hour_min(snapshot_time)\n\n trial_count = benchmark_snapshot_df.fuzzer.value_counts().min()\n stats_string += ', %d trials/fuzzer' % trial_count\n stats_string += ')'\n return stats_string\n\n\nclass Plotter:\n \"\"\"Plotter that uses the same color for the same fuzzer.\"\"\"\n # Tableau 20 colors.\n _COLOR_PALETTE = [\n '#1f77b4',\n '#98df8a',\n '#d62728',\n '#c7c7c7',\n '#ff7f0e',\n '#ff9896',\n '#e377c2',\n '#dbdb8d',\n '#2ca02c',\n '#c5b0d5',\n '#7f7f7f',\n '#9edae5',\n '#aec7e8',\n '#8c564b',\n '#c49c94',\n '#bcbd22',\n '#ffbb78',\n '#9467bd',\n '#f7b6d2',\n '#17becf',\n ]\n\n # We need a manually specified marker list due to:\n # https://github.com/mwaskom/seaborn/issues/1513\n # We specify 20 markers for the 20 colors above.\n _MARKER_PALETTE = [\n 'o', 'v', '^', '<', '>', '8', 's', 'p', '*', 'h', 'H', 'D', 'd', 'P',\n 'X', ',', '+', 'x', '|', '_'\n ]\n\n def __init__(self, fuzzers, quick=False, logscale=False):\n \"\"\"Instantiates plotter with list of |fuzzers|. 
If |quick| is True,\n creates plots faster but, with less detail.\n \"\"\"\n self._fuzzer_colors = {\n fuzzer: self._COLOR_PALETTE[idx % len(self._COLOR_PALETTE)]\n for idx, fuzzer in enumerate(sorted(fuzzers))\n }\n self._fuzzer_markers = {\n fuzzer: self._MARKER_PALETTE[idx % len(self._MARKER_PALETTE)]\n for idx, fuzzer in enumerate(sorted(fuzzers))\n }\n\n self._quick = quick\n self._logscale = logscale\n\n # pylint: disable=no-self-use\n def _write_plot_to_image(self,\n plot_function,\n data,\n image_path,\n wide=False,\n **kwargs):\n \"\"\"Writes the result of |plot_function(data)| to |image_path|.\n\n If |wide|, then the image size will be twice as wide as normal.\n \"\"\"\n width = 6.4\n height = 4.8\n figsize = (2 * width, height) if wide else (width, height)\n fig, axes = plt.subplots(figsize=figsize)\n try:\n plot_function(data, axes=axes, **kwargs)\n fig.savefig(image_path, bbox_inches=\"tight\")\n finally:\n plt.close(fig)\n\n def _common_datafame_checks(self, benchmark_df, snapshot=False):\n \"\"\"Assertions common to several plotting functions.\"\"\"\n benchmark_names = benchmark_df.benchmark.unique()\n assert len(benchmark_names) == 1, 'Not a single benchmark data!'\n if snapshot:\n assert benchmark_df.time.nunique() == 1, 'Not a snapshot!'\n\n def coverage_growth_plot(self,\n benchmark_df,\n axes=None,\n logscale=False,\n bugs=False):\n \"\"\"Draws edge (or bug) coverage growth plot on given |axes|.\n\n The fuzzer labels will be in the order of their mean coverage at the\n snapshot time (typically, the end of experiment).\n \"\"\"\n self._common_datafame_checks(benchmark_df)\n\n column_of_interest = 'bugs_covered' if bugs else 'edges_covered'\n\n benchmark_snapshot_df = data_utils.get_benchmark_snapshot(benchmark_df)\n snapshot_time = benchmark_snapshot_df.time.unique()[0]\n fuzzer_order = data_utils.benchmark_rank_by_mean(\n benchmark_snapshot_df, key=column_of_interest).index\n\n axes = sns.lineplot(\n y=column_of_interest,\n x='time',\n hue='fuzzer',\n hue_order=fuzzer_order,\n data=benchmark_df[benchmark_df.time <= snapshot_time],\n ci=None if bugs or self._quick else 95,\n estimator=np.median,\n palette=self._fuzzer_colors,\n style='fuzzer',\n dashes=False,\n markers=self._fuzzer_markers,\n ax=axes)\n\n axes.set_title(_formatted_title(benchmark_snapshot_df))\n\n # Indicate the snapshot time with a big red vertical line.\n axes.axvline(x=snapshot_time, color='r')\n\n # Move legend outside of the plot.\n axes.legend(bbox_to_anchor=(1.00, 1),\n borderaxespad=0,\n loc='upper left',\n frameon=False)\n\n axes.set(ylabel='Bug coverage' if bugs else 'Code region coverage')\n axes.set(xlabel='Time (hour:minute)')\n\n if self._logscale or logscale:\n axes.set_xscale('log')\n ticks = np.logspace(\n # Start from the time of the first measurement.\n np.log10(experiment_utils.DEFAULT_SNAPSHOT_SECONDS),\n np.log10(snapshot_time + 1), # Include tick at end time.\n _DEFAULT_TICKS_COUNT)\n else:\n ticks = np.arange(\n experiment_utils.DEFAULT_SNAPSHOT_SECONDS,\n snapshot_time + 1, # Include tick at end time.\n snapshot_time / _DEFAULT_TICKS_COUNT)\n\n axes.set_xticks(ticks)\n axes.set_xticklabels([_formatted_hour_min(t) for t in ticks])\n\n sns.despine(ax=axes, trim=True)\n\n def write_coverage_growth_plot( # pylint: disable=too-many-arguments\n self,\n benchmark_df,\n image_path,\n wide=False,\n logscale=False,\n bugs=False):\n \"\"\"Writes coverage growth plot.\"\"\"\n self._write_plot_to_image(self.coverage_growth_plot,\n benchmark_df,\n image_path,\n wide=wide,\n 
logscale=logscale,\n bugs=bugs)\n\n def box_or_violin_plot(self,\n benchmark_snapshot_df,\n axes=None,\n bugs=False,\n violin=False):\n \"\"\"Draws a box or violin plot based on parameter.\n\n The fuzzer labels will be in the order of their median coverage.\n With boxplot the median/min/max/etc is more visible than on the violin,\n especially with distributions with high variance. It does not have\n however violinplot's kernel density estimation.\n \"\"\"\n self._common_datafame_checks(benchmark_snapshot_df, snapshot=True)\n\n column_of_interest = 'bugs_covered' if bugs else 'edges_covered'\n\n fuzzer_order = data_utils.benchmark_rank_by_median(\n benchmark_snapshot_df, key=column_of_interest).index\n\n mean_props = {\n 'markersize': '10',\n 'markeredgecolor': 'black',\n 'markerfacecolor': 'white'\n }\n\n common_args = dict(y=column_of_interest,\n x='fuzzer',\n data=benchmark_snapshot_df,\n order=fuzzer_order,\n ax=axes)\n\n if violin:\n sns.violinplot(**common_args, palette=self._fuzzer_colors)\n\n else:\n sns.boxplot(**common_args,\n palette=self._fuzzer_colors,\n showmeans=True,\n meanprops=mean_props)\n\n sns.stripplot(**common_args, size=3, color=\"black\", alpha=0.6)\n\n axes.set_title(_formatted_title(benchmark_snapshot_df))\n ylabel = 'Reached {} coverage'.format('bug' if bugs else 'region')\n axes.set(ylabel=ylabel)\n axes.set(xlabel='Fuzzer (highest median coverage on the left)')\n axes.set_xticklabels(axes.get_xticklabels(),\n rotation=_DEFAULT_LABEL_ROTATION,\n horizontalalignment='right')\n\n sns.despine(ax=axes, trim=True)\n\n def write_violin_plot(self, benchmark_snapshot_df, image_path, bugs=False):\n \"\"\"Writes violin plot.\"\"\"\n self._write_plot_to_image(self.box_or_violin_plot,\n benchmark_snapshot_df,\n image_path,\n bugs=bugs,\n violin=True)\n\n def write_box_plot(self, benchmark_snapshot_df, image_path, bugs=False):\n \"\"\"Writes box plot.\"\"\"\n self._write_plot_to_image(self.box_or_violin_plot,\n benchmark_snapshot_df,\n image_path,\n bugs=bugs)\n\n def distribution_plot(self, benchmark_snapshot_df, axes=None, bugs=False):\n \"\"\"Draws distribution plot.\n\n The fuzzer labels will be in the order of their median coverage.\n \"\"\"\n self._common_datafame_checks(benchmark_snapshot_df, snapshot=True)\n\n column_of_interest = 'bugs_covered' if bugs else 'edges_covered'\n\n fuzzers_in_order = data_utils.benchmark_rank_by_median(\n benchmark_snapshot_df, key=column_of_interest).index\n for fuzzer in fuzzers_in_order:\n measurements_for_fuzzer = benchmark_snapshot_df[\n benchmark_snapshot_df.fuzzer == fuzzer]\n sns.distplot(measurements_for_fuzzer[column_of_interest],\n hist=False,\n label=fuzzer,\n color=self._fuzzer_colors[fuzzer],\n ax=axes)\n\n axes.set_title(_formatted_title(benchmark_snapshot_df))\n axes.legend(loc='upper right', frameon=False)\n\n axes.set(xlabel='Bug coverage' if bugs else 'Code region coverage')\n axes.set(ylabel='Density')\n axes.set_xticklabels(axes.get_xticklabels(),\n rotation=_DEFAULT_LABEL_ROTATION,\n horizontalalignment='right')\n\n def write_distribution_plot(self, benchmark_snapshot_df, image_path):\n \"\"\"Writes distribution plot.\"\"\"\n self._write_plot_to_image(self.distribution_plot, benchmark_snapshot_df,\n image_path)\n\n def ranking_plot(self, benchmark_snapshot_df, axes=None, bugs=False):\n \"\"\"Draws ranking plot.\n\n The fuzzer labels will be in the order of their median coverage.\n \"\"\"\n self._common_datafame_checks(benchmark_snapshot_df, snapshot=True)\n\n column_of_interest = 'bugs_covered' if bugs else 
'edges_covered'\n\n fuzzer_order = data_utils.benchmark_rank_by_median(\n benchmark_snapshot_df, key=column_of_interest).index\n\n axes = sns.barplot(y=column_of_interest,\n x='fuzzer',\n data=benchmark_snapshot_df,\n order=fuzzer_order,\n estimator=np.median,\n palette=self._fuzzer_colors,\n ax=axes)\n\n axes.set_title(_formatted_title(benchmark_snapshot_df))\n ylabel = 'Reached {} coverage'.format('bug' if bugs else 'region')\n axes.set(ylabel=ylabel)\n axes.set(xlabel='Fuzzer (highest median coverage on the left)')\n axes.set_xticklabels(axes.get_xticklabels(),\n rotation=_DEFAULT_LABEL_ROTATION,\n horizontalalignment='right')\n\n sns.despine(ax=axes, trim=True)\n\n def write_ranking_plot(self, benchmark_snapshot_df, image_path):\n \"\"\"Writes ranking plot.\"\"\"\n self._write_plot_to_image(self.ranking_plot, benchmark_snapshot_df,\n image_path)\n\n def better_than_plot(self, better_than_table, axes=None):\n \"\"\"Draws better than plot.\"\"\"\n cmap = ['white', '#005a32']\n sns.heatmap(better_than_table,\n vmin=0,\n vmax=1,\n cmap=cmap,\n linewidths=0.5,\n linecolor='0.5',\n cbar=False,\n ax=axes)\n\n axes.set_title('One-tailed statistical test result')\n axes.set(ylabel='If green, then fuzzer in the row')\n xlabel = 'is statistically significantly better than fuzzer in column.'\n axes.set(xlabel=xlabel)\n axes.set_xticklabels(axes.get_xticklabels(),\n rotation=_DEFAULT_LABEL_ROTATION,\n horizontalalignment='right')\n\n def write_better_than_plot(self, better_than_table, image_path):\n \"\"\"Writes better than plot.\"\"\"\n self._write_plot_to_image(self.better_than_plot, better_than_table,\n image_path)\n\n @staticmethod\n def _generic_heatmap_plot(values, axes, args, shrink_cbar=0.2):\n \"\"\"Custom heatmap plot which mimics SciPy's sign_plot.\"\"\"\n args.update({'linewidths': 0.5, 'linecolor': '0.5', 'square': True})\n # Annotate with values if less than 12 fuzzers.\n if values.shape[0] > 11 and args.get('annot'):\n args['annot'] = False\n\n axis = sns.heatmap(values, ax=axes, **args)\n axis.set_ylabel(\"\")\n axis.set_xlabel(\"\")\n label_args = {'rotation': 0, 'horizontalalignment': 'right'}\n axis.set_yticklabels(axis.get_yticklabels(), **label_args)\n label_args = {'rotation': 270, 'horizontalalignment': 'right'}\n axis.set_xticklabels(axis.get_xticklabels(), **label_args)\n\n cbar_ax = axis.collections[0].colorbar\n cbar_ax.outline.set_linewidth(1)\n cbar_ax.outline.set_edgecolor('0.5')\n\n pos_bbox = cbar_ax.ax.get_position()\n pos_bbox.y0 += shrink_cbar\n pos_bbox.y1 -= shrink_cbar\n cbar_ax.ax.set_position(pos_bbox)\n return axis\n\n def _pvalue_heatmap_plot(self, p_values, axes=None, symmetric=False):\n \"\"\"Draws heatmap plot for visualizing statistical test results.\n\n If |symmetric| is enabled, it masks out the upper triangle of the\n p-value table (as it is redundant with the lower triangle).\n \"\"\"\n cmap_colors = ['#005a32', '#238b45', '#a1d99b', '#fbd7d4']\n cmap = colors.ListedColormap(cmap_colors)\n\n # TODO(lszekeres): Add 1 back to this list.\n boundaries = [0, 0.001, 0.01, 0.05]\n norm = colors.BoundaryNorm(boundaries, cmap.N)\n\n if symmetric:\n mask = np.zeros_like(p_values)\n mask[np.triu_indices_from(p_values)] = True\n\n heatmap_args = {\n 'cmap': cmap,\n 'mask': mask if symmetric else None,\n 'fmt': \".3f\",\n 'norm': norm\n }\n\n axis = self._generic_heatmap_plot(p_values, axes, heatmap_args)\n\n cbar_ax = axis.collections[0].colorbar\n cbar_ax.set_ticklabels(['p < 0.001', 'p < 0.01', 'p < 0.05', 'NS'])\n cbar_ax.set_ticks([0.0005, 0.005, 0.03, 
0.5])\n cbar_ax.ax.tick_params(size=0)\n return axis\n\n def write_heatmap_plot(self, p_values, image_path, symmetric=False):\n \"\"\"Writes heatmap plot.\"\"\"\n self._write_plot_to_image(self._pvalue_heatmap_plot,\n p_values,\n image_path,\n symmetric=symmetric)\n\n def _a12_heatmap_plot(self, a12_values, axes=None):\n \"\"\"Draws heatmap plot for visualizing effect size results.\n \"\"\"\n\n palette_args = {\n 'h_neg': 12,\n 'h_pos': 128,\n 's': 99,\n 'l': 47,\n 'sep': 20,\n 'as_cmap': True\n }\n\n rdgn = sns.diverging_palette(**palette_args)\n\n heatmap_args = {\n 'cmap': rdgn,\n 'vmin': 0.0,\n 'vmax': 1.0,\n 'square': True,\n 'annot': True,\n 'fmt': \".2f\"\n }\n return self._generic_heatmap_plot(a12_values,\n axes,\n heatmap_args,\n shrink_cbar=0.1)\n\n def write_a12_heatmap_plot(self, a12_values, image_path):\n \"\"\"Writes A12 heatmap plot.\"\"\"\n self._write_plot_to_image(self._a12_heatmap_plot, a12_values,\n image_path)\n\n def write_critical_difference_plot(self, average_ranks, num_of_benchmarks,\n image_path):\n \"\"\"Writes critical difference diagram.\"\"\"\n critical_difference = Orange.evaluation.compute_CD(\n average_ranks.values, num_of_benchmarks)\n\n Orange.evaluation.graph_ranks(average_ranks.values, average_ranks.index,\n critical_difference)\n fig = plt.gcf()\n try:\n fig.savefig(image_path, bbox_inches=\"tight\")\n finally:\n plt.close(fig)\n\n def unique_coverage_ranking_plot(self,\n unique_region_cov_df_combined,\n axes=None):\n \"\"\"Draws unique_coverage_ranking plot. The fuzzer labels will be in\n the order of their coverage.\"\"\"\n\n fuzzer_order = unique_region_cov_df_combined.sort_values(\n by='unique_regions_covered', ascending=False).fuzzer\n\n axes = sns.barplot(y='unique_regions_covered',\n x='fuzzer',\n data=unique_region_cov_df_combined,\n order=fuzzer_order,\n palette=self._fuzzer_colors,\n ax=axes)\n\n for patch in axes.patches:\n axes.annotate(\n format(patch.get_height(), '.0f'),\n (patch.get_x() + patch.get_width() / 2., patch.get_height()),\n ha='center',\n va='center',\n xytext=(0, 10),\n textcoords='offset points')\n\n sns.barplot(y='aggregated_edges_covered',\n x='fuzzer',\n data=unique_region_cov_df_combined,\n order=fuzzer_order,\n facecolor=(1, 1, 1, 0),\n edgecolor='0.2',\n ax=axes)\n\n axes.set(ylabel='Reached unique edge coverage')\n axes.set(xlabel='Fuzzer (highest coverage on the left)')\n axes.set_xticklabels(axes.get_xticklabels(),\n rotation=_DEFAULT_LABEL_ROTATION,\n horizontalalignment='right')\n\n sns.despine(ax=axes, trim=True)\n\n def write_unique_coverage_ranking_plot(self, unique_region_cov_df_combined,\n image_path):\n \"\"\"Writes ranking plot for unique coverage.\"\"\"\n self._write_plot_to_image(self.unique_coverage_ranking_plot,\n unique_region_cov_df_combined,\n image_path,\n wide=True)\n\n def pairwise_unique_coverage_heatmap_plot(self,\n pairwise_unique_coverage_table,\n axes=None):\n \"\"\"Draws the heatmap to visualize the unique coverage between\n each pair of fuzzers.\"\"\"\n heatmap_args = {\n 'annot': True,\n 'fmt': 'd',\n 'cmap': 'Blues',\n 'linewidths': 0.5\n }\n axes = sns.heatmap(pairwise_unique_coverage_table,\n ax=axes,\n **heatmap_args)\n axes.set(ylabel='Not covered by')\n axes.set(xlabel='Covered by')\n\n def write_pairwise_unique_coverage_heatmap_plot(\n self, pairwise_unique_coverage_table, image_path):\n \"\"\"Writes pairwise unique coverage heatmap plot.\"\"\"\n self._write_plot_to_image(self.pairwise_unique_coverage_heatmap_plot,\n pairwise_unique_coverage_table,\n image_path,\n wide=True)\n"
] |
[
[
"matplotlib.colors.BoundaryNorm",
"numpy.arange",
"numpy.triu_indices_from",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.gcf",
"numpy.log10",
"matplotlib.colors.ListedColormap",
"numpy.zeros_like",
"matplotlib.pyplot.close"
]
] |
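A short sketch (not part of the record) of the stable-styling trick used by the Plotter class in the plotting.py record above: enumerating the sorted fuzzer list, modulo the palette length, gives each fuzzer the same color and marker in every plot, regardless of the order fuzzers appear in any particular dataframe. The three-color palette and fuzzer names here are illustrative stand-ins for the record's 20-entry Tableau palette.

# Stable fuzzer -> color assignment: sort first, then cycle the palette.
_COLOR_PALETTE = ['#1f77b4', '#98df8a', '#d62728']  # truncated for brevity
fuzzers = ['libfuzzer', 'afl', 'honggfuzz']
fuzzer_colors = {
    fuzzer: _COLOR_PALETTE[idx % len(_COLOR_PALETTE)]
    for idx, fuzzer in enumerate(sorted(fuzzers))
}
print(fuzzer_colors['afl'])  # '#1f77b4' no matter how `fuzzers` is ordered

The same construction is applied to the marker list, so a fuzzer keeps one consistent (color, marker) identity across coverage-growth, box, ranking, and distribution plots.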
MrForExample/Weighted-UFC-Fight-Predictor
|
[
"bd5744bfb8fcab4d054a7e038ccc3e854c5fd4fa"
] |
[
"Code/Feature_Engineering/BuildFightData_No_NC_And_DCD.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport calendar\nimport re\nimport datetime\nimport dateutil\nfrom ast import literal_eval\n\nfrom os import path\nimport pickle\nfrom Code.Settings import build_settings as bs\n\ndef init_columns_name(last_words, base_columns):\n new_columns_name = []\n for col in base_columns:\n for i in range(2):\n new_columns_name.append(col + last_words + str(i))\n return new_columns_name\n\n# Define some global average columns, columns that related to other fighter's physical attributes\nstate_data_columns = ['TKO_WIN_%', 'SUB_WIN_%', 'DEC_WIN_%']\nstate_side_columns = ['TKO_LOSS_%', 'SUB_LOSS_%', 'DEC_LOSS_%']\nfighter_dynamic_columns = ['AGE', 'FIGHT_COUNT', 'FIGHT_MINUTE', 'REV_I', 'REV_P', 'STANCE_ORT', 'STANCE_SOU', 'STANCE_SWI']\nfighter_static_columns = ['REACH', 'HEIGHT']\n\nfight_state_data_columns = init_columns_name('_', state_data_columns)\nfight_state_side_columns = init_columns_name('_', state_side_columns)\nfight_fighter_dynamic_columns = init_columns_name('_', fighter_dynamic_columns)\nfight_fighter_static_columns = init_columns_name('_', fighter_static_columns)\nfight_other_data_columns = ['MAX_FIGHT_TIME', 'WOMEN', 'STR', 'FLY', 'BAN', 'FEA', 'LIG', 'WEL', 'MID', 'LIGHEA', 'HEA']\n\nfight_input_columns = fight_state_data_columns + fight_state_side_columns + fight_fighter_dynamic_columns + fight_fighter_static_columns + fight_other_data_columns\n\n# Define weight value columns and they related average and per minute columns\nneed_weight_data_columns = ['KD', 'SIG_STR', 'TOTAL_STR', 'TD', 'SUB_ATT', 'CTRL', 'HEAD', 'BODY', 'LEG', 'DISTANCE', 'CLINCH', 'GROUND']\nneed_weight_side_columns = ['KD_DEF', 'SIG_DEF', 'TOTAL_DEF', 'TD_DEF', 'SUB_ATT_DEF', 'CTRL_DEF', 'HEAD_DEF', 'BODY_DEF', 'LEG_DEF', 'DISTANCE_DEF', 'CLINCH_DEF', 'GROUND_DEF']\n\nfight_weighted_data_columns = init_columns_name('_W_', need_weight_data_columns)\nfight_weighted_side_columns = init_columns_name('_W_', need_weight_side_columns)\nfight_avg_data_columns = init_columns_name('_AVG_', need_weight_data_columns)\nfight_avg_side_columns = init_columns_name('_AVG_', need_weight_side_columns)\nfight_pm_data_columns = init_columns_name('_PM_', need_weight_data_columns)\nfight_pm_side_columns = init_columns_name('_PM_', need_weight_side_columns)\n\nfight_input_columns += fight_weighted_data_columns + fight_weighted_side_columns + fight_avg_data_columns + fight_avg_side_columns + fight_pm_data_columns + fight_pm_side_columns\n\ninit_value = dict.fromkeys(need_weight_data_columns + need_weight_side_columns, 0.5)\nextra_init_value = {'KD': 0, 'KD_DEF': 1, 'SUB_ATT': 0, 'SUB_ATT_DEF': 1}\ninit_value.update(extra_init_value)\n\n# Define training target columns\ntarget_win_columns = ['TKO_WIN_%_D', 'SUB_WIN_%_D', 'DEC_WIN_%_D'] \ntarget_win_columns = init_columns_name('_TAR_', target_win_columns)\ntarget_n_raw_data_columns = ['KD_N', 'CTRL_N', 'REV_N', 'SUB_ATT_N']\ntarget_n_data_columns = init_columns_name('_TAR_', target_n_raw_data_columns)\ntarget_np_raw_data_columns = ['SIG_STR_N', 'SIG_STR_%_P', 'TOTAL_STR_N', 'TOTAL_STR_%_P', 'TD_N', 'TD_%_P', \n 'HEAD_N', 'HEAD_%_P', 'BODY_N', 'BODY_%_P', 'LEG_N', 'LEG_%_P', 'DISTANCE_N', 'DISTANCE_%_P', 'CLINCH_N', 'CLINCH_%_P', 'GROUND_N', 'GROUND_%_P']\ntarget_np_data_columns = init_columns_name('_TAR_', target_np_raw_data_columns)\ntarget_np_data_columns += ['FIGHT_MINUTE_P_TAR_2']\nfight_target_columns = target_win_columns + target_n_data_columns + target_np_data_columns\n\n# Build training data frame\nfight_train_columns = fight_input_columns 
+ fight_target_columns\nfight_train_df = pd.DataFrame(columns=fight_train_columns)\nfight_train_df_path = \"./Data/FightAllTrainTestData.csv\"\n\n# Load reformatted raw data frame\nlist_data_columns = ['KD', 'SIG_STR', 'SIG_STR_%', 'TOTAL_STR', 'TD', 'TD_%', 'SUB_ATT',\n 'REV', 'CTRL', 'HEAD', 'BODY', 'LEG', 'DISTANCE', 'CLINCH', 'GROUND']\nlist_data_columns = init_columns_name('_', list_data_columns)\nlist_data_columns = ['FIGHT_TIME', 'WIN', 'FIGHTER_ID', 'FIGHTER'] + list_data_columns\n\ncolumns_converters = {}\nfor col in list_data_columns:\n columns_converters[col] = literal_eval\n\nfight_reformat_df = pd.read_csv(\"./Data/FightReformatData.csv\", converters=columns_converters, index_col=0)\nfighter_reformat_df = pd.read_csv(\"./Data/FighterReformatData.csv\", index_col=0)\n\nfighter_build_info_path = \"./Data/fighter_build_info.pickle\"\n\ndef build_train_fight_data():\n global fight_train_df\n # Store info for build weighted values: \n # { fighter id: [ { last fight weighted values }, { all last fight numbers per minute }, { average values }, \n # { value update count }, [ all fight count, all fight minute ] ] }\n fighter_build_info = {}\n # Fight data is ordered by date, top to buttom -> Nearest to farthest\n reverse_reformat_df = fight_reformat_df.iloc[::-1]\n for _, reformat_row in reverse_reformat_df.iterrows(): \n new_fight_row = {}\n\n new_fight_row['MAX_FIGHT_TIME'] = reformat_row['FIGHT_TIME'][1]\n # Get fight weight class use one hot encoding\n fight_weight_class = reformat_row['WEIGHT_CLASS'].split(' ')\n new_fight_row['WOMEN'] = 0\n if fight_weight_class[0].upper() == 'WOMEN':\n new_fight_row['WOMEN'] = 1\n del fight_weight_class[0]\n fight_weight_class = ''.join([w.upper()[:3] for w in fight_weight_class])\n for w_col in fight_other_data_columns[2:]:\n new_fight_row[w_col] = int(w_col == fight_weight_class)\n\n for id_i in range(len(reformat_row['FIGHTER_ID'])):\n fighter_id = reformat_row['FIGHTER_ID'][id_i]\n # Init value for calculate weighted, average and per minute value\n if fighter_id not in fighter_build_info:\n fight_weighted_values = {}\n fight_num_per_minute = {}\n fight_avg_value = {}\n fight_value_count = {}\n for i in range(len(need_weight_data_columns)):\n i_w = i * 2\n\n fight_avg_value[fight_avg_data_columns[i_w][:-2]] = init_value[need_weight_data_columns[i]]\n fight_avg_value[fight_avg_side_columns[i_w][:-2]] = init_value[need_weight_side_columns[i]]\n fight_avg_value[fight_pm_data_columns[i_w][:-2]] = 0\n fight_avg_value[fight_pm_side_columns[i_w][:-2]] = 0\n\n fight_weighted_values[fight_weighted_data_columns[i_w][:-2]] = init_value[need_weight_data_columns[i]]\n fight_weighted_values[fight_weighted_side_columns[i_w][:-2]] = init_value[need_weight_side_columns[i]]\n fight_num_per_minute[fight_pm_data_columns[i_w][:-2]] = 0\n fight_num_per_minute[fight_pm_side_columns[i_w][:-2]] = 0\n fight_value_count[need_weight_data_columns[i]] = 0\n fight_value_count[need_weight_side_columns[i]] = 0\n\n for i in range(len(state_data_columns)):\n fight_avg_value[state_data_columns[i]] = 0\n fight_avg_value[state_side_columns[i]] = 0\n\n fight_avg_value['REV_I'] = 0\n fight_avg_value['REV_P'] = 0\n fight_value_count['REV_I'] = 0\n fight_value_count['REV_P'] = 0\n fighter_build_info[fighter_id] = [fight_weighted_values, fight_num_per_minute, fight_avg_value, fight_value_count, [0, 0]]\n\n get_needed_fighter_data(new_fight_row, fighter_id, id_i, reformat_row['DATE'])\n\n new_fight_row['FIGHT_COUNT_' + str(id_i)] = fighter_build_info[fighter_id][4][0]\n 
new_fight_row['FIGHT_MINUTE_' + str(id_i)] = fighter_build_info[fighter_id][4][1]\n fighter_build_info[fighter_id][4][0] += 1\n fighter_build_info[fighter_id][4][1] += reformat_row['FIGHT_TIME'][0]\n\n # Calculate weighted value, they related average and per minute for both fighter, from all they past fight up to given fight\n for id_i in range(len(reformat_row['FIGHTER_ID'])):\n fighter_id = reformat_row['FIGHTER_ID'][id_i]\n other_id_i = reverse_01(id_i)\n other_fighter_id = reformat_row['FIGHTER_ID'][other_id_i]\n # \n for i in range(len(need_weight_data_columns)):\n i_w = i * 2 + id_i\n other_i_w = i * 2 + other_id_i\n # Store value before update\n get_needed_fight_stats_data(new_fight_row, fighter_build_info, i_w, other_i_w, fighter_id, other_fighter_id)\n\n num_list = reformat_row[need_weight_data_columns[i] + '_' + str(id_i)]\n if len(num_list) > 2 and num_list[1] > 0:\n fighter_build_info[fighter_id][3][need_weight_data_columns[i]] += 1\n fighter_build_info[other_fighter_id][3][need_weight_side_columns[i]] += 1\n\n last_weighted_value = fighter_build_info[fighter_id][0][fight_weighted_data_columns[i_w][:-2]]\n last_side_weighted_value = fighter_build_info[other_fighter_id][0][fight_weighted_side_columns[i_w][:-2]]\n\n now_num_per_minute = num_list[1] / reformat_row['FIGHT_TIME'][0]\n # Accuracy: success / all\n now_target_weighted_value = num_list[0] / num_list[1]\n now_expect_weighted_value = (last_weighted_value + (1 - last_side_weighted_value)) / 2\n max_update_value = now_target_weighted_value - now_expect_weighted_value\n\n all_num_per_minute_i = fighter_build_info[fighter_id][1][fight_pm_data_columns[i_w][:-2]] + now_num_per_minute\n all_num_per_minute_p = fighter_build_info[other_fighter_id][1][fight_pm_side_columns[i_w][:-2]] + now_num_per_minute\n fighter_build_info[fighter_id][1][fight_pm_data_columns[i_w][:-2]] = all_num_per_minute_i\n fighter_build_info[other_fighter_id][1][fight_pm_side_columns[i_w][:-2]] = all_num_per_minute_p\n \n change_percent_i = now_num_per_minute / all_num_per_minute_i\n change_percent_p = now_num_per_minute / all_num_per_minute_p\n\n new_weighted_value = (last_weighted_value + max_update_value) * change_percent_i + (1 - change_percent_i) * last_weighted_value\n new_side_weighted_value = (last_side_weighted_value + max_update_value) * change_percent_p + (1 - change_percent_p) * last_side_weighted_value\n\n fighter_build_info[fighter_id][0][fight_weighted_data_columns[i_w][:-2]] = new_weighted_value\n fighter_build_info[other_fighter_id][0][fight_weighted_side_columns[i_w][:-2]] = new_side_weighted_value\n\n # Calculate average values with respect to fight, e.g average strike and defend accuracy\n now_num_percent = num_list[0] / num_list[1]\n now_avg_percent = get_avg_value(now_num_percent, \n fighter_build_info[fighter_id][2][fight_avg_data_columns[i_w][:-2]], \n fighter_build_info[fighter_id][3][need_weight_data_columns[i]])\n fighter_build_info[fighter_id][2][fight_avg_data_columns[i_w][:-2]] = now_avg_percent\n now_avg_percent = get_avg_value(1 - now_num_percent, \n fighter_build_info[other_fighter_id][2][fight_avg_side_columns[i_w][:-2]], \n fighter_build_info[other_fighter_id][3][need_weight_side_columns[i]])\n fighter_build_info[other_fighter_id][2][fight_avg_side_columns[i_w][:-2]] = now_avg_percent\n # Calculate average number per minute with respect to fight, e.g average strike per minute\n now_avg_num_per_minute = get_avg_value(now_num_per_minute, \n fighter_build_info[fighter_id][2][fight_pm_data_columns[i_w][:-2]], \n 
fighter_build_info[fighter_id][3][need_weight_data_columns[i]])\n fighter_build_info[fighter_id][2][fight_pm_data_columns[i_w][:-2]] = now_avg_num_per_minute\n now_avg_num_per_minute = get_avg_value(now_num_per_minute, \n fighter_build_info[other_fighter_id][2][fight_pm_side_columns[i_w][:-2]], \n fighter_build_info[other_fighter_id][3][need_weight_side_columns[i]])\n fighter_build_info[other_fighter_id][2][fight_pm_side_columns[i_w][:-2]] = now_avg_num_per_minute\n\n # Calculate average reversal and been reversal number per fight\n new_fight_row['REV_I_' + str(id_i)] = fighter_build_info[fighter_id][2]['REV_I']\n new_fight_row['REV_P_' + str(other_id_i)] = fighter_build_info[other_fighter_id][2]['REV_P']\n\n rev_num_list = reformat_row['REV_' + str(id_i)]\n if len(rev_num_list) > 0:\n rev_num = rev_num_list[0]\n fighter_build_info[fighter_id][3]['REV_I'] += 1\n fighter_build_info[other_fighter_id][3]['REV_P'] += 1\n\n now_avg_rev_i = get_avg_value(rev_num, \n fighter_build_info[fighter_id][2]['REV_I'], \n fighter_build_info[fighter_id][3]['REV_I'])\n fighter_build_info[fighter_id][2]['REV_I'] = now_avg_rev_i\n now_avg_rev_p = get_avg_value(rev_num, \n fighter_build_info[other_fighter_id][2]['REV_P'], \n fighter_build_info[other_fighter_id][3]['REV_P'])\n fighter_build_info[other_fighter_id][2]['REV_P'] = now_avg_rev_p \n\n # Get numerical and percentage target values\n for i in range(len(target_n_raw_data_columns)):\n i_w = i * 2 + id_i\n num_list = reformat_row[target_n_raw_data_columns[i][:-1] + str(id_i)]\n i_n = int(target_n_raw_data_columns[i][:-2] == 'SUB_ATT')\n if len(num_list) > i_n + 1:\n new_fight_row[target_n_data_columns[i_w]] = num_list[i_n] / reformat_row['FIGHT_TIME'][0]\n\n for i in range(0, len(target_np_raw_data_columns), 2):\n i_w = i * 2 + id_i\n i_p = i_w + 2\n num_list = reformat_row[target_np_raw_data_columns[i][:-1] + str(id_i)]\n if len(num_list) > 2 and num_list[1] > 0: \n new_fight_row[target_np_data_columns[i_w]] = num_list[1] / reformat_row['FIGHT_TIME'][0]\n new_fight_row[target_np_data_columns[i_p]] = num_list[0] / num_list[1] \n\n if reformat_row['FIGHT_TIME'][1] >= reformat_row['FIGHT_TIME'][0]: \n new_fight_row['FIGHT_MINUTE_P_TAR_2'] = reformat_row['FIGHT_TIME'][0] / reformat_row['FIGHT_TIME'][1]\n\n # Calculate win, tko, submission, decision average value for both fighter from all they past fight up to given fight (not include)\n id_i = 0\n other_id_i = 1\n fighter_id = reformat_row['FIGHTER_ID'][id_i]\n other_fighter_id = reformat_row['FIGHTER_ID'][other_id_i]\n\n is_win = reformat_row['WIN'][0] == 'W'\n is_draw = reformat_row['WIN'][0] == reformat_row['WIN'][1]\n fight_win_method = 'DCD' if reformat_row['METHOD'] == 'NC' else reformat_row['METHOD']\n all_win_method = ['TKO', 'SUB', 'DC']\n for i in range(len(state_data_columns)):\n i_w = i * 2 + id_i\n other_i_w = i * 2 + other_id_i\n is_method_used = all_win_method[i] in fight_win_method\n # Store value before update\n get_needed_fight_state_data(new_fight_row, fighter_build_info, i, i_w, other_i_w, fighter_id, other_fighter_id)\n\n now_win_value = 1 if is_method_used and is_win else 0.5 if is_method_used and is_draw else 0\n now_other_win_value = 1 if is_method_used and not is_win else 0.5 if is_method_used and is_draw else 0\n now_win_percent = get_avg_value(now_win_value, \n fighter_build_info[fighter_id][2][state_data_columns[i]], \n fighter_build_info[fighter_id][4][0])\n now_loss_percent = get_avg_value(now_other_win_value, \n fighter_build_info[fighter_id][2][state_side_columns[i]], \n 
fighter_build_info[fighter_id][4][0])\n fighter_build_info[fighter_id][2][state_data_columns[i]] = now_win_percent\n fighter_build_info[fighter_id][2][state_side_columns[i]] = now_loss_percent\n\n now_other_win_percent = get_avg_value(now_other_win_value, \n fighter_build_info[other_fighter_id][2][state_data_columns[i]], \n fighter_build_info[other_fighter_id][4][0])\n now_other_loss_percent = get_avg_value(now_win_value, \n fighter_build_info[other_fighter_id][2][state_side_columns[i]], \n fighter_build_info[other_fighter_id][4][0])\n fighter_build_info[other_fighter_id][2][state_data_columns[i]] = now_other_win_percent \n fighter_build_info[other_fighter_id][2][state_side_columns[i]] = now_other_loss_percent \n\n new_fight_row[target_win_columns[i_w]] = now_win_value\n new_fight_row[target_win_columns[other_i_w]] = now_other_win_value\n\n fight_train_df = fight_train_df.append(new_fight_row, ignore_index=True)\n \n with open(fighter_build_info_path, 'wb') as handle:\n pickle.dump(fighter_build_info, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n fight_train_df.to_csv(fight_train_df_path)\n\ndef get_needed_fighter_data(new_row, fighter_id, id_i, fight_date, fighter_age=np.nan):\n fighter_reformat_row = fighter_reformat_df[fighter_reformat_df['FIGHTER_ID'] == fighter_id].squeeze()\n # Get fighter stance use one hot encoding\n for s in ['ORT', 'SOU', 'SWI']:\n new_row['STANCE_' + s + '_' + str(id_i)] = int(str(fighter_reformat_row['STANCE']) == s)\n # Get fighter reach and height for fight input value\n for i in range(len(fighter_static_columns)):\n i_w = i * 2 + id_i\n new_row[fight_fighter_static_columns[i_w]] = fighter_reformat_row[fighter_static_columns[i]]\n # Get fighter age according to date of born and fight date\n pattern = re.compile(r\"[0-9\\-]+\")\n\n if np.isnan(fighter_age) and bool(pattern.match(str(fighter_reformat_row['DOB']))) and bool(pattern.match(str(fight_date))):\n dob = pd.to_datetime(fighter_reformat_row['DOB'])\n date = pd.to_datetime(fight_date)\n time_diff = dateutil.relativedelta.relativedelta(date, dob)\n fighter_age = time_diff.years + (time_diff.months + time_diff.days / 31) / 12\n new_row['AGE_' + str(id_i)] = fighter_age\n\ndef get_needed_fight_stats_data(new_row, fighter_build_info, i_w, other_i_w, fighter_id, other_fighter_id):\n # Store stats value for model input\n new_row[fight_weighted_data_columns[i_w]] = fighter_build_info[fighter_id][0][fight_weighted_data_columns[i_w][:-2]]\n new_row[fight_weighted_side_columns[other_i_w]] = fighter_build_info[other_fighter_id][0][fight_weighted_side_columns[i_w][:-2]]\n new_row[fight_avg_data_columns[i_w]] = fighter_build_info[fighter_id][2][fight_avg_data_columns[i_w][:-2]]\n new_row[fight_avg_side_columns[other_i_w]] = fighter_build_info[other_fighter_id][2][fight_avg_side_columns[i_w][:-2]]\n new_row[fight_pm_data_columns[i_w]] = fighter_build_info[fighter_id][2][fight_pm_data_columns[i_w][:-2]]\n new_row[fight_pm_side_columns[other_i_w]] = fighter_build_info[other_fighter_id][2][fight_pm_side_columns[i_w][:-2]]\n\ndef get_needed_fight_state_data(new_row, fighter_build_info, i, i_w, other_i_w, fighter_id, other_fighter_id):\n new_row[fight_state_data_columns[i_w]] = fighter_build_info[fighter_id][2][state_data_columns[i]]\n new_row[fight_state_side_columns[i_w]] = fighter_build_info[fighter_id][2][state_side_columns[i]]\n new_row[fight_state_data_columns[other_i_w]] = fighter_build_info[other_fighter_id][2][state_data_columns[i]]\n new_row[fight_state_side_columns[other_i_w]] = 
fighter_build_info[other_fighter_id][2][state_side_columns[i]]\n\ndef reverse_01(i):\n return (i + 1) % 2\n\ndef get_avg_value(new_value, old_avg_value, count):\n return old_avg_value + (new_value - old_avg_value) / count\n\ndef normalize_df_input(df):\n print(\"Normalize df input\\n\")\n fighter_data_columns = fighter_dynamic_columns[:5] + fighter_static_columns\n fight_fighter_data_columns = init_columns_name('_', fighter_data_columns)\n fight_need_normalize_columns = fight_fighter_data_columns + fight_pm_data_columns + fight_pm_side_columns + fight_other_data_columns[:1]\n\n # Normalize some columns of data frame\n normalize_parameters = {}\n parameters_path = './Data/normalize_parameters.pickle'\n if path.exists(parameters_path):\n with open(parameters_path, 'rb') as handle:\n normalize_parameters = pickle.load(handle)\n else :\n for n_col in fight_need_normalize_columns:\n parameters = []\n parameters.append(df[n_col].mean())\n parameters.append(df[n_col].max() - df[n_col].min())\n normalize_parameters[n_col] = parameters\n\n with open(parameters_path, 'wb') as handle:\n pickle.dump(normalize_parameters, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n for row_index, train_row in df.iterrows(): \n for n_col in normalize_parameters:\n df.loc[row_index, n_col] = mean_normalization(train_row[n_col], normalize_parameters[n_col][0], normalize_parameters[n_col][1])\n\n for n_col in normalize_parameters:\n df[n_col] = df[n_col].fillna(0) \n\ndef mean_normalization(x, mean, val_range):\n return (x - mean) / val_range\n\n# Uniform sample data in chunks, chunks is divided respect to time of fight\ndef df_sample():\n test_dataset = pd.DataFrame(columns=fight_train_df.columns)\n if bs.divide_by_chunks:\n print(\"Splite train/test dataset using chunks uniform sample\")\n df_chunks = split_dataframe(fight_train_df, bs.chunk_size)\n for df_chunk in df_chunks:\n test_dataset = test_dataset.append(df_chunk.sample(frac=bs.test_set_frac), ignore_index=True)\n else :\n print(\"Splite train/test dataset using uniform sample\")\n test_dataset = fight_train_df.sample(frac=bs.test_set_frac)\n train_dataset = fight_train_df.drop(test_dataset.index)\n return train_dataset, test_dataset\n\n# input - df: a Dataframe, chunkSize: the chunk size\n# output - a list of DataFrame\n# purpose - splits the DataFrame into smaller chunks\ndef split_dataframe(df, chunk_size = 100): \n chunks = list()\n num_chunks = len(df) // chunk_size + 1\n for i in range(num_chunks):\n chunks.append(df[i*chunk_size:(i+1)*chunk_size])\n return chunks\n\ndef build_and_process_train_fight_data():\n print(\"Build and process train fight data\\n\")\n global fight_train_df\n if path.exists(fight_train_df_path):\n fight_train_df = pd.read_csv(fight_train_df_path, index_col=0)\n else :\n build_train_fight_data()\n \n fight_train_df = double_and_switch_df_input(fight_train_df)\n normalize_df_input(fight_train_df)\n\n train_dataset, test_dataset = df_sample()\n train_dataset.to_csv(\"./Data/FightAllTrainData.csv\")\n test_dataset.to_csv(\"./Data/FightAllTestData.csv\")\n\ndef double_and_switch_df_input(dataset):\n print(\"Double and switch df input\\n\")\n # In order to train the model that treat two fighter's feature irrelevant to their input position, \n # we double the input row and switch the all paired data: 0->1; 1->0\n mirror_dataset = pd.DataFrame(columns=dataset.columns)\n for _, train_row in dataset.iterrows(): \n new_mirror_row = {}\n for i in range(len(train_row.index)):\n new_col = col = train_row.index[i]\n if col[-2:] == '_0':\n 
new_col = col[:-2] + '_1'\n elif col[-2:] == '_1':\n new_col = col[:-2] + '_0'\n new_mirror_row[new_col] = train_row[col]\n mirror_dataset = mirror_dataset.append(new_mirror_row, ignore_index=True)\n \n return dataset.append(mirror_dataset, ignore_index=True)\n\ndef build_and_process_predict_fight_data():\n print(\"Build and process predict fight data\\n\")\n if path.exists(fighter_build_info_path):\n with open(fighter_build_info_path, 'rb') as handle:\n fighter_build_info = pickle.load(handle)\n\n need_predict_df = pd.DataFrame(columns=fight_input_columns)\n\n fighter_id_pairs = list(zip(bs.fighter_0_ids, bs.fighter_1_ids))\n all_need_predict_num = len(fighter_id_pairs)\n fighter_0_ages = bs.fighter_0_ages + [np.nan] * max(all_need_predict_num - len(bs.fighter_0_ages), 0)\n fighter_1_ages = bs.fighter_1_ages + [np.nan] * max(all_need_predict_num - len(bs.fighter_1_ages), 0)\n fighter_age_pairs = list(zip(fighter_0_ages, fighter_1_ages))\n for fp_i in range(all_need_predict_num):\n new_need_predict_row = {}\n\n for id_i in range(2):\n fighter_id = fighter_id_pairs[fp_i][id_i]\n other_id_i = reverse_01(id_i)\n other_fighter_id = fighter_id_pairs[fp_i][other_id_i]\n\n new_need_predict_row['WOMEN'] = int(not bs.is_man)\n fight_weight_class = bs.fight_weights[fp_i]\n for w_col in fight_other_data_columns[2:]:\n new_need_predict_row[w_col] = int(w_col == fight_weight_class)\n\n fight_date = bs.fight_date\n if fight_date is None:\n fight_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n get_needed_fighter_data(new_need_predict_row, fighter_id, id_i, fight_date, fighter_age_pairs[fp_i][id_i])\n\n new_need_predict_row['FIGHT_COUNT_' + str(id_i)] = fighter_build_info[fighter_id][4][0]\n new_need_predict_row['FIGHT_MINUTE_' + str(id_i)] = fighter_build_info[fighter_id][4][1]\n\n for i in range(len(need_weight_data_columns)):\n i_w = i * 2 + id_i\n other_i_w = i * 2 + other_id_i\n get_needed_fight_stats_data(new_need_predict_row, fighter_build_info, i_w, other_i_w, fighter_id, other_fighter_id)\n\n new_need_predict_row['REV_I_' + str(id_i)] = fighter_build_info[fighter_id][2]['REV_I']\n new_need_predict_row['REV_P_' + str(other_id_i)] = fighter_build_info[other_fighter_id][2]['REV_P']\n\n for i in range(len(state_data_columns)):\n i_w = i * 2 + id_i\n other_i_w = i * 2 + other_id_i\n get_needed_fight_state_data(new_need_predict_row, fighter_build_info, i, i_w, other_i_w, fighter_id, other_fighter_id)\n\n need_predict_df = need_predict_df.append(new_need_predict_row, ignore_index=True)\n \n normalize_df_input(need_predict_df)\n \n # Add old need predict fight if exist\n fight_need_predict_path = \"./Data/FightNeedPredictData.csv\"\n if path.exists(fight_need_predict_path):\n pre_need_predict_df = pd.read_csv(fight_need_predict_path, index_col=0)\n need_predict_df = need_predict_df.append(pre_need_predict_df, ignore_index=True)\n\n need_predict_df.to_csv(fight_need_predict_path)\n\nif __name__ == \"__main__\":\n if bs.is_build_train:\n build_and_process_train_fight_data()\n else:\n build_and_process_predict_fight_data()"
] |
[
[
"numpy.isnan",
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
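The BuildFightData record above updates every per-fighter statistic with get_avg_value, a one-pass incremental mean. A tiny sketch showing the idea (the function body is verbatim from the record; the driver loop and sample accuracies are mine): feeding samples through it reproduces the exact arithmetic mean without storing any fight history.

def get_avg_value(new_value, old_avg_value, count):
    # Incremental (running) mean, verbatim from the record above.
    return old_avg_value + (new_value - old_avg_value) / count

avg, n = 0.0, 0
for accuracy in [0.4, 0.6, 0.5]:  # e.g. per-fight strike accuracies
    n += 1                        # the record also bumps the count first
    avg = get_avg_value(accuracy, avg, n)
print(round(avg, 3))  # 0.5 -- the exact mean of the samples

This is the same update rule as Welford's running mean, which is why the pipeline can walk the fights chronologically and keep every fighter's averages exact with O(1) state per statistic.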
LorenFrankLab/nwb_datajoint
|
[
"ef50fe23f71fe8f44ce6c878f863de51a49478cb"
] |
[
"src/nwb_datajoint/common/common_ephys.py"
] |
[
"import re\nimport warnings\n\nimport datajoint as dj\nimport numpy as np\nimport pynwb\n\nfrom .common_device import Probe # noqa: F401\nfrom .common_filter import FirFilter\nfrom .common_interval import IntervalList, interval_list_censor, interval_list_intersect # noqa: F401\n# SortInterval, interval_list_intersect, interval_list_excludes_ind\nfrom .common_nwbfile import AnalysisNwbfile, Nwbfile\nfrom .common_region import BrainRegion # noqa: F401\nfrom .common_session import Session # noqa: F401\nfrom .dj_helper_fn import fetch_nwb # dj_replace\nfrom .nwb_helper_fn import (estimate_sampling_rate, get_data_interface,\n get_electrode_indices, get_nwb_file,\n get_valid_intervals)\n\nschema = dj.schema('common_ephys')\n\n\n@schema\nclass ElectrodeGroup(dj.Imported):\n definition = \"\"\"\n # Grouping of electrodes corresponding to a physical probe.\n -> Session\n electrode_group_name: varchar(80) # electrode group name from NWBFile\n ---\n -> BrainRegion\n -> Probe\n description: varchar(80) # description of electrode group\n target_hemisphere: enum('Right','Left')\n \"\"\"\n\n def make(self, key):\n nwb_file_name = key['nwb_file_name']\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n nwbf = get_nwb_file(nwb_file_abspath)\n # fill in the groups\n egroups = list(nwbf.electrode_groups.keys())\n\n for eg_name in egroups:\n # for each electrode group, we get the group and add an electrode group entry.\n # as the ElectrodeGroup\n electrode_group = nwbf.get_electrode_group(eg_name)\n key['electrode_group_name'] = eg_name\n # check to see if the location is listed in the region.BrainRegion schema, and if not add it\n region_dict = dict()\n region_dict['region_name'] = electrode_group.location\n region_dict['subregion_name'] = ''\n region_dict['subsubregion_name'] = ''\n query = BrainRegion() & region_dict\n if len(query) == 0:\n # this region isn't in the list, so add it\n BrainRegion().insert1(region_dict)\n query = BrainRegion() & region_dict\n # we also need to get the region_id for this new region or find the right region_id\n region_id_dict = query.fetch1()\n key['region_id'] = region_id_dict['region_id']\n key['description'] = electrode_group.description\n # the following should probably be a function that returns the probe devices from the file\n # TODO check and replace this with\n # if isinstance(electrode_group.device, ndx_franklab_novela.Probe):\n # key['probe_type'] = electrode_group.device.probe_type\n # else:\n # key['probe_type'] = 'unknown-probe-type'\n probe_re = re.compile(\"probe\")\n for d in nwbf.devices:\n if probe_re.search(d):\n if nwbf.devices[d] == electrode_group.device:\n # this will match the entry in the device schema\n key['probe_type'] = electrode_group.device.probe_type\n break\n if 'probe_type' not in key:\n key['probe_type'] = 'unknown-probe-type'\n self.insert1(key, skip_duplicates=True)\n\n\n@schema\nclass Electrode(dj.Imported):\n definition = \"\"\"\n -> ElectrodeGroup\n electrode_id: int # the unique number for this electrode\n ---\n -> Probe.Electrode\n -> BrainRegion\n name='': varchar(80) # unique label for each contact\n original_reference_electrode=-1: int # the configured reference electrode for this electrode\n x=NULL: float # the x coordinate of the electrode position in the brain\n y=NULL: float # the y coordinate of the electrode position in the brain\n z=NULL: float # the z coordinate of the electrode position in the brain\n filtering: varchar(200) # description of the signal filtering\n impedance=null: float # electrode impedance\n 
bad_channel: enum(\"True\",\"False\") # if electrode is 'good' or 'bad' as observed during recording\n x_warped=NULL: float # x coordinate of electrode position warped to common template brain\n y_warped=NULL: float # y coordinate of electrode position warped to common template brain\n z_warped=NULL: float # z coordinate of electrode position warped to common template brain\n contacts: varchar(80) # label of electrode contacts used for a bipolar signal -- current workaround\n \"\"\"\n\n def make(self, key):\n nwb_file_name = key['nwb_file_name']\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n nwbf = get_nwb_file(nwb_file_abspath)\n # create the table of electrodes\n electrodes = nwbf.electrodes.to_dataframe()\n\n # Below it would be better to find the mapping between\n # nwbf.electrodes.colnames and the schema fields and\n # where possible, assign automatically. It would also help to be\n # robust to missing fields and have them\n # assigned as empty if they don't exist in the nwb file in case\n # people are not using our column names.\n\n for elect in electrodes.iterrows():\n key['electrode_group_name'] = elect[1].group_name\n key['electrode_id'] = elect[0]\n key['name'] = str(elect[0])\n key['probe_type'] = elect[1].group.device.probe_type\n key['probe_shank'] = elect[1].probe_shank\n key['probe_electrode'] = elect[1].probe_electrode\n key['bad_channel'] = 'True' if elect[1].bad_channel else 'False'\n # look up the region\n region_dict = dict()\n region_dict['region_name'] = elect[1].group.location\n region_dict['subregion_name'] = ''\n region_dict['subsubregion_name'] = ''\n key['region_id'] = (\n BrainRegion() & region_dict).fetch1('region_id')\n key['x'] = elect[1].x\n key['y'] = elect[1].y\n key['z'] = elect[1].z\n key['x_warped'] = 0\n key['y_warped'] = 0\n key['z_warped'] = 0\n key['contacts'] = ''\n key['filtering'] = elect[1].filtering\n key['impedance'] = elect[1].imp\n try:\n key['original_reference_electrode'] = elect[1].ref_elect_id\n except Exception: # TODO: use more precise error check\n key['original_reference_electrode'] = -1\n self.insert1(key, skip_duplicates=True)\n\n\n@schema\nclass Raw(dj.Imported):\n definition = \"\"\"\n # Raw voltage timeseries data, ElectricalSeries in NWB.\n -> Session\n ---\n -> IntervalList\n raw_object_id: varchar(80) # the NWB object ID for loading this object from the file\n sampling_rate: float # Sampling rate calculated from data, in Hz\n comments: varchar(80)\n description: varchar(80)\n \"\"\"\n\n def make(self, key):\n nwb_file_name = key['nwb_file_name']\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n nwbf = get_nwb_file(nwb_file_abspath)\n raw_interval_name = \"raw data valid times\"\n # get the acquisition object\n # TODO this assumes there is a single item in NWBFile.acquisition\n try:\n rawdata = nwbf.get_acquisition()\n assert isinstance(rawdata, pynwb.ecephys.ElectricalSeries)\n except Exception: # TODO: use more precise error check\n warnings.warn(\n f'WARNING: Unable to get acquisition object in: {nwb_file_abspath}')\n return\n print('Estimating sampling rate...')\n # NOTE: Only use first 1e6 timepoints to save time\n sampling_rate = estimate_sampling_rate(\n np.asarray(rawdata.timestamps[:int(1e6)]), 1.5)\n print(f'Estimated sampling rate: {sampling_rate}')\n key['sampling_rate'] = sampling_rate\n\n interval_dict = dict()\n interval_dict['nwb_file_name'] = key['nwb_file_name']\n interval_dict['interval_list_name'] = raw_interval_name\n # get the list of valid times given the specified sampling rate.\n 
interval_dict['valid_times'] = get_valid_intervals(np.asarray(rawdata.timestamps), key['sampling_rate'],\n 1.75, 0)\n IntervalList().insert1(interval_dict, skip_duplicates=True)\n\n # now insert each of the electrodes as an individual row, but with the same nwb_object_id\n key['raw_object_id'] = rawdata.object_id\n key['sampling_rate'] = sampling_rate\n print(\n f'Importing raw data: Estimated sampling rate:\\t{key[\"sampling_rate\"]} Hz')\n print(\n f' Number of valid intervals:\\t{len(interval_dict[\"valid_times\"])}')\n key['interval_list_name'] = raw_interval_name\n key['comments'] = rawdata.comments\n key['description'] = rawdata.description\n self.insert1(key, skip_duplicates=True)\n\n def nwb_object(self, key):\n # TODO return the nwb_object; FIX: this should be replaced with a fetch call. Note that we're using the raw file\n # so we can modify the other one.\n nwb_file_name = key['nwb_file_name']\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n nwbf = get_nwb_file(nwb_file_abspath)\n raw_object_id = (self & {'nwb_file_name': key['nwb_file_name']}).fetch1(\n 'raw_object_id')\n return nwbf.objects[raw_object_id]\n\n def fetch_nwb(self, *attrs, **kwargs):\n return fetch_nwb(self, (Nwbfile, 'nwb_file_abs_path'), *attrs, **kwargs)\n\n\n@schema\nclass SampleCount(dj.Imported):\n definition = \"\"\"\n # Sample count :s timestamp timeseries\n -> Session\n ---\n sample_count_object_id: varchar(40) # the NWB object ID for loading this object from the file\n \"\"\"\n\n def make(self, key):\n nwb_file_name = key['nwb_file_name']\n nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name)\n nwbf = get_nwb_file(nwb_file_abspath)\n # get the sample count object\n # TODO: change name when nwb file is changed\n sample_count = get_data_interface(nwbf, 'sample_count')\n if sample_count is None:\n warnings.warn(\n f'Unable to get sample count object in: {nwb_file_abspath}')\n return\n key['sample_count_object_id'] = sample_count.object_id\n\n def fetch_nwb(self, *attrs, **kwargs):\n return fetch_nwb(self, (Nwbfile, 'nwb_file_abs_path'), *attrs, **kwargs)\n\n\n@schema\nclass LFPSelection(dj.Manual):\n definition = \"\"\"\n -> Session\n \"\"\"\n\n class LFPElectrode(dj.Part):\n definition = \"\"\"\n -> master\n -> Electrode\n \"\"\"\n\n def set_lfp_electrodes(self, nwb_file_name, electrode_list):\n '''\n Removes all electrodes for the specified nwb file and then adds back the electrodes in the list\n :param nwb_file_name: string - the name of the nwb file for the desired session\n :param electrode_list: list of electrodes to be used for LFP\n :return:\n '''\n # remove the session and then recreate the session and Electrode list\n (LFPSelection() & {'nwb_file_name': nwb_file_name}).delete()\n # check to see if the user allowed the deletion\n if len((LFPSelection() & {'nwb_file_name': nwb_file_name}).fetch()) == 0:\n LFPSelection().insert1({'nwb_file_name': nwb_file_name})\n\n # TO DO: do this in a better way\n all_electrodes = (Electrode() & {'nwb_file_name' : nwb_file_name}).fetch(as_dict=True)\n primary_key = Electrode.primary_key\n for e in all_electrodes:\n # create a dictionary so we can insert new elects\n if e['electrode_id'] in electrode_list:\n lfpelectdict = {k: v for k,\n v in e.items() if k in primary_key}\n LFPSelection().LFPElectrode.insert1(lfpelectdict, replace=True)\n\n\n@schema\nclass LFP(dj.Imported):\n definition = \"\"\"\n -> LFPSelection\n ---\n -> IntervalList # the valid intervals for the data\n -> FirFilter # the filter used for the data\n -> AnalysisNwbfile # the name of 
the nwb file with the lfp data\n lfp_object_id: varchar(80) # the NWB object ID for loading this object from the file\n lfp_sampling_rate: float # the sampling rate, in HZ\n \"\"\"\n\n def make(self, key):\n # get the NWB object with the data; FIX: change to fetch with additional infrastructure\n rawdata = Raw().nwb_object(key)\n sampling_rate, interval_list_name = (Raw() & key).fetch1(\n 'sampling_rate', 'interval_list_name')\n sampling_rate = int(np.round(sampling_rate))\n\n valid_times = (IntervalList() & {'nwb_file_name': key['nwb_file_name'],\n 'interval_list_name': interval_list_name}).fetch1('valid_times')\n # keep only the intervals > 1 second long\n min_interval_length = 1.0\n valid = []\n for count, interval in enumerate(valid_times):\n if interval[1] - interval[0] > min_interval_length:\n valid.append(count)\n valid_times = valid_times[valid] \n print(f'LFP: found {len(valid)} of {count+1} intervals > {min_interval_length} sec long.') \n \n # target 1 KHz sampling rate\n decimation = sampling_rate // 1000\n\n # get the LFP filter that matches the raw data\n filter = (FirFilter() & {'filter_name': 'LFP 0-400 Hz'} &\n {'filter_sampling_rate': sampling_rate}).fetch(as_dict=True)\n\n # there should only be one filter that matches, so we take the first of the dictionaries\n key['filter_name'] = filter[0]['filter_name']\n key['filter_sampling_rate'] = filter[0]['filter_sampling_rate']\n\n filter_coeff = filter[0]['filter_coeff']\n if len(filter_coeff) == 0:\n print(\n f'Error in LFP: no filter found with data sampling rate of {sampling_rate}')\n return None\n # get the list of selected LFP Channels from LFPElectrode\n electrode_keys = (LFPSelection.LFPElectrode & key).fetch('KEY')\n electrode_id_list = list(k['electrode_id'] for k in electrode_keys)\n\n lfp_file_name = AnalysisNwbfile().create(key['nwb_file_name'])\n\n lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name)\n lfp_object_id, timestamp_interval = FirFilter().filter_data_nwb(lfp_file_abspath, rawdata,\n filter_coeff, valid_times, electrode_id_list, decimation)\n\n # now that the LFP is filtered and in the file, add the file to the AnalysisNwbfile table \n AnalysisNwbfile().add(key['nwb_file_name'], lfp_file_name)\n\n key['analysis_file_name'] = lfp_file_name\n key['lfp_object_id'] = lfp_object_id\n key['lfp_sampling_rate'] = sampling_rate // decimation\n\n # finally, we need to censor the valid times to account for the downsampling\n lfp_valid_times = interval_list_censor(valid_times, timestamp_interval)\n # add an interval list for the LFP valid times, skipping duplicates\n key['interval_list_name'] = 'lfp valid times'\n IntervalList.insert1({'nwb_file_name': key['nwb_file_name'],\n 'interval_list_name': key['interval_list_name'], \n 'valid_times': lfp_valid_times}, replace=True)\n self.insert1(key)\n\n def nwb_object(self, key):\n # return the nwb_object.\n lfp_file_name = (LFP() & {'nwb_file_name': key['nwb_file_name']}).fetch1(\n 'analysis_file_name')\n lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name)\n nwbf = get_nwb_file(lfp_file_abspath)\n # get the object id\n nwb_object_id = (self & {'analysis_file_name': lfp_file_name}).fetch1(\n 'lfp_object_id')\n return nwbf.objects[nwb_object_id]\n\n def fetch_nwb(self, *attrs, **kwargs):\n return fetch_nwb(self, (AnalysisNwbfile, 'analysis_file_abs_path'), *attrs, **kwargs)\n\n\n@schema\nclass LFPBandSelection(dj.Manual):\n definition = \"\"\"\n -> LFP\n -> FirFilter # the filter to use for the data\n -> 
IntervalList.proj(target_interval_list_name='interval_list_name')  # the original set of times to be filtered\n    lfp_band_sampling_rate: int     # the sampling rate for this band\n    ---\n    min_interval_len=1.0 : float  # the minimum length of a valid interval to filter\n    \"\"\"\n\n    class LFPBandElectrode(dj.Part):\n        definition = \"\"\"\n        -> master\n        -> LFPSelection.LFPElectrode  # the LFP electrode to be filtered\n        reference_elect_id = -1: int  # the reference electrode to use; -1 for no reference\n        ---\n        \"\"\"\n\n    def set_lfp_band_electrodes(self, nwb_file_name, electrode_list, filter_name, interval_list_name,\n                                reference_electrode_list, lfp_band_sampling_rate):\n        '''\n        Adds an entry for each electrode in the electrode_list with the specified filter, interval_list, and\n        reference electrode.\n        Also removes any entries that have the same filter, interval list and reference electrode but are not\n        in the electrode_list.\n        :param nwb_file_name: string - the name of the nwb file for the desired session\n        :param electrode_list: list of LFP electrodes to be filtered\n        :param filter_name: the name of the filter (from the FirFilter schema)\n        :param interval_list_name: the name of the interval list (from the IntervalList schema)\n        :param reference_electrode_list: A single electrode id corresponding to the reference to use for all\n        electrodes or a list with one element per entry in the electrode_list\n        :param lfp_band_sampling_rate: The output sampling rate to be used for the filtered data; must be an\n        integer divisor of the LFP sampling rate\n        :return: none\n        '''\n        # Error checks on parameters\n        # electrode_list\n        available_electrodes = (LFPSelection().LFPElectrode() & {\n            'nwb_file_name': nwb_file_name}).fetch('electrode_id')\n        if not np.all(np.isin(electrode_list, available_electrodes)):\n            raise ValueError(\n                'All elements in electrode_list must be valid electrode_ids in the LFPSelection table')\n        # sampling rate\n        lfp_sampling_rate = (LFP() & {'nwb_file_name': nwb_file_name}).fetch1(\n            'lfp_sampling_rate')\n        decimation = lfp_sampling_rate // lfp_band_sampling_rate\n        if lfp_sampling_rate // decimation != lfp_band_sampling_rate:\n            raise ValueError(f'lfp_band_sampling_rate {lfp_band_sampling_rate} is not an integer divisor of lfp '\n                             f'sampling rate {lfp_sampling_rate}')\n        # filter\n        if not len((FirFilter() & {'filter_name': filter_name, 'filter_sampling_rate': lfp_sampling_rate}).fetch()):\n            raise ValueError(\n                f'filter {filter_name}, sampling rate {lfp_sampling_rate} is not in the FirFilter table')\n        # interval_list\n        if not len((IntervalList() & {'nwb_file_name': nwb_file_name, 'interval_list_name': interval_list_name}).fetch()):\n            raise ValueError(f'interval list {interval_list_name} is not in the IntervalList table; the list must be '\n                             'added before this function is called')\n        # reference_electrode_list\n        if len(reference_electrode_list) != 1 and len(reference_electrode_list) != len(electrode_list):\n            raise ValueError(\n                'reference_electrode_list must contain either 1 or len(electrode_list) elements')\n        # add a -1 element to the list to allow for the no reference option\n        available_electrodes = np.append(available_electrodes, [-1])\n        if not np.all(np.isin(reference_electrode_list, available_electrodes)):\n            raise ValueError('All elements in reference_electrode_list must be valid electrode_ids in the LFPSelection '\n                             'table')\n\n        # make a list of all the references\n        ref_list = np.zeros((len(electrode_list),))\n        ref_list[:] = reference_electrode_list\n\n        key = dict()\n        key['nwb_file_name'] = nwb_file_name\n        key['filter_name'] = filter_name\n        key['filter_sampling_rate'] = lfp_sampling_rate\n        key['target_interval_list_name'] = interval_list_name\n        key['lfp_band_sampling_rate'] = lfp_sampling_rate // decimation\n        # insert an entry into the main LFPBandSelection table\n        self.insert1(key, skip_duplicates=True)\n\n        # get all of the current entries and delete any that are not in the list\n        elect_id, ref_id = (self.LFPBandElectrode() & key).fetch(\n            'electrode_id', 'reference_elect_id')\n        for e, r in zip(elect_id, ref_id):\n            if not len(np.where((electrode_list == e) & (ref_list == r))[0]):\n                key['electrode_id'] = e\n                key['reference_elect_id'] = r\n                (self.LFPBandElectrode() & key).delete()\n\n        # iterate through all of the new elements and add them\n        for e, r in zip(electrode_list, ref_list):\n            key['electrode_id'] = e\n            key['electrode_group_name'] = (\n                Electrode & {'nwb_file_name': nwb_file_name, 'electrode_id': e}).fetch1('electrode_group_name')\n            key['reference_elect_id'] = r\n            self.LFPBandElectrode().insert1(key, skip_duplicates=True)\n\n\n@schema\nclass LFPBand(dj.Computed):\n    definition = \"\"\"\n    -> LFPBandSelection\n    ---\n    -> AnalysisNwbfile\n    -> IntervalList\n    filtered_data_object_id: varchar(80)  # the NWB object ID for loading this object from the file\n    \"\"\"\n\n    def make(self, key):\n        # get the NWB object with the lfp data; FIX: change to fetch with additional infrastructure\n        lfp_object = (LFP() & {'nwb_file_name': key['nwb_file_name']}).fetch_nwb()[\n            0]['lfp']\n\n        # load all the data to speed filtering\n        lfp_data = np.asarray(\n            lfp_object.data, dtype=type(lfp_object.data[0][0]))\n\n        # get the electrodes to be filtered and their references\n        lfp_band_elect_id, lfp_band_ref_id = (LFPBandSelection().LFPBandElectrode() & key).fetch(\n            'electrode_id', 'reference_elect_id')\n\n        # get the indices of the electrodes to be filtered and the references\n        lfp_band_elect_index = get_electrode_indices(\n            lfp_object, lfp_band_elect_id)\n        lfp_band_ref_index = get_electrode_indices(lfp_object, lfp_band_ref_id)\n\n        # subtract off the reference for each selected channel\n        for index, elect_index in enumerate(lfp_band_elect_index):\n            if lfp_band_ref_id[index] != -1:\n                lfp_data[:, elect_index] = lfp_data[:, elect_index] - \\\n                    lfp_data[:, lfp_band_ref_index[index]]\n\n        lfp_sampling_rate = (LFP() & {'nwb_file_name': key['nwb_file_name']}).fetch1(\n            'lfp_sampling_rate')\n        interval_list_name, lfp_band_sampling_rate = (LFPBandSelection() & key).fetch1(\n            'target_interval_list_name', 'lfp_band_sampling_rate')\n        valid_times = (IntervalList() & {'nwb_file_name': key['nwb_file_name'],\n                                         'interval_list_name': interval_list_name}).fetch1('valid_times')\n        # the valid_times for this interval may be slightly beyond the valid times for the lfp itself, so we have to intersect the two\n        lfp_interval_list = (LFP() & {'nwb_file_name': key['nwb_file_name']}).fetch1('interval_list_name')\n        lfp_valid_times = (IntervalList() & {'nwb_file_name': key['nwb_file_name'],\n                                             'interval_list_name': lfp_interval_list}).fetch1('valid_times')\n        min_length = (LFPBandSelection & key).fetch1('min_interval_len')\n        lfp_band_valid_times = interval_list_intersect(valid_times, lfp_valid_times, min_length=min_length)\n        filter_name, filter_sampling_rate, lfp_band_sampling_rate = (LFPBandSelection() & key).fetch1(\n            'filter_name', 'filter_sampling_rate', 'lfp_band_sampling_rate')\n\n        decimation = int(lfp_sampling_rate) // lfp_band_sampling_rate\n\n        # get the LFP filter that matches the raw data\n        filter = (FirFilter() & {'filter_name': filter_name} &\n                  {'filter_sampling_rate': filter_sampling_rate}).fetch(as_dict=True)\n        if len(filter) == 0:\n            raise ValueError(f'Filter {filter_name} and sampling_rate {filter_sampling_rate} does not exist in the '\n                             'FirFilter table')\n\n        filter_coeff = filter[0]['filter_coeff']\n        if len(filter_coeff) == 0:\n            print(\n                f'Error in LFPBand: no filter found with data sampling rate of {lfp_band_sampling_rate}')\n            return None\n\n        # create the analysis nwb file to store the results\n        lfp_band_file_name = AnalysisNwbfile().create(key['nwb_file_name'])\n        lfp_band_file_abspath = AnalysisNwbfile().get_abs_path(lfp_band_file_name)\n        # filter the data and write it to the nwb file\n        filtered_data_object_id, timestamp_interval = FirFilter().filter_data_nwb(\n            lfp_band_file_abspath, lfp_object, filter_coeff, lfp_band_valid_times, lfp_band_elect_id, decimation)\n\n        # now that the LFP is filtered and in the file, add the file to the AnalysisNwbfile table\n        AnalysisNwbfile().add(key['nwb_file_name'], lfp_band_file_name)\n        key['analysis_file_name'] = lfp_band_file_name\n        key['filtered_data_object_id'] = filtered_data_object_id\n\n        # finally, we need to censor the valid times to account for the downsampling if this is the first time we've downsampled these data\n        key['interval_list_name'] = interval_list_name + ' lfp band ' + str(lfp_band_sampling_rate) + 'Hz'\n        tmp_valid_times = (IntervalList & {'nwb_file_name': key['nwb_file_name'],\n                                           'interval_list_name': key['interval_list_name']}).fetch('valid_times')\n        if len(tmp_valid_times) == 0:\n            lfp_band_valid_times = interval_list_censor(lfp_band_valid_times, timestamp_interval)\n            # add an interval list for the LFP valid times\n            IntervalList.insert1({'nwb_file_name': key['nwb_file_name'],\n                                  'interval_list_name': key['interval_list_name'],\n                                  'valid_times': lfp_band_valid_times})\n        else:\n            # check that the valid times are the same\n            assert np.isclose(tmp_valid_times[0], lfp_band_valid_times).all(), 'previously saved lfp band times do not match current times'\n\n        self.insert1(key)\n\n    def fetch_nwb(self, *attrs, **kwargs):\n        return fetch_nwb(self, (AnalysisNwbfile, 'analysis_file_abs_path'), *attrs, **kwargs)\n"
] |
[
[
"numpy.asarray",
"numpy.round",
"numpy.append",
"numpy.where",
"numpy.isin",
"numpy.isclose"
]
] |
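The LFP.make() step in the record above keeps only valid intervals longer than one second by looping over valid_times and collecting indices. A minimal vectorized sketch of the same filtering (illustration only, assuming valid_times is an (N, 2) array of start/stop pairs as the surrounding code implies):

import numpy as np

# Sketch: drop intervals shorter than min_interval_length, as in LFP.make().
valid_times = np.array([[0.0, 0.5], [1.0, 3.2], [4.0, 4.8], [5.0, 9.0]])
min_interval_length = 1.0
kept = valid_times[(valid_times[:, 1] - valid_times[:, 0]) > min_interval_length]
print(kept)  # [[1.  3.2] [5.  9. ]]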
thouis/pandas
|
[
"f014b01af7bd2d03266697e672c2d41daded3fca"
] |
[
"pandas/tests/test_panel4d.py"
] |
[
"from datetime import datetime\nimport os\nimport operator\nimport unittest\nimport nose\n\nimport numpy as np\n\nfrom pandas import DataFrame, Index, isnull, notnull, pivot, MultiIndex\nfrom pandas.core.datetools import bday\nfrom pandas.core.frame import group_agg\nfrom pandas.core.panel import Panel\nfrom pandas.core.panel4d import Panel4D\nfrom pandas.core.series import remove_na\nimport pandas.core.common as com\nimport pandas.core.panel as panelmod\nfrom pandas.util import py3compat\nfrom pandas.io.parsers import (ExcelFile, ExcelWriter)\n\nfrom pandas.util.testing import (assert_panel_equal,\n assert_panel4d_equal,\n assert_frame_equal,\n assert_series_equal,\n assert_almost_equal)\nimport pandas.util.testing as tm\n\ndef add_nans(panel4d):\n for l, label in enumerate(panel4d.labels):\n panel = panel4d[label]\n tm.add_nans(panel)\n\nclass SafeForLongAndSparse(object):\n\n _multiprocess_can_split_ = True\n\n def test_repr(self):\n foo = repr(self.panel4d)\n\n def test_iter(self):\n tm.equalContents(list(self.panel4d), self.panel4d.labels)\n\n def test_count(self):\n f = lambda s: notnull(s).sum()\n self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False)\n\n def test_sum(self):\n self._check_stat_op('sum', np.sum)\n\n def test_mean(self):\n self._check_stat_op('mean', np.mean)\n\n def test_prod(self):\n self._check_stat_op('prod', np.prod)\n\n def test_median(self):\n def wrapper(x):\n if isnull(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper)\n\n def test_min(self):\n self._check_stat_op('min', np.min)\n\n def test_max(self):\n self._check_stat_op('max', np.max)\n\n def test_skew(self):\n try:\n from scipy.stats import skew\n except ImportError:\n raise nose.SkipTest\n\n def this_skew(x):\n if len(x) < 3:\n return np.nan\n return skew(x, bias=False)\n self._check_stat_op('skew', this_skew)\n\n # def test_mad(self):\n # f = lambda x: np.abs(x - x.mean()).mean()\n # self._check_stat_op('mad', f)\n\n def test_var(self):\n def alt(x):\n if len(x) < 2:\n return np.nan\n return np.var(x, ddof=1)\n self._check_stat_op('var', alt)\n\n def test_std(self):\n def alt(x):\n if len(x) < 2:\n return np.nan\n return np.std(x, ddof=1)\n self._check_stat_op('std', alt)\n\n # def test_skew(self):\n # from scipy.stats import skew\n\n # def alt(x):\n # if len(x) < 3:\n # return np.nan\n # return skew(x, bias=False)\n\n # self._check_stat_op('skew', alt)\n\n def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):\n if obj is None:\n obj = self.panel4d\n\n # # set some NAs\n # obj.ix[5:10] = np.nan\n # obj.ix[15:20, -2:] = np.nan\n\n f = getattr(obj, name)\n\n if has_skipna:\n def skipna_wrapper(x):\n nona = remove_na(x)\n if len(nona) == 0:\n return np.nan\n return alternative(nona)\n\n def wrapper(x):\n return alternative(np.asarray(x))\n\n for i in range(obj.ndim):\n result = f(axis=i, skipna=False)\n assert_panel_equal(result, obj.apply(wrapper, axis=i))\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n for i in range(obj.ndim):\n result = f(axis=i)\n assert_panel_equal(result, obj.apply(skipna_wrapper, axis=i))\n\n self.assertRaises(Exception, f, axis=obj.ndim)\n\nclass SafeForSparse(object):\n\n _multiprocess_can_split_ = True\n\n @classmethod\n def assert_panel_equal(cls, x, y):\n assert_panel_equal(x, y)\n\n @classmethod\n def assert_panel4d_equal(cls, x, y):\n assert_panel4d_equal(x, y)\n\n def test_get_axis(self):\n assert(self.panel4d._get_axis(0) is self.panel4d.labels)\n assert(self.panel4d._get_axis(1) is 
self.panel4d.items)\n assert(self.panel4d._get_axis(2) is self.panel4d.major_axis)\n assert(self.panel4d._get_axis(3) is self.panel4d.minor_axis)\n\n def test_set_axis(self):\n new_labels = Index(np.arange(len(self.panel4d.labels)))\n new_items = Index(np.arange(len(self.panel4d.items)))\n new_major = Index(np.arange(len(self.panel4d.major_axis)))\n new_minor = Index(np.arange(len(self.panel4d.minor_axis)))\n\n # ensure propagate to potentially prior-cached items too\n label = self.panel4d['l1']\n self.panel4d.labels = new_labels\n\n if hasattr(self.panel4d, '_item_cache'):\n self.assert_('l1' not in self.panel4d._item_cache)\n self.assert_(self.panel4d.labels is new_labels)\n\n self.panel4d.major_axis = new_major\n self.assert_(self.panel4d[0].major_axis is new_major)\n self.assert_(self.panel4d.major_axis is new_major)\n\n self.panel4d.minor_axis = new_minor\n self.assert_(self.panel4d[0].minor_axis is new_minor)\n self.assert_(self.panel4d.minor_axis is new_minor)\n\n def test_get_axis_number(self):\n self.assertEqual(self.panel4d._get_axis_number('labels'), 0)\n self.assertEqual(self.panel4d._get_axis_number('items'), 1)\n self.assertEqual(self.panel4d._get_axis_number('major'), 2)\n self.assertEqual(self.panel4d._get_axis_number('minor'), 3)\n\n def test_get_axis_name(self):\n self.assertEqual(self.panel4d._get_axis_name(0), 'labels')\n self.assertEqual(self.panel4d._get_axis_name(1), 'items')\n self.assertEqual(self.panel4d._get_axis_name(2), 'major_axis')\n self.assertEqual(self.panel4d._get_axis_name(3), 'minor_axis')\n\n def test_arith(self):\n self._test_op(self.panel4d, operator.add)\n self._test_op(self.panel4d, operator.sub)\n self._test_op(self.panel4d, operator.mul)\n self._test_op(self.panel4d, operator.truediv)\n self._test_op(self.panel4d, operator.floordiv)\n self._test_op(self.panel4d, operator.pow)\n\n self._test_op(self.panel4d, lambda x, y: y + x)\n self._test_op(self.panel4d, lambda x, y: y - x)\n self._test_op(self.panel4d, lambda x, y: y * x)\n self._test_op(self.panel4d, lambda x, y: y / x)\n self._test_op(self.panel4d, lambda x, y: y ** x)\n\n self.assertRaises(Exception, self.panel4d.__add__, self.panel4d['l1'])\n\n @staticmethod\n def _test_op(panel4d, op):\n result = op(panel4d, 1)\n assert_panel_equal(result['l1'], op(panel4d['l1'], 1))\n\n def test_keys(self):\n tm.equalContents(self.panel4d.keys(), self.panel4d.labels)\n\n def test_iteritems(self):\n \"\"\"Test panel4d.iteritems(), aka panel4d.iterkv()\"\"\"\n # just test that it works\n for k, v in self.panel4d.iterkv():\n pass\n\n self.assertEqual(len(list(self.panel4d.iterkv())),\n len(self.panel4d.labels))\n\n def test_combinePanel4d(self):\n result = self.panel4d.add(self.panel4d)\n self.assert_panel4d_equal(result, self.panel4d * 2)\n\n def test_neg(self):\n self.assert_panel4d_equal(-self.panel4d, self.panel4d * -1)\n\n def test_select(self):\n p = self.panel4d\n\n # select labels\n result = p.select(lambda x: x in ('l1', 'l3'), axis='labels')\n expected = p.reindex(labels=['l1','l3'])\n self.assert_panel4d_equal(result, expected)\n\n # select items\n result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')\n expected = p.reindex(items=['ItemA', 'ItemC'])\n self.assert_panel4d_equal(result, expected)\n\n # select major_axis\n result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')\n new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]\n expected = p.reindex(major=new_major)\n self.assert_panel4d_equal(result, expected)\n\n # select minor_axis\n result = 
p.select(lambda x: x in ('D', 'A'), axis=3)\n expected = p.reindex(minor=['A', 'D'])\n self.assert_panel4d_equal(result, expected)\n\n # corner case, empty thing\n result = p.select(lambda x: x in ('foo',), axis='items')\n self.assert_panel4d_equal(result, p.reindex(items=[]))\n\n def test_get_value(self):\n for item in self.panel.items:\n for mjr in self.panel.major_axis[::2]:\n for mnr in self.panel.minor_axis:\n result = self.panel.get_value(item, mjr, mnr)\n expected = self.panel[item][mnr][mjr]\n assert_almost_equal(result, expected)\n\n def test_abs(self):\n result = self.panel4d.abs()\n expected = np.abs(self.panel4d)\n self.assert_panel4d_equal(result, expected)\n\n p = self.panel4d['l1']\n result = p.abs()\n expected = np.abs(p)\n assert_panel_equal(result, expected)\n\n df = p['ItemA']\n result = df.abs()\n expected = np.abs(df)\n assert_frame_equal(result, expected)\n\nclass CheckIndexing(object):\n\n _multiprocess_can_split_ = True\n\n def test_getitem(self):\n self.assertRaises(Exception, self.panel4d.__getitem__, 'ItemQ')\n\n def test_delitem_and_pop(self):\n expected = self.panel4d['l2']\n result = self.panel4d.pop('l2')\n assert_panel_equal(expected, result)\n self.assert_('l2' not in self.panel4d.labels)\n\n del self.panel4d['l3']\n self.assert_('l3' not in self.panel4d.labels)\n self.assertRaises(Exception, self.panel4d.__delitem__, 'l3')\n\n values = np.empty((4, 4, 4, 4))\n values[0] = 0\n values[1] = 1\n values[2] = 2\n values[3] = 3\n\n panel4d = Panel4D(values, range(4), range(4), range(4), range(4))\n\n # did we delete the right row?\n\n panel4dc = panel4d.copy()\n del panel4dc[0]\n assert_panel_equal(panel4dc[1], panel4d[1])\n assert_panel_equal(panel4dc[2], panel4d[2])\n assert_panel_equal(panel4dc[3], panel4d[3])\n\n panel4dc = panel4d.copy()\n del panel4dc[1]\n assert_panel_equal(panel4dc[0], panel4d[0])\n assert_panel_equal(panel4dc[2], panel4d[2])\n assert_panel_equal(panel4dc[3], panel4d[3])\n\n panel4dc = panel4d.copy()\n del panel4dc[2]\n assert_panel_equal(panel4dc[1], panel4d[1])\n assert_panel_equal(panel4dc[0], panel4d[0])\n assert_panel_equal(panel4dc[3], panel4d[3])\n\n panel4dc = panel4d.copy()\n del panel4dc[3]\n assert_panel_equal(panel4dc[1], panel4d[1])\n assert_panel_equal(panel4dc[2], panel4d[2])\n assert_panel_equal(panel4dc[0], panel4d[0])\n\n def test_setitem(self):\n ## LongPanel with one item\n #lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()\n #self.assertRaises(Exception, self.panel.__setitem__,\n # 'ItemE', lp)\n\n # Panel\n p = Panel(dict(ItemA = self.panel4d['l1']['ItemA'][2:].filter(items=['A', 'B'])))\n self.panel4d['l4'] = p\n self.panel4d['l5'] = p\n\n p2 = self.panel4d['l4']\n\n assert_panel_equal(p, p2.reindex(items = p.items,\n major_axis = p.major_axis,\n minor_axis = p.minor_axis))\n\n # scalar\n self.panel4d['lG'] = 1\n self.panel4d['lE'] = True\n self.assert_(self.panel4d['lG'].values.dtype == np.int64)\n self.assert_(self.panel4d['lE'].values.dtype == np.bool_)\n\n # object dtype\n self.panel4d['lQ'] = 'foo'\n self.assert_(self.panel4d['lQ'].values.dtype == np.object_)\n\n # boolean dtype\n self.panel4d['lP'] = self.panel4d['l1'] > 0\n self.assert_(self.panel4d['lP'].values.dtype == np.bool_)\n\n def test_comparisons(self):\n p1 = tm.makePanel4D()\n p2 = tm.makePanel4D()\n\n tp = p1.reindex(labels = p1.labels + ['foo'])\n p = p1[p1.labels[0]]\n\n def test_comp(func):\n result = func(p1, p2)\n self.assert_(np.array_equal(result.values,\n func(p1.values, p2.values)))\n\n # versus non-indexed same objs\n 
self.assertRaises(Exception, func, p1, tp)\n\n # versus different objs\n self.assertRaises(Exception, func, p1, p)\n\n result3 = func(self.panel4d, 0)\n self.assert_(np.array_equal(result3.values,\n func(self.panel4d.values, 0)))\n\n test_comp(operator.eq)\n test_comp(operator.ne)\n test_comp(operator.lt)\n test_comp(operator.gt)\n test_comp(operator.ge)\n test_comp(operator.le)\n\n def test_setitem_ndarray(self):\n raise nose.SkipTest\n # from pandas import DateRange, datetools\n\n # timeidx = DateRange(start=datetime(2009,1,1),\n # end=datetime(2009,12,31),\n # offset=datetools.MonthEnd())\n # lons_coarse = np.linspace(-177.5, 177.5, 72)\n # lats_coarse = np.linspace(-87.5, 87.5, 36)\n # P = Panel(items=timeidx, major_axis=lons_coarse, minor_axis=lats_coarse)\n # data = np.random.randn(72*36).reshape((72,36))\n # key = datetime(2009,2,28)\n # P[key] = data#\n\n # assert_almost_equal(P[key].values, data)\n\n def test_major_xs(self):\n ref = self.panel4d['l1']['ItemA']\n\n idx = self.panel4d.major_axis[5]\n xs = self.panel4d.major_xs(idx)\n\n assert_series_equal(xs['l1'].T['ItemA'], ref.xs(idx))\n\n # not contained\n idx = self.panel4d.major_axis[0] - bday\n self.assertRaises(Exception, self.panel4d.major_xs, idx)\n\n def test_major_xs_mixed(self):\n self.panel4d['l4'] = 'foo'\n xs = self.panel4d.major_xs(self.panel4d.major_axis[0])\n self.assert_(xs['l1']['A'].dtype == np.float64)\n self.assert_(xs['l4']['A'].dtype == np.object_)\n\n def test_minor_xs(self):\n ref = self.panel4d['l1']['ItemA']\n\n idx = self.panel4d.minor_axis[1]\n xs = self.panel4d.minor_xs(idx)\n\n assert_series_equal(xs['l1'].T['ItemA'], ref[idx])\n\n # not contained\n self.assertRaises(Exception, self.panel4d.minor_xs, 'E')\n\n def test_minor_xs_mixed(self):\n self.panel4d['l4'] = 'foo'\n\n xs = self.panel4d.minor_xs('D')\n self.assert_(xs['l1'].T['ItemA'].dtype == np.float64)\n self.assert_(xs['l4'].T['ItemA'].dtype == np.object_)\n\n def test_xs(self):\n l1 = self.panel4d.xs('l1', axis=0)\n expected = self.panel4d['l1']\n assert_panel_equal(l1, expected)\n\n # not view by default\n l1.values[:] = np.nan\n self.assert_(not np.isnan(self.panel4d['l1'].values).all())\n\n # but can get view\n l1_view = self.panel4d.xs('l1', axis=0, copy=False)\n l1_view.values[:] = np.nan\n self.assert_(np.isnan(self.panel4d['l1'].values).all())\n\n # mixed-type\n self.panel4d['strings'] = 'foo'\n self.assertRaises(Exception, self.panel4d.xs, 'D', axis=2,\n copy=False)\n\n def test_getitem_fancy_labels(self):\n panel4d = self.panel4d\n\n labels = panel4d.labels[[1, 0]]\n items = panel4d.items[[1, 0]]\n dates = panel4d.major_axis[::2]\n cols = ['D', 'C', 'F']\n\n # all 4 specified\n assert_panel4d_equal(panel4d.ix[labels, items, dates, cols],\n panel4d.reindex(labels=labels, items=items, major=dates, minor=cols))\n\n # 3 specified\n assert_panel4d_equal(panel4d.ix[:, items, dates, cols],\n panel4d.reindex(items=items, major=dates, minor=cols))\n\n # 2 specified\n assert_panel4d_equal(panel4d.ix[:, :, dates, cols],\n panel4d.reindex(major=dates, minor=cols))\n\n assert_panel4d_equal(panel4d.ix[:, items, :, cols],\n panel4d.reindex(items=items, minor=cols))\n\n assert_panel4d_equal(panel4d.ix[:, items, dates, :],\n panel4d.reindex(items=items, major=dates))\n\n # only 1\n assert_panel4d_equal(panel4d.ix[:, items, :, :],\n panel4d.reindex(items=items))\n\n assert_panel4d_equal(panel4d.ix[:, :, dates, :],\n panel4d.reindex(major=dates))\n\n assert_panel4d_equal(panel4d.ix[:, :, :, cols],\n panel4d.reindex(minor=cols))\n\n def 
test_getitem_fancy_slice(self):\n pass\n\n def test_getitem_fancy_ints(self):\n pass\n\n def test_getitem_fancy_xs(self):\n raise nose.SkipTest\n #self.assertRaises(NotImplementedError, self.panel4d.major_xs)\n #self.assertRaises(NotImplementedError, self.panel4d.minor_xs)\n\n def test_get_value(self):\n for label in self.panel4d.labels:\n for item in self.panel4d.items:\n for mjr in self.panel4d.major_axis[::2]:\n for mnr in self.panel4d.minor_axis:\n result = self.panel4d.get_value(label, item, mjr, mnr)\n expected = self.panel4d[label][item][mnr][mjr]\n assert_almost_equal(result, expected)\n\n def test_set_value(self):\n for label in self.panel4d.labels:\n for item in self.panel4d.items:\n for mjr in self.panel4d.major_axis[::2]:\n for mnr in self.panel4d.minor_axis:\n self.panel4d.set_value(label, item, mjr, mnr, 1.)\n assert_almost_equal(self.panel4d[label][item][mnr][mjr], 1.)\n\n # resize\n res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5)\n self.assert_(isinstance(res, Panel4D))\n self.assert_(res is not self.panel4d)\n self.assertEqual(res.get_value('l4', 'ItemE', 'foo', 'bar'), 1.5)\n\n res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5)\n self.assert_(com.is_float_dtype(res3['l4'].values))\n\nclass TestPanel4d(unittest.TestCase, CheckIndexing, SafeForSparse,\n SafeForLongAndSparse):\n\n _multiprocess_can_split_ = True\n\n @classmethod\n def assert_panel4d_equal(cls,x, y):\n assert_panel4d_equal(x, y)\n\n def setUp(self):\n self.panel4d = tm.makePanel4D(nper=8)\n add_nans(self.panel4d)\n\n def test_constructor(self):\n # with BlockManager\n panel4d = Panel4D(self.panel4d._data)\n self.assert_(panel4d._data is self.panel4d._data)\n\n panel4d = Panel4D(self.panel4d._data, copy=True)\n self.assert_(panel4d._data is not self.panel4d._data)\n assert_panel4d_equal(panel4d, self.panel4d)\n\n # strings handled prop\n #panel4d = Panel4D([[['foo', 'foo', 'foo',],\n # ['foo', 'foo', 'foo']]])\n #self.assert_(wp.values.dtype == np.object_)\n\n vals = self.panel4d.values\n\n # no copy\n panel4d = Panel4D(vals)\n self.assert_(panel4d.values is vals)\n\n # copy\n panel4d = Panel4D(vals, copy=True)\n self.assert_(panel4d.values is not vals)\n\n def test_constructor_cast(self):\n zero_filled = self.panel4d.fillna(0)\n\n casted = Panel4D(zero_filled._data, dtype=int)\n casted2 = Panel4D(zero_filled.values, dtype=int)\n\n exp_values = zero_filled.values.astype(int)\n assert_almost_equal(casted.values, exp_values)\n assert_almost_equal(casted2.values, exp_values)\n\n # can't cast\n data = [[['foo', 'bar', 'baz']]]\n self.assertRaises(ValueError, Panel, data, dtype=float)\n\n def test_constructor_empty_panel(self):\n empty = Panel()\n self.assert_(len(empty.items) == 0)\n self.assert_(len(empty.major_axis) == 0)\n self.assert_(len(empty.minor_axis) == 0)\n\n def test_constructor_observe_dtype(self):\n # GH #411\n panel = Panel(items=range(3), major_axis=range(3),\n minor_axis=range(3), dtype='O')\n self.assert_(panel.values.dtype == np.object_)\n\n def test_consolidate(self):\n self.assert_(self.panel4d._data.is_consolidated())\n\n self.panel4d['foo'] = 1.\n self.assert_(not self.panel4d._data.is_consolidated())\n\n panel4d = self.panel4d.consolidate()\n self.assert_(panel4d._data.is_consolidated())\n\n def test_ctor_dict(self):\n l1 = self.panel4d['l1']\n l2 = self.panel4d['l2']\n\n d = {'A' : l1, 'B' : l2.ix[['ItemB'],:,:] }\n #d2 = {'A' : itema._series, 'B' : itemb[5:]._series}\n #d3 = {'A' : DataFrame(itema._series),\n # 'B' : DataFrame(itemb[5:]._series)}\n\n panel4d = 
Panel4D(d)\n #wp2 = Panel.from_dict(d2) # nested Dict\n #wp3 = Panel.from_dict(d3)\n #self.assert_(wp.major_axis.equals(self.panel.major_axis))\n assert_panel_equal(panel4d['A'], self.panel4d['l1'])\n assert_frame_equal(panel4d.ix['B','ItemB',:,:], self.panel4d.ix['l2',['ItemB'],:,:]['ItemB'])\n\n # intersect\n #wp = Panel.from_dict(d, intersect=True)\n #self.assert_(wp.major_axis.equals(itemb.index[5:]))\n\n # use constructor\n #assert_panel_equal(Panel(d), Panel.from_dict(d))\n #assert_panel_equal(Panel(d2), Panel.from_dict(d2))\n #assert_panel_equal(Panel(d3), Panel.from_dict(d3))\n\n # cast\n #dcasted = dict((k, v.reindex(wp.major_axis).fillna(0))\n # for k, v in d.iteritems())\n #result = Panel(dcasted, dtype=int)\n #expected = Panel(dict((k, v.astype(int))\n # for k, v in dcasted.iteritems()))\n #assert_panel_equal(result, expected)\n\n def test_constructor_dict_mixed(self):\n data = dict((k, v.values) for k, v in self.panel4d.iterkv())\n result = Panel4D(data)\n exp_major = Index(np.arange(len(self.panel4d.major_axis)))\n self.assert_(result.major_axis.equals(exp_major))\n\n result = Panel4D(data,\n labels = self.panel4d.labels,\n items = self.panel4d.items,\n major_axis = self.panel4d.major_axis,\n minor_axis = self.panel4d.minor_axis)\n assert_panel4d_equal(result, self.panel4d)\n\n data['l2'] = self.panel4d['l2']\n result = Panel4D(data)\n assert_panel4d_equal(result, self.panel4d)\n\n # corner, blow up\n data['l2'] = data['l2']['ItemB']\n self.assertRaises(Exception, Panel4D, data)\n\n data['l2'] = self.panel4d['l2'].values[:, :, :-1]\n self.assertRaises(Exception, Panel4D, data)\n\n def test_constructor_resize(self):\n data = self.panel4d._data\n labels= self.panel4d.labels[:-1]\n items = self.panel4d.items[:-1]\n major = self.panel4d.major_axis[:-1]\n minor = self.panel4d.minor_axis[:-1]\n\n result = Panel4D(data, labels=labels, items=items, major_axis=major, minor_axis=minor)\n expected = self.panel4d.reindex(labels=labels, items=items, major=major, minor=minor)\n assert_panel4d_equal(result, expected)\n\n result = Panel4D(data, items=items, major_axis=major)\n expected = self.panel4d.reindex(items=items, major=major)\n assert_panel4d_equal(result, expected)\n\n result = Panel4D(data, items=items)\n expected = self.panel4d.reindex(items=items)\n assert_panel4d_equal(result, expected)\n\n result = Panel4D(data, minor_axis=minor)\n expected = self.panel4d.reindex(minor=minor)\n assert_panel4d_equal(result, expected)\n\n def test_from_dict_mixed_orient(self):\n raise nose.SkipTest\n # df = tm.makeDataFrame()\n # df['foo'] = 'bar'\n\n # data = {'k1' : df,\n # 'k2' : df}\n\n # panel = Panel.from_dict(data, orient='minor')\n\n # self.assert_(panel['foo'].values.dtype == np.object_)\n # self.assert_(panel['A'].values.dtype == np.float64)\n\n def test_values(self):\n self.assertRaises(Exception, Panel, np.random.randn(5, 5, 5),\n range(5), range(5), range(4))\n\n def test_conform(self):\n p = self.panel4d['l1'].filter(items=['ItemA', 'ItemB'])\n conformed = self.panel4d.conform(p)\n\n assert(conformed.items.equals(self.panel4d.labels))\n assert(conformed.major_axis.equals(self.panel4d.major_axis))\n assert(conformed.minor_axis.equals(self.panel4d.minor_axis))\n\n def test_reindex(self):\n ref = self.panel4d['l2']\n\n # labels\n result = self.panel4d.reindex(labels=['l1','l2'])\n assert_panel_equal(result['l2'], ref)\n\n # items\n result = self.panel4d.reindex(items=['ItemA', 'ItemB'])\n assert_frame_equal(result['l2']['ItemB'], ref['ItemB'])\n\n # major\n new_major = 
list(self.panel4d.major_axis[:10])\n result = self.panel4d.reindex(major=new_major)\n assert_frame_equal(result['l2']['ItemB'], ref['ItemB'].reindex(index=new_major))\n\n # raise exception put both major and major_axis\n self.assertRaises(Exception, self.panel4d.reindex,\n major_axis=new_major, major=new_major)\n\n # minor\n new_minor = list(self.panel4d.minor_axis[:2])\n result = self.panel4d.reindex(minor=new_minor)\n assert_frame_equal(result['l2']['ItemB'], ref['ItemB'].reindex(columns=new_minor))\n\n result = self.panel4d.reindex(labels=self.panel4d.labels,\n items =self.panel4d.items,\n major =self.panel4d.major_axis,\n minor =self.panel4d.minor_axis)\n\n assert(result.labels is self.panel4d.labels)\n assert(result.items is self.panel4d.items)\n assert(result.major_axis is self.panel4d.major_axis)\n assert(result.minor_axis is self.panel4d.minor_axis)\n\n self.assertRaises(Exception, self.panel4d.reindex)\n\n # with filling\n smaller_major = self.panel4d.major_axis[::5]\n smaller = self.panel4d.reindex(major=smaller_major)\n\n larger = smaller.reindex(major=self.panel4d.major_axis,\n method='pad')\n\n assert_panel_equal(larger.ix[:,:,self.panel4d.major_axis[1],:],\n smaller.ix[:,:,smaller_major[0],:])\n\n # don't necessarily copy\n result = self.panel4d.reindex(major=self.panel4d.major_axis, copy=False)\n self.assert_(result is self.panel4d)\n\n def test_reindex_like(self):\n # reindex_like\n smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1],\n items =self.panel4d.items[:-1],\n major =self.panel4d.major_axis[:-1],\n minor =self.panel4d.minor_axis[:-1])\n smaller_like = self.panel4d.reindex_like(smaller)\n assert_panel4d_equal(smaller, smaller_like)\n\n def test_take(self):\n raise nose.SkipTest\n\n # # axis == 0\n # result = self.panel.take([2, 0, 1], axis=0)\n # expected = self.panel.reindex(items=['ItemC', 'ItemA', 'ItemB'])\n # assert_panel_equal(result, expected)#\n\n # # axis >= 1\n # result = self.panel.take([3, 0, 1, 2], axis=2)\n # expected = self.panel.reindex(minor=['D', 'A', 'B', 'C'])\n # assert_panel_equal(result, expected)\n\n # self.assertRaises(Exception, self.panel.take, [3, -1, 1, 2], axis=2)\n # self.assertRaises(Exception, self.panel.take, [4, 0, 1, 2], axis=2)\n\n def test_sort_index(self):\n import random\n\n rlabels= list(self.panel4d.labels)\n ritems = list(self.panel4d.items)\n rmajor = list(self.panel4d.major_axis)\n rminor = list(self.panel4d.minor_axis)\n random.shuffle(rlabels)\n random.shuffle(ritems)\n random.shuffle(rmajor)\n random.shuffle(rminor)\n\n random_order = self.panel4d.reindex(labels=rlabels)\n sorted_panel4d = random_order.sort_index(axis=0)\n assert_panel4d_equal(sorted_panel4d, self.panel4d)\n\n # descending\n #random_order = self.panel.reindex(items=ritems)\n #sorted_panel = random_order.sort_index(axis=0, ascending=False)\n #assert_panel_equal(sorted_panel,\n # self.panel.reindex(items=self.panel.items[::-1]))\n\n #random_order = self.panel.reindex(major=rmajor)\n #sorted_panel = random_order.sort_index(axis=1)\n #assert_panel_equal(sorted_panel, self.panel)\n\n #random_order = self.panel.reindex(minor=rminor)\n #sorted_panel = random_order.sort_index(axis=2)\n #assert_panel_equal(sorted_panel, self.panel)\n\n def test_fillna(self):\n filled = self.panel4d.fillna(0)\n self.assert_(np.isfinite(filled.values).all())\n\n filled = self.panel4d.fillna(method='backfill')\n assert_panel_equal(filled['l1'],\n self.panel4d['l1'].fillna(method='backfill'))\n\n panel4d = self.panel4d.copy()\n panel4d['str'] = 'foo'\n\n filled = 
panel4d.fillna(method='backfill')\n assert_panel_equal(filled['l1'],\n panel4d['l1'].fillna(method='backfill'))\n\n empty = self.panel4d.reindex(labels=[])\n filled = empty.fillna(0)\n assert_panel4d_equal(filled, empty)\n\n def test_swapaxes(self):\n result = self.panel4d.swapaxes('labels','items')\n self.assert_(result.items is self.panel4d.labels)\n\n result = self.panel4d.swapaxes('labels','minor')\n self.assert_(result.labels is self.panel4d.minor_axis)\n\n result = self.panel4d.swapaxes('items', 'minor')\n self.assert_(result.items is self.panel4d.minor_axis)\n\n result = self.panel4d.swapaxes('items', 'major')\n self.assert_(result.items is self.panel4d.major_axis)\n\n result = self.panel4d.swapaxes('major', 'minor')\n self.assert_(result.major_axis is self.panel4d.minor_axis)\n\n # this should also work\n result = self.panel4d.swapaxes(0, 1)\n self.assert_(result.labels is self.panel4d.items)\n\n # this should also work\n self.assertRaises(Exception, self.panel4d.swapaxes, 'items', 'items')\n\n def test_to_frame(self):\n raise nose.SkipTest\n # # filtered\n # filtered = self.panel.to_frame()\n # expected = self.panel.to_frame().dropna(how='any')\n # assert_frame_equal(filtered, expected)\n\n # # unfiltered\n # unfiltered = self.panel.to_frame(filter_observations=False)\n # assert_panel_equal(unfiltered.to_panel(), self.panel)\n\n # # names\n # self.assertEqual(unfiltered.index.names, ['major', 'minor'])\n\n def test_to_frame_mixed(self):\n raise nose.SkipTest\n # panel = self.panel.fillna(0)\n # panel['str'] = 'foo'\n # panel['bool'] = panel['ItemA'] > 0\n\n # lp = panel.to_frame()\n # wp = lp.to_panel()\n # self.assertEqual(wp['bool'].values.dtype, np.bool_)\n # assert_frame_equal(wp['bool'], panel['bool'])\n\n def test_update(self):\n\n p4d = Panel4D([[[[1.5, np.nan, 3.],\n [1.5, np.nan, 3.],\n [1.5, np.nan, 3.],\n [1.5, np.nan, 3.]],\n [[1.5, np.nan, 3.],\n [1.5, np.nan, 3.],\n [1.5, np.nan, 3.],\n [1.5, np.nan, 3.]]]])\n\n other = Panel4D([[[[3.6, 2., np.nan]],\n [[np.nan, np.nan, 7]]]])\n\n p4d.update(other)\n\n expected = Panel4D([[[[3.6, 2, 3.],\n [1.5, np.nan, 3.],\n [1.5, np.nan, 3.],\n [1.5, np.nan, 3.]],\n [[1.5, np.nan, 7],\n [1.5, np.nan, 3.],\n [1.5, np.nan, 3.],\n [1.5, np.nan, 3.]]]])\n\n assert_panel4d_equal(p4d, expected)\n\n def test_filter(self):\n raise nose.SkipTest\n\n def test_apply(self):\n raise nose.SkipTest\n\n def test_compound(self):\n raise nose.SkipTest\n # compounded = self.panel.compound()\n\n # assert_series_equal(compounded['ItemA'],\n # (1 + self.panel['ItemA']).product(0) - 1)\n\n def test_shift(self):\n raise nose.SkipTest\n # # major\n # idx = self.panel.major_axis[0]\n # idx_lag = self.panel.major_axis[1]\n\n # shifted = self.panel.shift(1)\n\n # assert_frame_equal(self.panel.major_xs(idx),\n # shifted.major_xs(idx_lag))\n\n # # minor\n # idx = self.panel.minor_axis[0]\n # idx_lag = self.panel.minor_axis[1]\n\n # shifted = self.panel.shift(1, axis='minor')\n\n # assert_frame_equal(self.panel.minor_xs(idx),\n # shifted.minor_xs(idx_lag))\n\n # self.assertRaises(Exception, self.panel.shift, 1, axis='items')\n\n def test_multiindex_get(self):\n raise nose.SkipTest\n # ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1), ('b',2)],\n # names=['first', 'second'])\n # wp = Panel(np.random.random((4,5,5)),\n # items=ind,\n # major_axis=np.arange(5),\n # minor_axis=np.arange(5))\n # f1 = wp['a']\n # f2 = wp.ix['a']\n # assert_panel_equal(f1, f2)\n\n # self.assert_((f1.items == [1, 2]).all())\n # self.assert_((f2.items == [1, 2]).all())\n\n # 
ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],\n # names=['first', 'second'])\n\n def test_multiindex_blocks(self):\n raise nose.SkipTest\n # ind = MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],\n # names=['first', 'second'])\n # wp = Panel(self.panel._data)\n # wp.items = ind\n # f1 = wp['a']\n # self.assert_((f1.items == [1, 2]).all())\n\n # f1 = wp[('b',1)]\n # self.assert_((f1.columns == ['A', 'B', 'C', 'D']).all())\n\n def test_repr_empty(self):\n empty = Panel4D()\n repr(empty)\n\n def test_rename(self):\n mapper = {\n 'l1' : 'foo',\n 'l2' : 'bar',\n 'l3' : 'baz'\n }\n\n renamed = self.panel4d.rename_axis(mapper, axis=0)\n exp = Index(['foo', 'bar', 'baz'])\n self.assert_(renamed.labels.equals(exp))\n\n renamed = self.panel4d.rename_axis(str.lower, axis=3)\n exp = Index(['a', 'b', 'c', 'd'])\n self.assert_(renamed.minor_axis.equals(exp))\n\n # don't copy\n renamed_nocopy = self.panel4d.rename_axis(mapper, axis=0, copy=False)\n renamed_nocopy['foo'] = 3.\n self.assert_((self.panel4d['l1'].values == 3).all())\n\n def test_get_attr(self):\n assert_panel_equal(self.panel4d['l1'], self.panel4d.l1)\n\n def test_group_agg(self):\n values = np.ones((10, 2)) * np.arange(10).reshape((10, 1))\n bounds = np.arange(5) * 2\n f = lambda x: x.mean(axis=0)\n\n agged = group_agg(values, bounds, f)\n\n assert(agged[1][0] == 2.5)\n assert(agged[2][0] == 4.5)\n\n # test a function that doesn't aggregate\n f2 = lambda x: np.zeros((2,2))\n self.assertRaises(Exception, group_agg, values, bounds, f2)\n\n def test_from_frame_level1_unsorted(self):\n raise nose.SkipTest\n # tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2),\n # ('AAPL', 1), ('MSFT', 1)]\n # midx = MultiIndex.from_tuples(tuples)\n # df = DataFrame(np.random.rand(5,4), index=midx)\n # p = df.to_panel()\n # assert_frame_equal(p.minor_xs(2), df.ix[:,2].sort_index())\n\n def test_to_excel(self):\n raise nose.SkipTest\n # try:\n # import xlwt\n # import xlrd\n # import openpyxl\n # except ImportError:\n # raise nose.SkipTest\n\n # for ext in ['xls', 'xlsx']:\n # path = '__tmp__.' + ext\n # self.panel.to_excel(path)\n # reader = ExcelFile(path)\n # for item, df in self.panel.iteritems():\n # recdf = reader.parse(str(item),index_col=0)\n # assert_frame_equal(df, recdf)\n # os.remove(path)\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure',\n '--with-timer'],\n exit=False)\n"
] |
[
[
"pandas.core.panel.Panel",
"numpy.asarray",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"numpy.var",
"pandas.util.testing.makePanel4D",
"pandas.util.testing.add_nans",
"numpy.arange",
"pandas.core.common.is_float_dtype",
"pandas.util.testing.assert_series_equal",
"pandas.Index",
"pandas.util.testing.assert_panel4d_equal",
"numpy.std",
"pandas.util.testing.assert_panel_equal",
"scipy.stats.skew",
"numpy.zeros",
"pandas.core.panel4d.Panel4D",
"pandas.notnull",
"numpy.isnan",
"numpy.median",
"pandas.util.testing.assert_almost_equal",
"pandas.core.frame.group_agg",
"numpy.abs",
"pandas.core.series.remove_na",
"pandas.isnull",
"numpy.isfinite",
"numpy.ones",
"numpy.empty"
]
] |
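test_panel4d.py above targets the experimental Panel4D container, which was deprecated and later removed from pandas. On current pandas the usual substitute is a DataFrame with a four-level MultiIndex; a sketch under that assumption, reusing the test's labels/items/major_axis/minor_axis terminology:

import numpy as np
import pandas as pd

# Sketch: a 4-level MultiIndex DataFrame standing in for a Panel4D.
index = pd.MultiIndex.from_product(
    [['l1', 'l2'], ['ItemA', 'ItemB'],
     pd.date_range('2000-01-03', periods=4), list('ABCD')],
    names=['labels', 'items', 'major_axis', 'minor_axis'])
df = pd.DataFrame({'value': np.random.randn(len(index))}, index=index)
l1 = df.xs('l1', level='labels')  # analogous to panel4d['l1']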
AbdallahHemdan/Arithmetic-Coding
|
[
"326365a8281c1f570fd88ebf12ec96c863a00226"
] |
[
"encoding.py"
] |
[
"import numpy as np\nimport cv2\n\ninputImg = cv2.imread('dog.jpg', 0) # read the image\ncv2.imshow('dog', inputImg)\n\nrow = inputImg.shape[0]\ncol = inputImg.shape[1]\n\nimg = np.array(inputImg).flatten() # flatten the image into 1d array\nfrq = np.zeros(256, dtype=int) # frq of all gray scale levels\nblockSize = int(input('1. Enter the block size: '))\nfloatSize = int(input('2. Enter the float size (16-32-64): '))\ntotal = row * col\nif floatSize == 16:\n res = np.zeros(total // blockSize, dtype=np.float16) # final codes\n # array of probability of each of 256 gray scale levles\n prob = np.zeros(256, dtype=np.float16)\nelif floatSize == 32:\n res = np.zeros(total // blockSize, dtype=np.float32) # final codes\n prob = np.zeros(256, dtype=np.float32)\nelif floatSize == 64:\n res = np.zeros(total // blockSize, dtype=np.float64) # final codes\n prob = np.zeros(256, dtype=np.float64)\n\n# prob = np.zeros(256) # array of probability of each of 256 gray scale levles\ngrayLvl = 256\n\nfor i in img:\n frq[i] += 1\n\nfor i in range(0, grayLvl):\n prob[i] = frq[i] / (total)\n\nfor i in range(1, grayLvl):\n prob[i] += prob[i - 1]\n\nfor i in range(0, total, blockSize):\n l = 0.0\n r = 1.0\n for j in range(i, i + blockSize):\n oldLeft = l\n oldRight = r\n # base + (range) * prob[cur pixel]\n if img[j] != 0:\n l = oldLeft + (oldRight - oldLeft) * prob[img[j] - 1]\n r = oldLeft + (oldRight - oldLeft) * prob[img[j]]\n # result of the block is the average of (upper - lower)\n it = int(i / blockSize)\n res[it] = (l + r) / 2\n\n# export encoded tags && pro\nnp.save('./encoded-image', res)\nnp.save('./probability', prob)\nblockSizeFile = open('blockSizeFile.txt', \"w\")\nblockSizeFile.write(str(blockSize)) # read block size\nblockSizeFile.write('\\n' + str(row)) # read row dimension\nblockSizeFile.write('\\n' + str(col)) # read col dimension\n# terminate showing the image on pressing any key\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.save"
]
] |
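encoding.py above writes one tag per block (the midpoint of the final interval) plus the cumulative probability table, so decoding is the mirror image: repeatedly locate the tag within the cumulative distribution and rescale. A sketch of that inverse (decode_block is a hypothetical helper, not part of the record):

import numpy as np

def decode_block(tag, prob, block_size):
    # prob is the cumulative distribution saved by the encoder;
    # tag is one entry of the encoded-image array.
    lo, hi = 0.0, 1.0
    symbols = []
    for _ in range(block_size):
        scaled = (tag - lo) / (hi - lo)
        # first index whose cumulative probability exceeds the scaled tag
        sym = int(np.searchsorted(prob, scaled, side='right'))
        lower = prob[sym - 1] if sym > 0 else 0.0
        lo, hi = lo + (hi - lo) * lower, lo + (hi - lo) * prob[sym]
        symbols.append(sym)
    return symbols

Because the encoder narrows the interval with 16/32/64-bit floats, a tag only pins down a limited number of symbols, so exact recovery requires a correspondingly small block size.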
pouyaaghahoseini/nla-project
|
[
"4bdc77c5c029b27f2e7cb4a0529fb00191f02815",
"4bdc77c5c029b27f2e7cb4a0529fb00191f02815"
] |
[
"Problem 1/gauss-elimination.py",
"Problem 1/SOR.py"
] |
[
"import numpy as np\nimport time\n\ndef create_tridiag(a, b, c, n):\n A = np.zeros((n, n))\n for i in range(n):\n A[i][i] = b\n if i < n - 1:\n A[i + 1][i] = a\n A[i][i + 1] = c\n return A\n\n\ndef solve_gauss(A, B):\n n = len(A)\n det = 1\n for i in range(n):\n # Find Maximum Row\n maxRow = i\n for j in range(i + 1, n):\n if abs(A[j][i]) > abs(A[maxRow][i]):\n maxRow = j\n # Swap Rows\n if maxRow != i:\n det *= -1\n for k in range(0, n):\n temp = A[i][k]\n A[i][k] = A[maxRow][k]\n A[maxRow][k] = temp\n temp = B[maxRow]\n B[maxRow] = B[i]\n B[i] = temp\n\n # Make upper diagonal\n for k in range(i + 1, n):\n t = -A[k][i] / A[i][i]\n for c in range(i, n):\n A[k][c] += t * A[i][c]\n B[k] += t * B[i]\n\n # Solve equation Ax=b for an upper triangular matrix A\n X = [0 for i in range(n)]\n for i in range(n - 1, -1, -1):\n X[i] = B[i] / A[i][i]\n for k in range(i - 1, -1, -1):\n B[k] -= A[k][i] * X[i]\n det *= A[i][i]\n return det, X\n\n\n'''___Main___'''\n\nn = 50\na = create_tridiag(1, 4, -1, n)\nb = [1 for i in range(n)]\nstart = time.time()\nd, result = solve_gauss(a, b)\nend = time.time()\nprint(\"Determinant: \", d)\nprint(\"Result: \", result)\nprint(\"NumPy Result: \", np.linalg.solve(a, b))\nprint(\"Time: \", end - start)\n",
"import numpy as np\nimport time\nfrom scipy.linalg import solve\ndef create_tridiag(l, r, u, n):\n A = np.zeros((n, n))\n temp = np.ones(n - 1)\n np.fill_diagonal(A, r)\n np.fill_diagonal(A[1:], l)\n np.fill_diagonal(A[:, 1:], u)\n return A\n\n\ndef SOR(a, x, b, Iteration = False):\n x0 = np.copy(x)\n n = len(x)\n w = 1.5\n # Finding X1\n for j in range(n):\n d = np.dot(b[j], w)\n for i in range(0, n):\n if (j != i):\n d -= w * a[j][i] * x[i]\n x[j] = (1-w) * x[i] + d / a[j][j]\n # Iterate Seidel\n while np.linalg.norm(np.subtract(x, x0)) > 0.0001:\n if Iteration == True:\n print(\"Iteration: \" + str(x))\n x0 = np.copy(x)\n for j in range(0, n):\n # temp variable d to store b[j]\n d = b[j]\n # to calculate respective xi, yi, zi\n for i in range(0, n):\n if (j != i):\n d -= a[j][i] * x[i]\n # updating the value of our solution\n x[j] = d / a[j][j]\n # returning our updated solution\n return x\n\nn = 50\na = create_tridiag(1, 4, -1, n)\nb = np.ones(n)\nx = np.ones(n)\nprint(x)\n\n# loop run for m times depending on m the error value\nstart = time.time()\nx = SOR(a, x, b, Iteration = False)\n # print each time the updated solution\nend = time.time()\nprint(\"Scipy Result: \", solve(a, b))\nprint(x)\nprint(\"Time: \", end - start)\n"
] |
[
[
"numpy.linalg.solve",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.subtract",
"numpy.ones",
"numpy.copy",
"numpy.fill_diagonal",
"scipy.linalg.solve",
"numpy.zeros"
]
] |
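Both solvers in the record above are exercised on the same tridiagonal system (diagonal 4, off-diagonals 1 and -1). That matrix is strictly diagonally dominant, the classical sufficient condition for Gauss-Seidel convergence (and for SOR with 0 < w <= 1; the w = 1.5 used in SOR.py is not covered by this criterion alone, though it converges here in practice). A quick check sketch, assuming the same construction as create_tridiag:

import numpy as np

# Strict diagonal dominance: |a_jj| > sum over i != j of |a_ji| on every row.
n = 50
A = np.zeros((n, n))
np.fill_diagonal(A, 4)
np.fill_diagonal(A[1:], 1)
np.fill_diagonal(A[:, 1:], -1)
off_diag = np.abs(A).sum(axis=1) - np.abs(np.diag(A))
assert np.all(np.abs(np.diag(A)) > off_diag)  # 4 > 1 + 1 even on interior rows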
scribbler00/fastrenewables
|
[
"f1a722b19376d0e3f26707aa4af6caf7ba8a8dfa"
] |
[
"fastrenewables/baselines.py"
] |
[
"# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/00d_baselines.ipynb (unless otherwise specified).\n\n__all__ = ['truncated_svd', 'prep_fixed_point_empircal_bayes', 'evidence_fixed_point', 'BayesLinReg', 'RidgeRegression',\n 'relu', 'identity', 'sigmoid', 'tmp', 'test_acts', 'ELM', 'sample_bayes_linear_model', 'MCLeanPowerCurve']\n\n# Cell\n#hide\nimport numpy as np\nfrom sklearn.base import BaseEstimator\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom sklearn.utils.validation import check_is_fitted\nfrom sktime.transformations.panel.rocket import Rocket\nfrom sktime.utils.data_processing import from_3d_numpy_to_nested\nimport matplotlib.pyplot as plt\nfrom fastcore.basics import listify\nfrom fastcore.test import *\nnp.random.seed(23)\nimport numpy as np\nfrom numba import njit\n\n# Cell\ndef _append_one_col(X):\n \"\"\"append array with ones in order to replace a fitted bias in the output\"\"\"\n return np.hstack([np.ones((X.shape[0], 1)), X])\n\n# Cell\n#hide\n@njit\ndef truncated_svd(x):\n u, s, vh = np.linalg.svd(x.transpose() @ x)\n s = np.sqrt(s)\n u_times_sigma = x @ vh.transpose()\n k = np.sum((s > 1e-10) * 1) # rank of f\n s = s.reshape(-1, 1)\n s = s[:k]\n vh = vh[:k]\n u = u_times_sigma[:, :k] / s.reshape(1, -1)\n return u, s, vh\n\n_tmp =truncated_svd(np.random.randn(20, 10).astype(np.float64))\n\n\ndef _fit_fixed_point(X: np.ndarray, y: np.ndarray, n_iter=100):\n \"\"\"\n LogME calculation proposed in the arxiv 2021 paper\n \"Ranking and Tuning Pre-trained Models: A New Paradigm of Exploiting Model Hubs\"\n at https://arxiv.org/abs/2110.10545, source code is from https://github.com/thuml/LogME\n \"\"\"\n N, D = X.shape\n u, s, vh, sigma = _fixed_point_svd(X)\n\n x, x2, res_x2 = prep_fixed_point_empircal_bayes(y, u)\n\n alpha, beta = 1.0, 1.0\n for idx in range(n_iter):\n t = alpha / beta\n gamma = (sigma / (sigma + t)).sum()\n m2 = (sigma * x2 / ((t + sigma) ** 2)).sum()\n res2 = (x2 / ((1 + sigma / t) ** 2)).sum() + res_x2\n alpha = gamma / (m2 + 1e-5)\n beta = (N - gamma) / (res2 + 1e-5)\n t_ = alpha / beta\n evidence = evidence_fixed_point(N, D, sigma, alpha, beta, m2, res2)\n evidence /= N\n if abs(t_ - t) / t <= 1e-3: # abs(t_ - t) <= 1e-5 or abs(1 / t_ - 1 / t) <= 1e-5:\n break\n\n if idx+1==n_iter:\n print(f\"Optimization of alpha {alpha:0.2f} and beta {beta:0.2f} not terminated. 
\")\n\n evidence = evidence_fixed_point(N, D, sigma, alpha, beta, m2, res2)\n\n evidence /= N\n w_mean = 1.0 / (t + sigma) * s * x\n w_mean = (vh.T @ w_mean).reshape(-1)\n identity_matrix = np.eye(np.size(X,1))\n w_precision = alpha * identity_matrix + beta * X.T@X\n return w_mean, w_precision, alpha, beta\n\ndef prep_fixed_point_empircal_bayes(y, u):\n y = y.reshape(-1, 1)\n x = u.T @ y # x has shape [k, 1], but actually x should have shape [N, 1]\n x2 = x ** 2\n res_x2 = (y ** 2).sum() - x2.sum() # if k < N, we compute sum of xi for 0 singular values directly\n return x,x2,res_x2\n\ndef _fixed_point_svd(X):\n N, D = X.shape # k = min(N, D)\n if N > D: # direct SVD may be expensive\n u, s, vh = truncated_svd(X)\n else:\n u, s, vh = np.linalg.svd(X, full_matrices=False)\n # u.shape = N x k\n # s.shape = k\n # vh.shape = k x D\n s = s.reshape(-1, 1)\n sigma = (s ** 2)\n return u,s,vh,sigma\n\ndef evidence_fixed_point(N, D, sigma, alpha, beta, m2, res2):\n evidence = D / 2.0 * np.log(alpha) \\\n + N / 2.0 * np.log(beta) \\\n - 0.5 * np.sum(np.log(alpha + beta * sigma)) \\\n - beta / 2.0 * res2 \\\n - alpha / 2.0 * m2 \\\n - N / 2.0 * np.log(2 * np.pi)\n\n return evidence\n\n# Cell\nclass BayesLinReg(BaseEstimator):\n \"\"\"Batch wise linear regression\"\"\"\n\n def __init__(self, alpha:float=1., beta:float=1.,\n empirical_bayes:bool=True, n_iter:int=100,\n use_fixed_point=False):\n self.alpha = alpha\n self.beta = beta\n self.n_features = None\n self.n_iter = n_iter\n self.empirical_bayes = empirical_bayes\n self.use_fixed_point = use_fixed_point\n\n\n def _create_matrices(self, X):\n if self.n_features is None:\n self.n_features = X.shape[1]\n\n self.w_mean = np.zeros(self.n_features)\n self.w_precision = np.identity(self.n_features) * self.alpha\n\n def fit_empirical_bayes(self, X, y):\n X,y = self._check_and_prep(X, y)\n \"\"\"\n Fitting empirical bayes based on training data to determine alpha and beta.\n This can be used when N>>M (more training samples than parameters).\n \"\"\"\n\n M = X.T @ X\n eigenvalues = np.linalg.eigvalsh(M)\n identity_matrix = np.eye(np.size(X,1))#np.identity(self.n_features)\n N = len(X)\n xTy = X.T @ y\n\n for idx in range(self.n_iter):\n params = [self.alpha, self.beta]\n\n # update the inverse covariance matrix (Bishop eq. 3.51)\n w_precision = self.alpha * identity_matrix + self.beta * M\n # update the mean vector (Bishop eq. 3.50)\n w_mean = self.beta * np.linalg.solve(w_precision, xTy)\n\n # TODO (Bishop eq. 3.91)\n gamma = np.sum(eigenvalues / (self.alpha + eigenvalues))\n # TODO (Bishop eq. 3.98)\n self.alpha = float(gamma / np.sum(w_mean ** 2).clip(min=1e-10))\n\n # TODO (Bishop eq. 3.99)\n self.beta = float((N-gamma) / np.sum(np.square(y - X @ w_mean)))\n\n if np.allclose(params, [self.alpha, self.beta]):\n break\n\n if idx+1==self.n_iter:\n print(f\"Optimization of alpha {self.alpha:0.2f} and beta {self.beta:0.2f} not terminated. 
\")\n\n self.w_mean = w_mean\n self.w_precision = w_precision\n # calculate the covariance in advance for faster inference\n self.w_covariance = np.linalg.inv(w_precision)\n\n\n def fit_fixed_point(self, X, y):\n X = np.atleast_2d(X)\n X = _append_one_col(X)\n\n self.w_mean, self.w_precision, self.alpha, self.beta = _fit_fixed_point(X,y)\n # import so that model is not creating new matrices\n self.n_features = X.shape[1]\n\n self.w_covariance = np.linalg.inv(self.w_precision)\n\n def fit(self, X, y):\n if self.use_fixed_point:\n self.fit_fixed_point(X,y)\n elif self.empirical_bayes:\n self.fit_empirical_bayes(X,y)\n else:\n self.update(X,y)\n\n return self\n\n def _check_and_prep(self, X, y = None):\n X = np.atleast_2d(X)\n X = _append_one_col(X)\n\n self._create_matrices(X)\n if y is None:\n return X\n else:\n y = np.atleast_1d(y)\n return X,y\n\n def update(self, X, y):\n X, y = self._check_and_prep(X, y)\n\n # update the inverse covariance matrix (Bishop eq. 3.51)\n w_precision = self.w_precision + self.beta * X.T @ X\n\n # update the mean vector (Bishop eq. 3.50)\n w_covariance = np.linalg.inv(w_precision)\n w_mean = w_covariance @ (self.w_precision @ self.w_mean + self.beta * y @ X)\n\n self.w_precision = w_precision\n self.w_covariance = np.linalg.inv(w_precision)\n self.w_mean = w_mean\n\n return self\n\n def _predict(self,X):\n X = self._check_and_prep(X)\n\n # calcualte the predictive mean (Bishop eq. 3.58)\n y_pred_mean = X @ self.w_mean\n\n # calculate the predictive variance (Bishop eq. 3.59)\n y_pred_var = 1 / self.beta + (X @ self.w_covariance * X).sum(axis=1)\n\n # Drop a dimension from the mean and variance in case x and y were singletons\n # There might be a more elegant way to proceed but this works!\n y_pred_mean = np.squeeze(y_pred_mean)\n y_pred_var = np.squeeze(y_pred_var)\n\n return y_pred_mean, y_pred_var ** 0.5\n\n\n def predict(self, X):\n y_pred_mean, _ =self._predict(X)\n return y_pred_mean\n\n def predict_proba(self, X):\n y_pred_mean, y_pred_std = self._predict(X)\n return y_pred_mean, y_pred_std\n\n def _log_prior(self, w):\n return -0.5 * self.alpha * np.sum(w ** 2)\n\n def _log_likelihood(self, X, y, w):\n return -0.5 * self.beta * np.square(y - X @ w).sum()\n\n def _log_posterior(self, X, y, w):\n return self._log_likelihood(X, y, w) + self._log_prior(w)\n\n def log_prior(self):\n return self._log_prior(self.w_mean)\n\n def log_posterior(self, X, y):\n X = self._check_and_prep(X)\n return self._log_likelihood(X, y, self.w_mean) + self._log_prior(self.w_mean)\n\n def log_likelihood(self, X, y):\n X = self._check_and_prep(X)\n return self._log_likelihood(X, y, self.w_mean) + self._log_prior(self.w_mean)\n\n def log_evidence(self, X:np.ndarray, y:np.ndarray):\n # for compability reasons with older versions\n if getattr(self, 'use_fixed_point', False):\n N,D = X.shape\n t = self.alpha / self.beta\n u, s, vh, sigma = _fixed_point_svd(X)\n x, x2, res_x2 = prep_fixed_point_empircal_bayes(y, u)\n res2 = (x2 / ((1 + sigma / t) ** 2)).sum() + res_x2\n m2 = (sigma * x2 / ((t + sigma) ** 2)).sum()\n\n return evidence_fixed_point(N, D, sigma, self.alpha, self.beta, m2, res2)\n\n else:\n X, y = self._check_and_prep(X, y)\n\n N, M = X.shape\n\n # E(\\mathbf{m}_n) = \\beta/2 \\cdot ||y- X \\mathbf{m}_n|| + \\alpha/2 \\mathbf{m}_n^T \\mathbf{m}_n,\n # where \\mathbf{m}_n is the mean weight. This is the same as the negative of the posterior\n Emn = -self._log_posterior(X, y, self.w_mean)\n\n # Bishop eq. 
3.86\n return 0.5 * (M * np.log(self.alpha) + N * np.log(self.beta)\n - np.linalg.slogdet(self.w_precision)[1] - N * np.log(2 * np.pi)\n ) - Emn\n\n# Cell\nclass RidgeRegression(BaseEstimator):\n def __init__(self, l2_reg=1e-3):\n self.l2_reg = l2_reg\n\n def _ridge_regression(self, X, y):\n \"\"\"ridge / tikhonov regularized linear multiple, multivariate regression\"\"\"\n return np.linalg.inv(X.T @ X + self.l2_reg * np.eye(X.shape[1])) @ X.T @ y\n\n def fit(self, X, y):\n X = _append_one_col(X)\n self.W = self._ridge_regression(X, y)\n\n return self\n\n def predict(self, X):\n X = _append_one_col(X)\n y_hat = X @ self.W\n\n return y_hat\n\n# Cell\ndef relu(x):\n return np.maximum(x, 0)\n\ndef identity(x):\n return x\n\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\n# Cell\n#hide\ntmp = [\"2\"]\ntest_acts = \"relu,sigmoid\"\ntmp += test_acts.split(\",\")\ntest_eq(tmp, ['2', 'relu', 'sigmoid'])\n\n# Cell\nclass ELM(BaseEstimator):\n def __init__(\n self, n_hidden=20, activations=relu, prediction_model=BayesLinReg(),\n include_original_features=True,**kwargs):\n\n \"\"\"[summary]\n\n Args:\n n_hidden (int, optional): [description]. Defaults to 100.\n activations ([type], optional): [description]. Defaults to relu.\n prediction_model_type (str, optional): [description]. Defaults to \"ridge\".\n \"\"\"\n\n self.prediction_model = prediction_model\n self.n_hidden = n_hidden\n self.include_original_features = include_original_features\n self._hidden_weights, self._biases = None, None\n\n if self.prediction_model is None:\n self._prediction_model = BayesLinReg()\n else:\n self._prediction_model = self.prediction_model\n\n self._convert_activations(activations)\n\n super(ELM).__init__()\n\n def _convert_activations(self, activations):\n self.activations = []\n activations = listify(activations)\n conversion_dict = {\"relu\": relu, \"identity\": identity, \"sigmoid\": sigmoid,\n \"tanh\": np.tanh}\n supported_activations = list(conversion_dict.keys())\n\n for activation in activations:\n if type(activation) is str:\n activation = activation.split(\",\")\n for act in activation:\n if act in supported_activations:\n self.activations.append(conversion_dict[act])\n elif callable(activation):\n self.activations.append(activation)\n\n @property\n def alpha(self):\n return self._prediction_model.alpha\n\n @property\n def beta(self):\n return self._prediction_model.beta\n\n @alpha.setter\n def alpha(self, alpha):\n self._prediction_model.alpha = alpha\n\n @beta.setter\n def beta(self, beta):\n self._prediction_model.beta = beta\n\n def transform_X(self, X, W, b, activations):\n G = np.dot(X, W) + b\n Hs = []\n for act in activations:\n Hs.append(act(G))\n return np.concatenate(Hs, axis=1)\n\n def _check_and_prep(self, X, y):\n X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)\n\n self._n_features = X.shape[1]\n if self._hidden_weights is None:\n self._hidden_weights = 0.1*np.random.normal(size=[self._n_features, self.n_hidden])\n self._biases = 0.1*np.random.normal(size=[self.n_hidden])\n\n\n X_transformed = self.transform_X(\n X, self._hidden_weights, self._biases, self.activations\n )\n\n if self.include_original_features:\n X_transformed = np.concatenate([X_transformed, X], axis=1)\n\n return X_transformed, y\n\n def fit(self, X, y):\n\n X_transformed, y = self._check_and_prep(X, y)\n\n self._prediction_model.fit(X_transformed, y)\n\n return self\n\n def update(self, X, y):\n\n X_transformed, y = self._check_and_prep(X, y)\n\n self._prediction_model.update(X_transformed, y)\n\n return 
self\n\n def _prep_pred_X(self, X):\n # Check is fit had been called\n check_is_fitted(self)\n\n X = self._validate_data(X)\n\n X_transformed = self.transform_X(\n X, self._hidden_weights, self._biases, listify(self.activations)\n )\n\n if self.include_original_features:\n X_transformed = np.concatenate([X_transformed, X], axis=1)\n\n return X_transformed\n\n\n def predict(self, X):\n X_transformed = self._prep_pred_X(X)\n y_hat = self._prediction_model.predict(X_transformed)\n\n return y_hat\n\n def predict_proba(self, X):\n X_transformed = self._prep_pred_X(X)\n return self._prediction_model.predict_proba(X_transformed)\n\n def set_params(self, **params):\n local_param_keys = self.get_params(deep=False).keys()\n local_params = {k: v for (k, v) in params.items() if k in local_param_keys}\n\n for k, v in local_params.items():\n setattr(self, k, v)\n\n # assume that remaining keys must be part of the prediction model\n non_local_params = {\n k: v for (k, v) in params.items() if k not in local_param_keys\n }\n self.prediction_model.set_params(**non_local_params)\n\n def get_params(self, deep=False):\n return super().get_params()\n\n def log_evidence(self, X:np.ndarray, y:np.ndarray, logme=False):\n X_transformed = self._prep_pred_X(X)\n evidence = self._prediction_model.log_evidence(X_transformed, y)\n\n if logme:\n evidence = evidence / len(X)\n return evidence\n\n def log_likelihood(self, X:np.ndarray, y:np.ndarray):\n X_transformed = self._prep_pred_X(X)\n\n log_likelihood = self._prediction_model.log_likelihood(\n X_transformed, y.reshape(-1)\n )\n\n return log_likelihood\n\n def log_prior(self):\n return self._prediction_model.log_prior()\n\n def log_posterior(self, X:np.ndarray, y:np.ndarray):\n X_transformed = self._prep_pred_X(X)\n\n log_posterior = self._prediction_model.log_posterior(\n X_transformed, y.reshape(-1)\n )\n\n return log_posterior\n\n# Cell\ndef sample_bayes_linear_model(model, X, n_samples=100):\n if isinstance(model, ELM):\n X = model._prep_pred_X(X)\n prediction_model = model.prediction_model\n elif isinstance(model, BayesLinReg):\n prediction_model = model\n else:\n raise ValueError(\"Not supported model.\")\n\n # add bias\n X = _append_one_col(X)\n\n sampled_mean = np.random.multivariate_normal(prediction_model.w_mean.reshape(-1),\\\n prediction_model.w_covariance, size=n_samples)\n y_sampled = X @ sampled_mean.T\n\n return y_sampled\n\n# Cell\nclass MCLeanPowerCurve(BaseEstimator):\n def __init__(self, power_curve_type=\"upland\", normalize_ws=False):\n self.power_curve_type = power_curve_type\n self.normalize_ws = normalize_ws\n\n def fit(self, X, y=None, **kwargs):\n return self\n\n def predict(self, X):\n\n ws_ref = np.arange(0, 31)\n\n if self.power_curve_type == \"offshore\":\n pow_ref = [\n 0,\n 0,\n 0,\n 1,\n 2,\n 5,\n 8,\n 14,\n 20,\n 29,\n 40,\n 53,\n 64,\n 76,\n 84,\n 89,\n 89,\n 89,\n 89,\n 89,\n 83,\n 71,\n 54,\n 36,\n 18,\n 6,\n 0,\n 0,\n 0,\n 0,\n 0,\n ]\n elif self.power_curve_type == \"lowland\": # < 400\n pow_ref = [\n 0,\n 0,\n 0,\n 1,\n 3,\n 6,\n 11,\n 17,\n 25,\n 35,\n 47,\n 60,\n 72,\n 81,\n 88,\n 92,\n 94,\n 94,\n 94,\n 90,\n 83,\n 72,\n 56,\n 38,\n 23,\n 11,\n 4,\n 0,\n 0,\n 0,\n 0,\n ]\n elif self.power_curve_type == \"lowland_stall_regulated\":\n pow_ref = [\n 0,\n 0,\n 1,\n 2,\n 4,\n 8,\n 14,\n 21,\n 30,\n 40,\n 51,\n 61,\n 69,\n 76,\n 82,\n 87,\n 89,\n 90,\n 90,\n 85,\n 75,\n 63,\n 47,\n 30,\n 17,\n 8,\n 2,\n 0,\n 0,\n 0,\n 0,\n ]\n elif self.power_curve_type == \"upland\":\n pow_ref = [\n 0,\n 1,\n 2,\n 3,\n 6,\n 10,\n 15,\n 21,\n 
30,\n 39,\n 49,\n 58,\n 67,\n 75,\n 82,\n 87,\n 91,\n 91,\n 88,\n 82,\n 73,\n 63,\n 52,\n 42,\n 31,\n 21,\n 13,\n 6,\n 2,\n 0,\n 0,\n ]\n else: # same as upland\n self.pow_ref = [\n 0,\n 1,\n 2,\n 3,\n 6,\n 10,\n 15,\n 21,\n 30,\n 39,\n 49,\n 58,\n 67,\n 75,\n 82,\n 87,\n 91,\n 91,\n 88,\n 82,\n 73,\n 63,\n 52,\n 42,\n 31,\n 21,\n 13,\n 6,\n 2,\n 0,\n 0,\n ]\n\n pow_ref = np.array(pow_ref) / 100.0\n\n if self.normalize_ws:\n # as we start by there it is sufficient to normalize via the max value\n return np.interp(X, ws_ref / ws_ref.max(), pow_ref)\n else:\n return np.interp(X, ws_ref, pow_ref)\n"
] |
[
[
"numpy.dot",
"sklearn.utils.validation.check_is_fitted",
"numpy.sqrt",
"numpy.squeeze",
"numpy.concatenate",
"numpy.random.randn",
"numpy.exp",
"numpy.square",
"numpy.linalg.svd",
"numpy.allclose",
"numpy.arange",
"numpy.eye",
"numpy.linalg.slogdet",
"numpy.atleast_1d",
"numpy.size",
"numpy.interp",
"numpy.zeros",
"numpy.log",
"numpy.linalg.inv",
"numpy.atleast_2d",
"numpy.identity",
"numpy.array",
"numpy.sum",
"numpy.linalg.solve",
"numpy.maximum",
"numpy.random.seed",
"numpy.ones",
"numpy.random.normal",
"numpy.linalg.eigvalsh"
]
] |
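The BayesLinReg class in the code cell above performs the classic conjugate Gaussian update (Bishop, PRML, eqs. 3.50-3.51) inside its `update` method. A standalone NumPy sketch of that update, independent of the repo's class (all names here are illustrative, not the project's API):

import numpy as np

def bayes_linreg_update(w_mean, w_precision, X, y, beta):
    """One conjugate update of the Gaussian posterior over weights."""
    # New precision: S_N^{-1} = S_0^{-1} + beta * X^T X   (Bishop 3.51)
    new_precision = w_precision + beta * X.T @ X
    # New mean: m_N = S_N (S_0^{-1} m_0 + beta * X^T y)   (Bishop 3.50)
    new_mean = np.linalg.solve(new_precision,
                               w_precision @ w_mean + beta * X.T @ y)
    return new_mean, new_precision

# Usage: start from the prior N(0, alpha^{-1} I) and feed batches.
rng = np.random.default_rng(0)
alpha, beta, d = 1e-2, 25.0, 3
w_true = rng.normal(size=d)
X = rng.normal(size=(200, d))
y = X @ w_true + rng.normal(scale=beta ** -0.5, size=200)

w_mean, w_precision = np.zeros(d), alpha * np.eye(d)
for start in range(0, 200, 50):   # sequential batches
    w_mean, w_precision = bayes_linreg_update(
        w_mean, w_precision, X[start:start + 50], y[start:start + 50], beta)
print(np.round(w_mean - w_true, 2))  # posterior mean approaches the truth

Because the posterior after several sequential batches equals the posterior after one combined batch, an `update` method of this form can be called repeatedly for online learning, which is what the ELM wrapper above exploits.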
aicells/aicells
|
[
"b22bb0fe2f96afcab2c83a8b7cc4feb03ad32fad"
] |
[
"aicells-python/aicells_pkg/aicells/aicells-server/AICFunction.py"
] |
[
"# AIcells (https://github.com/aicells/aicells) - Copyright 2020 Gergely Szerovay, László Siller\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport yaml\n# import pathlib\nimport os\nfrom .AICException import AICEParameterError, AICException, AICEUnknownParameter\nimport pandas\nfrom . import UDFUtils\n\nclass AICEParameterNotMatch(AICException):\n pass\n\nclass AICFunction:\n def Init(self):\n self.queue = None\n self.parameters = []\n self.workbookPath = None\n\n self.y = None\n for d in ['function', 'tool']:\n yamlFile = os.path.join(os.path.dirname(self.classFile), '..', d + '-yml\\\\', self.__class__.__name__ + '.yml')\n if os.path.isfile(yamlFile):\n with open(yamlFile) as file:\n self.y = yaml.load(file, Loader=yaml.FullLoader)\n\n if self.y is None:\n raise AICException(\"FATAL_ERROR\", {\"error\": \"YML file not found, class: \" + self.__class__.__name__})\n\n self.parameters = []\n if 'parameters' in self.y:\n if isinstance(self.y['parameters'], list):\n self.parameters = self.y['parameters']\n\n self.parameterNameList = []\n self.parameterTypeList = []\n self.parameterDefaultList = []\n self.parameterDescriptionList = []\n\n for parameter in self.parameters:\n self.parameterNameList.append(parameter['parameterName'])\n self.parameterTypeList.append(\", \".join(parameter['type']))\n if 'default' in parameter:\n self.parameterDefaultList.append(parameter['default'])\n else:\n self.parameterDefaultList.append('(required)')\n\n if 'description' in parameter:\n self.parameterDescriptionList.append(parameter['description'])\n else:\n self.parameterDescriptionList.append('')\n\n def LoadDataYML(self, fileName):\n yamlFile = os.path.join(os.path.dirname(self.classFile), '..', 'yml\\\\', fileName)\n if os.path.isfile(yamlFile):\n with open(yamlFile) as file:\n return yaml.load(file, Loader=yaml.FullLoader)\n raise AICException(\"FATAL_ERROR\", {\"error\": \"YML file not found: \" + fileName})\n\n def GetTag(self):\n return self.y['tag']\n\n def GetDescription(self):\n self.parameters = []\n if 'description' in self.y:\n return self.y['description']\n else:\n return ''\n\n def GetParameterNameList(self):\n return self.parameterNameList\n\n def GetParameterTypeList(self):\n return self.parameterTypeList\n\n def GetParameterDefaultList(self):\n return self.parameterDefaultList\n\n def GetParameterDescritpionList(self):\n return self.parameterDescriptionList\n\n def FlatternList(self, l):\n if not isinstance(l, list):\n return [l]\n flatList = []\n for subList in l:\n if isinstance(subList, list):\n for item in subList:\n flatList.append(item)\n else:\n flatList.append(subList)\n return flatList\n\n def SetQueue(self, q):\n self.queue = q\n\n def SetConfig(self, config):\n self.config = config\n\n def Progress(self, text):\n if self.queue:\n self.queue.put(['progress', text])\n\n def ProcessArguments(self, args, argsKey):\n errors = []\n kwargs = {}\n\n parameterNamePrefix = argsKey[11:].strip(\".\")\n if parameterNamePrefix != \"\":\n parameterNamePrefix += '.'\n\n if 
len(args[argsKey]) == 2:\n if len(args[argsKey][0]) != 2:\n # vertical parameter table => transpose it\n args[argsKey] = UDFUtils.Transpose2DList(args[argsKey])\n\n for r in args[argsKey]:\n if not isinstance(r, list):\n errors += [[\"PARAMETER_ERROR\", {'parameterName': argsKey}]]\n elif len(r) != 2:\n errors += [[\"PARAMETER_ERROR\", {'parameterName': argsKey}]]\n else:\n if (r[0] is None) or (r[0] == \"\") or (r[0] in ['function', 'output']):\n pass\n elif r[0] == \"_workbook_path\":\n self.workbookPath = r[1]\n else:\n if not (r[0] in self.parameterNameList):\n raise AICEUnknownParameter(\"PARAMETER_UNKNOWN\", {'parameterName': parameterNamePrefix + r[0]})\n kwargs[r[0]] = r[1]\n\n kwargsCleaned = {}\n # errors = []\n for parameter in self.parameters:\n parameterName = parameter['parameterName']\n\n if 'default' in parameter:\n if not (parameterName in kwargs):\n kwargs[parameterName] = parameter['default']\n elif kwargs[parameterName] is None:\n kwargs[parameterName] = parameter['default']\n\n parameterTypes = parameter['type']\n if not isinstance(parameterTypes, list):\n parameterTypes = [parameterTypes]\n\n for parameterType in parameterTypes:\n if (parameterName in kwargs) and (not (parameterName in kwargsCleaned)):\n try:\n if parameterType == 'data_source':\n kwargsCleaned[parameterName] = self._DataSource(parameterName, kwargs[parameterName], parameterNamePrefix, args, self.workbookPath)\n if parameterType == 'string':\n kwargsCleaned[parameterName] = self._String(parameterName, kwargs[parameterName])\n if parameterType == 'set':\n if not ('setValues' in parameter):\n raise AICException(\"FATAL_ERROR\", {\"error\": \"setValues not defined\"})\n kwargsCleaned[parameterName] = self._Set(parameterName, kwargs[parameterName], parameter['setValues'])\n if parameterType == 'parameters':\n kwargsCleaned[parameterName] = None\n if parameterType == 'float':\n kwargsCleaned[parameterName] = self._Float(parameterName, kwargs[parameterName])\n if parameterType == 'boolean':\n kwargsCleaned[parameterName] = self._Boolean(parameterName, kwargs[parameterName])\n if parameterType == 'integer':\n kwargsCleaned[parameterName] = self._Integer(parameterName, kwargs[parameterName])\n if (parameterType == 'list') or (parameterType == 'series'):\n listItems = None\n listKey = argsKey + '.' + parameterName\n if listKey in args:\n listItems = args[listKey]\n if parameterType == 'list':\n kwargsCleaned[parameterName] = self._List(parameterName, kwargs[parameterName], listItems)\n if parameterType == 'series':\n l = self._List(parameterName, kwargs[parameterName], listItems)\n if isinstance(l, list):\n kwargsCleaned[parameterName] = pandas.Series(l)\n if parameterType == 'dataframe':\n isDataSource = False\n try:\n kwargsCleaned[parameterName] = self._DataSource(parameterName, kwargs[parameterName], parameterNamePrefix, args, self.workbookPath)\n isDataSource = True\n except AICEParameterNotMatch as e:\n pass\n\n if not isDataSource:\n list2d = None\n listKey = argsKey + '.' 
+ parameterName\n if listKey in args:\n list2d = args[listKey]\n # TODO: ranges with 1 rows?\n columnHeader = False\n if 'columnHeader' in parameter:\n if parameter['columnHeader'] == True:\n columnHeader = True\n kwargsCleaned[parameterName] = self._DataFrame(parameterName, kwargs[parameterName], list2d, columnHeader)\n if parameterType == 'Null':\n kwargsCleaned[parameterName] = self._None(parameterName, kwargs[parameterName])\n if parameterType == 'False':\n kwargsCleaned[parameterName] = self._False(parameterName, kwargs[parameterName])\n except AICEParameterNotMatch as e:\n #errors += e.GetErrorListRaw()\n pass\n # } for parameterType in parameterTypes:\n\n if not (parameterName in kwargsCleaned):\n errors += [[\"PARAMETER_INVALID_TYPE\", {'parameterName': parameterName}]]\n\n if len(errors) != 0:\n raise AICEParameterError(errors)\n\n # self.kwargs = kwargsCleaned\n return kwargsCleaned\n\n\n def _Set(self, parameterName, x, setValues):\n if not isinstance(x, str):\n AICEParameterNotMatch()\n else:\n if x == \"@AICELLS-RANGE@\":\n AICEParameterNotMatch()\n if not (x in setValues):\n AICEParameterNotMatch()\n return x\n \n def GetDataSourceClass(self, arr2d):\n if not isinstance(arr2d, list):\n raise AICException()\n if len(arr2d) == 0:\n raise AICException()\n if not isinstance(arr2d[0], list):\n raise AICException()\n\n if not isinstance(arr2d[0][0], str):\n raise AICException()\n if arr2d[0][0] != 'data_source':\n raise AICException()\n\n if len(arr2d[0]) == 2:\n # horizontal parameter range\n if not isinstance(arr2d[0][1], str):\n raise AICException()\n dataSourceClass = arr2d[0][1]\n elif len(arr2d) == 2:\n # vertical parameter range\n if not isinstance(arr2d[1][0], str):\n raise AICException()\n dataSourceClass = arr2d[1][0]\n return dataSourceClass\n\n def _DataSource(self, parameterName, x, parameterNamePrefix, args, workbookPath):\n if not 'parameters.' + parameterNamePrefix + parameterName in args:\n raise AICEParameterNotMatch()\n\n try:\n dataSourceClass = self.GetDataSourceClass(args['parameters.' + parameterNamePrefix + parameterName])\n except Exception as e:\n raise AICEParameterNotMatch()\n \n try:\n dataSource = self.factory.CreateInstance('tool-class.' + dataSourceClass.replace('.', '_'))\n except Exception as e:\n raise AICEParameterError(\"DATA_SOURCE_UNKNOWN\", {\"dataSource\": args['parameters.' + parameterNamePrefix + parameterName][0][1]})\n\n dataSourceArguments = dataSource.ProcessArguments(args, 'parameters.' 
+ parameterNamePrefix + parameterName)\n\n try:\n return dataSource.Read(workbookPath, dataSourceArguments, parameterNamePrefix + parameterName)\n except AICException as e:\n raise\n except Exception as e:\n raise AICEParameterError(\"DATA_SOURCE_ERROR\", {\"parameterName\": parameterNamePrefix + parameterName})\n\n def _String(self, parameterName, x):\n if not isinstance(x, str):\n raise AICEParameterNotMatch()\n else:\n if x == \"@AICELLS-RANGE@\":\n raise AICEParameterNotMatch()\n return x\n\n def _Float(self, parameterName, x):\n if not (x is None):\n if isinstance(x, float):\n return x\n if isinstance(x, int):\n return float(x)\n raise AICEParameterNotMatch()\n\n def _Boolean(self, parameterName, x):\n if not (x is None):\n if isinstance(x, bool):\n return x\n raise AICEParameterNotMatch()\n\n def _Integer(self, parameterName, x):\n if not (x is None):\n if isinstance(x, int):\n return x\n if isinstance(x, float):\n if x.is_integer():\n return int(x)\n raise AICEParameterNotMatch()\n\n def _List(self, parameterName, x, listItems):\n if not (listItems is None):\n return self.FlatternList(listItems)\n raise AICEParameterNotMatch()\n\n def _DataFrame(self, parameterName, x, list2d, columnHeader):\n if not (list2d is None):\n if columnHeader:\n columns = list2d[0]\n if not isinstance(columns, list): # single column\n columns = [columns]\n df = pandas.DataFrame(list2d[1:], columns=columns)\n else:\n df = pandas.DataFrame(list2d)\n return df\n raise AICEParameterNotMatch()\n\n def _None(self, parameterName, x):\n if x is None:\n return x\n raise AICEParameterNotMatch()\n\n def _False(self, parameterName, x):\n if not (x is None):\n if isinstance(x, bool):\n if x == False:\n return x\n raise AICEParameterNotMatch()\n"
] |
[
[
"pandas.Series",
"pandas.DataFrame"
]
] |
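ProcessArguments in the cell above coerces two-column [name, value] parameter ranges against a YAML spec: fill in defaults, try each declared type in turn, and report errors for parameters that match none. A deliberately simplified, hypothetical sketch of that dispatch (the real class raises AICEParameterNotMatch per checker and supports many more types such as data sources and dataframes):

def parse_parameter_rows(rows, spec):
    """rows: list of [name, value]; spec: list of dicts with
    'parameterName', 'type' (list of type names), optional 'default'."""
    kwargs = {name: value for name, value in rows if name}
    checkers = {
        "string": lambda v: v if isinstance(v, str) else None,
        "boolean": lambda v: v if isinstance(v, bool) else None,
        "float": lambda v: float(v)
                 if isinstance(v, (int, float)) and not isinstance(v, bool)
                 else None,
        "integer": lambda v: int(v)
                   if isinstance(v, int) and not isinstance(v, bool)
                   else (int(v) if isinstance(v, float) and v.is_integer()
                         else None),
    }
    cleaned = {}
    for p in spec:
        name = p["parameterName"]
        value = kwargs.get(name, p.get("default"))
        for type_name in p["type"]:       # first matching type wins
            out = checkers[type_name](value)
            if out is not None:
                cleaned[name] = out
                break
        if name not in cleaned:
            raise ValueError(f"parameter {name!r} has an invalid type")
    return cleaned

spec = [{"parameterName": "iterations", "type": ["integer"], "default": 10},
        {"parameterName": "rate", "type": ["float", "integer"]}]
print(parse_parameter_rows([["rate", 0.5]], spec))
# {'iterations': 10, 'rate': 0.5}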
maryamhgf/backpack
|
[
"63d2717656df2e0f18b3b6ee50320e82ce7358b6"
] |
[
"test/bugfixes_test.py"
] |
[
"import itertools\n\nimport pytest\nimport torch\n\nimport backpack\n\n\ndef parameters_issue_30():\n possible_values = {\n \"N\": [4],\n \"C_in\": [4],\n \"C_out\": [6],\n \"H\": [6],\n \"W\": [6],\n \"K\": [3],\n \"S\": [1, 3],\n \"pad\": [0, 2],\n \"dil\": [1, 2],\n }\n\n configs = [\n dict(zip(possible_values.keys(), config_tuple))\n for config_tuple in itertools.product(*possible_values.values())\n ]\n\n return {\n \"argvalues\": configs,\n \"ids\": [str(config) for config in configs],\n }\n\n\n@pytest.mark.parametrize(\"params\", **parameters_issue_30())\ndef test_convolutions_stride_issue_30(params):\n \"\"\"\n https://github.com/f-dangel/backpack/issues/30\n\n The gradient for the convolution is wrong when `stride` is not a multiple of\n `D + 2*padding - dilation*(kernel-1) - 1`.\n \"\"\"\n torch.manual_seed(0)\n\n mod = torch.nn.Conv2d(\n in_channels=params[\"C_in\"],\n out_channels=params[\"C_out\"],\n kernel_size=params[\"K\"],\n stride=params[\"S\"],\n padding=params[\"pad\"],\n dilation=params[\"dil\"],\n )\n backpack.extend(mod)\n x = torch.randn(size=(params[\"N\"], params[\"C_in\"], params[\"W\"], params[\"H\"]))\n\n with backpack.backpack(backpack.extensions.BatchGrad()):\n loss = torch.sum(mod(x))\n loss.backward()\n\n for p in mod.parameters():\n assert torch.allclose(p.grad, p.grad_batch.sum(0), rtol=1e-04, atol=1e-04)\n"
] |
[
[
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.randn"
]
] |
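The test above asserts that the per-sample gradients BackPACK collects in `grad_batch` sum to the ordinary batch gradient. The same identity can be cross-checked without BackPACK by accumulating per-sample gradients in a plain PyTorch loop; a slow reference, shown here for one stride/padding/dilation combination from the parameter grid:

import torch

torch.manual_seed(0)
conv = torch.nn.Conv2d(in_channels=4, out_channels=6, kernel_size=3,
                       stride=3, padding=2, dilation=2)
x = torch.randn(4, 4, 6, 6)

# Gradient of the summed output over the whole batch.
batch_grads = torch.autograd.grad(conv(x).sum(), conv.parameters())

# Per-sample gradients, accumulated one sample at a time.
acc = [torch.zeros_like(p) for p in conv.parameters()]
for n in range(x.shape[0]):
    sample_grads = torch.autograd.grad(conv(x[n:n + 1]).sum(),
                                       conv.parameters())
    for a, g in zip(acc, sample_grads):
        a += g

# Summing per-sample gradients must reproduce the batch gradient.
for g_batch, g_acc in zip(batch_grads, acc):
    assert torch.allclose(g_batch, g_acc, rtol=1e-4, atol=1e-4)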
Leoniie/ecg_video_classification
|
[
"97d38a6e0b2b54919881123117aec1374bbd81d0"
] |
[
"aml_example_files/get_data.py"
] |
[
"import numpy as np\nimport skvideo.io \nimport os \nimport sys \nimport pandas as pd\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ndef get_videos_from_folder(data_folder):\n\t'''\n\tget a list of video x wehre each video is a numpy array in the format [n_frames,width,height] \n\twith uint8 elements.\n\targument: relative path to the data_folder from the source folder.\n\t'''\n\tdata_folder = os.path.join(dir_path,data_folder)\n\tx = []\n\tfile_names = []\n\n\tif os.path.isdir(data_folder):\n\t\tfor dirpath, dirnames, filenames in os.walk(data_folder):\n\t\t\tfor filename in filenames:\n\t\t\t\tfile_path = os.path.join(dirpath, filename)\n\t\t\t\tstatinfo = os.stat(file_path)\n\t\t\t\tif statinfo.st_size != 0:\n\t\t\t\t\tvideo = skvideo.io.vread(file_path, outputdict={\"-pix_fmt\": \"gray\"})[:, :, :, 0]\n\t\t\t\t\tx.append(video)\n\t\t\t\t\tfile_names.append(int(filename.split(\".\")[0]))\n\n\tindices = sorted(range(len(file_names)), key=file_names.__getitem__)\n\tx = np.take(x,indices)\n\treturn x\n\ndef get_target_from_csv(csv_file):\n\t'''\n\tget a numpy array y of labels. the order follows the id of video. \n\targument: relative path to the csv_file from the source folder.\n\t'''\n\tcsv_file = os.path.join(dir_path,csv_file)\n\twith open(csv_file, 'r') as csvfile:\n\t\tlabel_reader = pd.read_csv(csvfile)\n\t\ty = label_reader['y']\n\t\n\ty = np.array(y)\n\treturn y"
] |
[
[
"pandas.read_csv",
"numpy.array",
"numpy.take"
]
] |
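get_videos_from_folder above orders clips by the integer file stem, so that "10.avi" comes after "9.avi" rather than right after "1.avi" as a lexicographic sort would place it. The idiom in isolation, assuming filenames of the form "<id>.<ext>":

import os

def numerically_sorted(filenames):
    """Sort video filenames by their integer stem, not lexicographically."""
    return sorted(filenames, key=lambda name: int(os.path.splitext(name)[0]))

names = ["10.avi", "2.avi", "1.avi"]
print(sorted(names))               # ['1.avi', '10.avi', '2.avi']
print(numerically_sorted(names))   # ['1.avi', '2.avi', '10.avi']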
marctuscher/tf-multitask
|
[
"206db069e7981b57426c77d301010c7aba8fa807"
] |
[
"src/core/networks/add_multi_task.py"
] |
[
"import numpy as np\nimport os\nimport tensorflow as tf\nimport shutil\nfrom random import shuffle\nfrom core.utilities.keras_progbar import Progbar\n\n\nclass MultiTaskModel():\n\n def __init__(self, ntags_pos, ntags_ner, utils, dir_model):\n \"\"\"\n Defines the hyperparameters\n \"\"\"\n # training\n self.ntags_pos = ntags_pos\n self.ntags_ner = ntags_ner\n self.embeddings = None\n self.utils = utils\n self.train_embeddings = False\n self.nepochs = 20\n self.keep_prob = 0.9 # 0.8\n self.batch_size = 1024 # 1024\n self.lr_method = \"adam\"\n self.learning_rate = 0.01 # 0.01\n self.lr_decay = 0.9 # 0.9\n self.clip = 1 # 1 if negative, no clipping\n self.nepoch_no_imprv = 20\n # model hyperparameters\n self.hidden_size_lstm = 600 # lstm on word embeddings\n self.sess = None\n self.saver = None\n if os.path.isdir(\"./out\"):\n shutil.rmtree(\"./out\")\n self.dir_output = \"./out\"\n self.dir_model = (str(dir_model)+\"multi_task.ckpt\")\n self.acc = 0\n\n def reinitialize_weights(self, scope_name):\n \"\"\"Reinitializes the weights of a given layer\"\"\"\n variables = tf.contrib.framework.get_variables(scope_name)\n init = tf.variables_initializer(variables)\n self.sess.run(init)\n\n def add_train_op(self, lr_method, lr, pos_loss, ner_loss, clip=-1):\n \"\"\"Defines self.train_op that performs an update on a batch\n\n Args:\n lr_method: (string) sgd method, for example \"adam\"\n lr: (tf.placeholder) tf.float32, learning rate\n loss: (tensor) tf.float32 loss to minimize\n clip: (python float) clipping of gradient. If < 0, no clipping\n\n \"\"\"\n _lr_m = lr_method.lower() # lower to make sure\n\n with tf.variable_scope(\"train_step\"):\n if _lr_m == 'adam': # sgd method\n optimizer = tf.train.AdamOptimizer(lr)\n elif _lr_m == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(lr)\n elif _lr_m == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(lr)\n elif _lr_m == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(lr)\n elif _lr_m == 'momentum':\n optimizer = tf.train.MomentumOptimizer(lr, 0.01)\n else:\n raise NotImplementedError(\"Unknown method {}\".format(_lr_m))\n\n if clip > 0: # gradient clipping if clip is positive\n grads_pos, vs_pos = zip(*optimizer.compute_gradients(pos_loss))\n grads_pos, gnorm_pos = tf.clip_by_global_norm(grads_pos, clip)\n self.train_pos_op = optimizer.apply_gradients(zip(grads_pos, vs_pos))\n grads_ner, vs_ner = zip(*optimizer.compute_gradients(ner_loss))\n grads_ner, gnorm_ner = tf.clip_by_global_norm(grads_ner, clip)\n self.train_ner_op = optimizer.apply_gradients(zip(grads_ner, vs_ner))\n else:\n self.train_pos_op = optimizer.minimize(pos_loss)\n self.train_ner_op = optimizer.minimize(ner_loss)\n\n\n def initialize_session(self):\n \"\"\"Defines self.sess and initialize the variables\"\"\"\n print(\"Initializing tf session\")\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.saver = tf.train.Saver()\n\n def close_session(self):\n \"\"\"Closes the session\"\"\"\n self.sess.close()\n\n def add_summary(self):\n \"\"\"Defines variables for Tensorboard\n\n Args:\n dir_output: (string) where the results are written\n\n \"\"\"\n self.merged = tf.summary.merge_all()\n self.file_writer = tf.summary.FileWriter(self.dir_output+\"/train\",\n self.sess.graph)\n\n def train(self, train_pos, dev_pos, inv_classes_pos, train_ner, dev_ner, inv_classes_ner):\n \"\"\"Performs training with early stopping and lr exponential decay\n\n Args:\n train: dataset that yields tuple of (sentences, tags)\n dev: dataset\n\n \"\"\"\n best_score_pos = 
0\n best_score_ner = 0\n nepoch_no_imprv = 0 # for early stopping\n self.add_summary() # tensorboard\n classes_ner = [i for i in range(self.ntags_ner)]\n classes_pos = [i for i in range(self.ntags_pos)]\n for epoch in range(self.nepochs):\n print(\"Epoch {:} out of {:}\".format(epoch + 1,\n self.nepochs))\n\n self.run_epoch(train_pos, train_ner, epoch)\n self.learning_rate *= self.lr_decay # decay learning rate\n\n if epoch % 3 == 0 or epoch == self.nepochs:\n\n metrics_pos = self.run_evaluate(dev_pos, True, classes_pos, inv_classes_pos)\n metrics_ner = self.run_evaluate(dev_ner, not True, classes_ner, inv_classes_ner)\n msg_pos = \" - \".join([\"Pos: {} {:04.2f}\".format(k, v)\n for k, v in metrics_pos.items()])\n msg_ner = \" - \".join([\"Ner: {} {:04.2f}\".format(k, v)\n for k, v in metrics_ner.items()])\n print(msg_pos)\n print(msg_ner)\n\n # self.file_writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag=\"accuracy\", simple_value=metrics_pos[\"acc\"])]), epoch)\n # early stopping and saving best parameters\n if metrics_pos[\"acc\"] >= best_score_pos or metrics_ner[\"acc\"] >= best_score_ner:\n nepoch_no_imprv = 0\n self.save_session()\n if metrics_pos[\"acc\"] >= best_score_pos:\n best_score_pos = metrics_pos[\"acc\"]\n print(\"- new best score pos!\")\n if metrics_ner[\"acc\"] >= best_score_ner:\n best_score_ner = metrics_ner[\"acc\"]\n print(\"- new best score ner!\")\n else:\n nepoch_no_imprv += 1\n if nepoch_no_imprv >= self.nepoch_no_imprv:\n print(\"- early stopping {} epochs without \" \\\n \"improvement\".format(nepoch_no_imprv))\n break\n\n def evaluate(self, test):\n \"\"\"Evaluate model on test set\n\n Args:\n test: instance of class Dataset\n\n \"\"\"\n print(\"Testing model over test set\")\n metrics = self.run_evaluate(test)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n print(msg)\n\n def add_placeholders(self):\n \"\"\"Define placeholders = entries to computational graph\"\"\"\n # shape = (batch size, max length of sentence in batch)\n self.word_ids = tf.placeholder(tf.int32, shape=[None, None],\n name=\"word_ids\")\n\n # shape = (batch size)\n self.sequence_lengths = tf.placeholder(tf.int32, shape=[None],\n name=\"sequence_lengths\")\n\n # shape = (batch size, max length of sentence in batch)\n self.labels = tf.placeholder(tf.int32, shape=[None, None],\n name=\"labels\")\n\n # hyper parameters\n self.dropout = tf.placeholder(dtype=tf.float32, shape=[],\n name=\"dropout\")\n self.lr = tf.placeholder(dtype=tf.float32, shape=[],\n name=\"lr\")\n\n def get_feed_dict(self, words, labels=None, lr=None, dropout=None):\n \"\"\"Given some data, pad it and build a feed dictionary\n\n Args:\n words: list of sentences. A sentence is a list of ids of a list of\n words. 
A word is a list of ids\n labels: list of ids\n lr: (float) learning rate\n dropout: (float) keep prob\n\n Returns:\n dict {placeholder: value}\n\n \"\"\"\n word_ids, sequence_lengths = self.utils.pad_sequences(words, 0)\n\n # build feed dictionary\n feed = {\n self.word_ids: word_ids,\n self.sequence_lengths: sequence_lengths\n }\n\n if labels is not None:\n labels, _ = self.utils.pad_sequences(labels, 0)\n feed[self.labels] = labels\n\n if lr is not None:\n feed[self.lr] = lr\n\n if dropout is not None:\n feed[self.dropout] = dropout\n\n return feed, sequence_lengths\n\n def add_word_embeddings_op(self):\n \"\"\"Defines self.word_embeddings\n This is a lookup tensor where each word_id corresponds to an index in this lookup tensor.\n Each index holds a 300-dim vector representing the GloVe word embedding of the word corresponding to this\n word_id in our dictionary\n \"\"\"\n _word_embeddings = tf.Variable(\n self.embeddings,\n name=\"word_embeddings_v\",\n dtype=tf.float32,\n trainable=self.train_embeddings)\n\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids, name=\"word_embeddings\")\n\n self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)\n\n def add_logits_op(self):\n \"\"\"\n Adds the bi-lstm layer and a fully connected layer with softmax output for each task to the graph.\n \"\"\"\n with tf.variable_scope(\"bi-lstm\"):\n cell_fw = tf.contrib.rnn.LSTMCell(self.hidden_size_lstm)\n cell_bw = tf.contrib.rnn.LSTMCell(self.hidden_size_lstm)\n (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, self.word_embeddings,\n sequence_length=self.sequence_lengths, dtype=tf.float32)\n output = tf.add(output_fw, output_bw)\n\n with tf.variable_scope(\"pos\"):\n W_pos = tf.get_variable(\"W\", dtype=tf.float32,\n shape=[self.hidden_size_lstm, self.ntags_pos])\n\n b_pos = tf.get_variable(\"b\", shape=[self.ntags_pos],\n dtype=tf.float32, initializer=tf.zeros_initializer())\n\n nsteps_pos = tf.shape(output)[1]\n output_pos = tf.reshape(output, [-1,self.hidden_size_lstm])\n pred = tf.matmul(output_pos, W_pos) + b_pos\n pred= tf.nn.dropout(pred, self.dropout)\n self.logits_pos = tf.reshape(pred, [-1, nsteps_pos, self.ntags_pos])\n\n with tf.variable_scope(\"ner\"):\n W_ner = tf.get_variable(\"W\", dtype=tf.float32,\n shape=[self.hidden_size_lstm, self.ntags_ner])\n\n b_ner = tf.get_variable(\"b\", shape=[self.ntags_ner],\n dtype=tf.float32, initializer=tf.zeros_initializer())\n\n nsteps_ner = tf.shape(output)[1]\n output_ner = tf.reshape(output, [-1,self.hidden_size_lstm])\n pred_ner = tf.matmul(output_ner, W_ner) + b_ner\n pred_ner = tf.nn.dropout(pred_ner, self.dropout)\n self.logits_ner = tf.reshape(pred_ner, [-1, nsteps_ner, self.ntags_ner])\n\n def add_pred_op(self):\n \"\"\"Defines self.labels_pred\n Gets int labels from the output of the softmax layer. 
The predicted label is\n the argmax of this layer\n \"\"\"\n self.labels_pred_ner = tf.cast(tf.argmax(self.logits_ner, axis=-1),\n tf.int32)\n self.labels_pred_pos = tf.cast(tf.argmax(self.logits_pos, axis=-1),\n tf.int32)\n\n def add_loss_op(self):\n \"\"\"Losses for training\"\"\"\n losses_pos = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=self.logits_pos, labels=self.labels)\n mask_pos = tf.sequence_mask(self.sequence_lengths)\n losses_pos = tf.boolean_mask(losses_pos, mask_pos)\n self.loss_pos = tf.reduce_mean(losses_pos)\n\n losses_ner = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=self.logits_ner, labels=self.labels)\n mask_ner = tf.sequence_mask(self.sequence_lengths)\n losses_ner = tf.boolean_mask(losses_ner, mask_ner)\n self.loss_ner = tf.reduce_mean(losses_ner)\n # Scalars for tensorboard\n tf.summary.scalar(\"loss\", self.loss_pos)\n tf.summary.scalar(\"loss\", self.loss_ner)\n def build(self, embeddings):\n \"\"\"\n Build the computational graph with functions defined earlier\n \"\"\"\n self.embeddings = embeddings\n self.add_placeholders()\n self.add_word_embeddings_op()\n self.add_logits_op()\n self.add_pred_op()\n self.add_loss_op()\n self.add_train_op(self.lr_method, self.lr, self.loss_pos, self.loss_ner,\n self.clip)\n self.initialize_session()\n\n def predict_batch_ner(self, words):\n \"\"\"\n Args:\n words: list of sentences\n\n Returns:\n labels_pred: list of labels for each sentence\n sequence_length\n Predict a batch of sentences (list of word_ids)\n \"\"\"\n fd, sequence_lengths = self.get_feed_dict(words, dropout=1.0)\n labels_pred = self.sess.run(self.labels_pred_ner, feed_dict=fd)\n return labels_pred, sequence_lengths\n\n def predict_batch_pos(self, words):\n \"\"\"\n Args:\n words: list of sentences\n\n Returns:\n labels_pred: list of labels for each sentence\n sequence_length\n Predict a batch of sentences (list of word_ids)\n \"\"\"\n fd, sequence_lengths = self.get_feed_dict(words, dropout=1.0)\n labels_pred = self.sess.run(self.labels_pred_pos, feed_dict=fd)\n return labels_pred, sequence_lengths\n\n def run_epoch(self, train_pos, train_ner, epoch):\n \"\"\"Performs one complete epoch over the dataset\n\n Args:\n train: dataset for training that yields tuple of sentences, tags\n dev: dataset for evaluation that yields tuple of sentences, tags\n epoch: (int) index of the current epoch\n\n Returns:\n acc: (float) current accuracy score over evaluation dataset\n\n \"\"\"\n # progbar stuff for logging\n batch_size = self.batch_size\n nbatches = (2*(min(len(train_ner), len(train_pos)) + batch_size)) / batch_size\n prog = Progbar(target=nbatches)\n shuffle(train_pos)\n shuffle(train_ner)\n for i, (words, labels, state) in enumerate(self.utils.mixed_minibatches(train_pos, train_ner, batch_size)):\n fd, _ = self.get_feed_dict(words, labels, self.learning_rate,\n self.keep_prob)\n if state =='pos':\n _, train_loss, summary = self.sess.run(\n [self.train_pos_op, self.loss_pos, self.merged], feed_dict=fd)\n\n prog.update(i + 1, [(\"train loss\", train_loss)])\n\n else:\n _, train_loss, summary = self.sess.run(\n [self.train_ner_op, self.loss_ner, self.merged], feed_dict=fd)\n\n prog.update(i + 1, [(\"train loss\", train_loss)])\n # tensorboard\n if i % 10 == 0:\n self.file_writer.add_summary(summary, epoch * nbatches + i)\n\n\n def run_evaluate(self, test, pos, classes, inv_classes):\n \"\"\"Evaluates performance on test set\n\n Args:\n test: dataset that yields tuple of (sentences, tags)\n\n Returns:\n metrics: (dict) metrics[\"acc\"] = 98.4, 
...\n\n \"\"\"\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n pos_sen, correct_pos_sen = 0., 0.\n for words, labels in self.utils.minibatches(test, self.batch_size):\n if pos:\n labels_pred, sequence_lengths = self.predict_batch_pos(words)\n else:\n labels_pred, sequence_lengths = self.predict_batch_ner(words)\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n acc = [a == b for (a, b) in zip(lab, lab_pred)]\n accs += acc\n if pos:\n pos_sen +=1\n if len(lab) == acc.count(1):\n correct_pos_sen += 1\n else:\n lab_chunks = set(self.utils.get_chunks(lab, inv_classes))\n lab_pred_chunks = set(self.utils.get_chunks(lab_pred,\n inv_classes))\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n whole_sen = correct_pos_sen/pos_sen if pos else 0\n # set self.acc for Tensorboard visualization\n return {\n \"acc\": 100 * acc\n ,\n \"f1\": f1,\n \"whole_sen\": whole_sen\n }\n\n\n def save_session(self):\n \"\"\"Saves session = weights\"\"\"\n if not os.path.exists(self.dir_model):\n os.makedirs(self.dir_model)\n self.saver.save(self.sess, self.dir_model)\n\n def restore_session(self):\n \"\"\"Restores session after saving for further training\"\"\"\n self.saver.restore(self.sess, self.dir_model)\n"
] |
[
[
"tensorflow.get_variable",
"tensorflow.variables_initializer",
"tensorflow.nn.bidirectional_dynamic_rnn",
"numpy.mean",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.boolean_mask",
"tensorflow.Variable",
"tensorflow.train.MomentumOptimizer",
"tensorflow.add",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.nn.dropout",
"tensorflow.matmul",
"tensorflow.train.AdagradOptimizer",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.shape",
"tensorflow.zeros_initializer",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge_all",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.sequence_mask",
"tensorflow.nn.embedding_lookup",
"tensorflow.summary.FileWriter",
"tensorflow.contrib.framework.get_variables",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.clip_by_global_norm",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.variable_scope"
]
] |
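run_epoch above depends on utils.mixed_minibatches to interleave POS and NER batches, each tagged with its task so that the matching loss and train op are run. A minimal, framework-free sketch of such a generator (a hypothetical reimplementation for illustration; the project's own utility also carries the label sequences and may batch differently):

import random

def mixed_minibatches(data_pos, data_ner, batch_size, seed=0):
    """Yield (batch, task) pairs with the task order shuffled across an epoch."""
    rng = random.Random(seed)
    batches = [(data_pos[i:i + batch_size], "pos")
               for i in range(0, len(data_pos), batch_size)]
    batches += [(data_ner[i:i + batch_size], "ner")
                for i in range(0, len(data_ner), batch_size)]
    # Shuffle so the shared BiLSTM sees both tasks throughout the epoch
    # instead of all POS batches followed by all NER batches.
    rng.shuffle(batches)
    yield from batches

pos = list(range(7))
ner = list(range(100, 105))
for batch, task in mixed_minibatches(pos, ner, batch_size=3):
    print(task, batch)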
minatoyuichiro/Blueqat
|
[
"1be0150ca48bf40527936561d1bf4687dbf435b4",
"6c5f26b377bc3ce0d02adec8b9132d70870b3d95"
] |
[
"blueqat/backends/draw_backend.py",
"blueqat/backends/numpy_backend.py"
] |
[
"from .backendbase import Backend\nfrom ..circuit import Circuit\nfrom ..gate import *\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\nclass DrawCircuit(Backend):\n \"\"\"Backend for draw output.\"\"\"\n \n def _preprocess_run(self, gates, n_qubits, args, kwargs):\n qlist = {}\n flg = 0\n time = 0\n add_edge = []\n remove_edge = []\n\n for i in range(n_qubits):\n qlist[i] = [{'num': flg, 'gate': 'q'+str(i), 'angle': '', 'xpos': 0, 'ypos': i, 'type': 'qubit'}]\n flg += 1\n \n time += 1\n return gates, (qlist, n_qubits, [flg], [time], add_edge, remove_edge)\n\n def _postprocess_run(self, ctx):\n #color_code\n color_gate = {}\n color_gate['X'] = color_gate['Y'] = color_gate['Z'] = '#0BB0E2'\n color_gate['RX'] = color_gate['RY'] = color_gate['RZ'] = '#FCD000'\n color_gate['H'] = color_gate['T'] = color_gate['S'] = '#E6000A'\n color_gate['M'] = 'white'\n \n qlist = ctx[0]\n n_qubits = ctx[1]\n flg = ctx[2][-1]\n time = ctx[3][-1]\n \n #measurement\n for i in range(n_qubits):\n qlist[i].append({'num': flg, 'gate': 'M', 'angle': '', 'xpos': 30, 'ypos': i + math.floor((time-1)/30)*(n_qubits+1), 'type': 'measurement'})\n flg += 1\n \n G = nx.Graph()\n\n for i in range(n_qubits):\n for j in range(len(qlist[i])-1):\n G.add_edge(qlist[i][j]['num'], qlist[i][j+1]['num'])\n \n #twoqubit connections\n for item in ctx[4]:\n G.add_edge(item[0], item[1])\n\n for item in ctx[5]:\n G.remove_edge(item[0], item[1])\n\n #image size\n plt.figure(1, figsize=(30, (n_qubits+1)*(math.floor(time/30)+1)), dpi=60)\n\n labels = {}\n colors = {}\n angles = {}\n sizes = {}\n\n for i in range(n_qubits):\n for j in range(len(qlist[i])):\n angles[qlist[i][j]['num']] = qlist[i][j]['angle']\n labels[qlist[i][j]['num']] = qlist[i][j]['gate']\n sizes[qlist[i][j]['num']] = 1200\n if qlist[i][j]['type'] == 'dummy':\n colors[qlist[i][j]['num']] = 'white'\n sizes[qlist[i][j]['num']] = 0\n elif qlist[i][j]['gate'] == '' or qlist[i][j]['gate'] == 'CZ':\n colors[qlist[i][j]['num']] = 'black'\n sizes[qlist[i][j]['num']] = 100\n elif qlist[i][j]['type'] == 'qubit':\n colors[qlist[i][j]['num']] = 'white'\n else:\n colors[qlist[i][j]['num']] = color_gate[qlist[i][j]['gate']]\n\n \n #position\n pos = {}\n for i in range(n_qubits):\n for j in range(len(qlist[i])):\n pos[qlist[i][j]['num']] = (qlist[i][j]['xpos'], (n_qubits+1)*(math.floor(time/30)+1) - qlist[i][j]['ypos'])\n\n #dummy qubit just for top and bottom margin\n labels[flg]= ''\n colors[flg] = 'black'\n sizes[flg] = 0\n pos[flg] = (0, (n_qubits+1)*(math.floor(time/30)+1)+1)\n G.add_node(flg)\n labels[flg+1]= ''\n colors[flg+1] = 'black'\n sizes[flg+1] = 0\n pos[flg+1] = (0, 1)\n G.add_node(flg+1)\n \n nx.set_node_attributes(G, labels, 'label')\n nx.set_node_attributes(G, colors, 'color')\n nx.set_node_attributes(G, angles, 'angle')\n nx.set_node_attributes(G, sizes, 'size')\n\n options = {\n \"font_size\": 12,\n \"edgecolors\": \"black\",\n \"linewidths\": 2,\n \"width\": 2,\n }\n\n node_labels = nx.get_node_attributes(G, 'label')\n node_colors = [colors[i] for i in nx.get_node_attributes(G, 'color')]\n node_sizes = [sizes[i] for i in nx.get_node_attributes(G, 'size')]\n nx.draw_networkx(G, pos, labels = node_labels, node_color = node_colors, node_size = node_sizes, **options)\n\n #label positions for angles\n pos_attrs = pos.copy()\n for i in pos_attrs:\n pos_attrs[i] = (pos_attrs[i][0]+0.4, pos_attrs[i][1]-0.4)\n \n node_attrs = nx.get_node_attributes(G, 'angle')\n custom_node_attrs = {}\n\n for node, attr in node_attrs.items():\n 
custom_node_attrs[node] = attr\n\n nx.draw_networkx_labels(G, pos_attrs, labels = custom_node_attrs, font_size=9)\n #plt.axis('off')\n plt.show()\n \n return \n\n def _one_qubit_gate_noargs(self, gate, ctx):\n flg = ctx[2][-1]\n time = ctx[3][-1]\n qlist = ctx[0]\n \n time_adjust = time%30\n if time_adjust == 0:\n for i in range(ctx[1]):\n ypos_adjust = i + (math.floor(time/30)-1)*(ctx[1]+1)\n qlist[i].append({'num': flg, 'gate': '', 'angle': '', 'xpos': 30, 'ypos': ypos_adjust, 'type': 'dummy'})\n flg += 1\n time += 1\n \n for i in range(ctx[1]):\n ypos_adjust = i + math.floor(time/30)*(ctx[1]+1)\n qlist[i].append({'num': flg, 'gate': '', 'angle': '', 'xpos': 0, 'ypos': ypos_adjust, 'type': 'dummy'})\n flg += 1\n ctx[5].append((flg-1, flg-1-ctx[1]))\n \n time_adjust = time%30\n for idx in gate.target_iter(ctx[1]):\n ypos_adjust = idx + math.floor(time/30)*(ctx[1]+1)\n qlist[idx].append({'num': flg, 'gate': gate.lowername.upper(), 'angle': '', 'xpos': time_adjust, 'ypos': ypos_adjust, 'type': 'gate'})\n flg += 1\n ctx[2].append(flg)\n ctx[3].append(time+1)\n return ctx\n\n gate_x = _one_qubit_gate_noargs\n gate_y = _one_qubit_gate_noargs\n gate_z = _one_qubit_gate_noargs\n gate_h = _one_qubit_gate_noargs\n gate_t = _one_qubit_gate_noargs\n gate_s = _one_qubit_gate_noargs\n \n def _one_qubit_gate_args_theta(self, gate, ctx):\n flg = ctx[2][-1]\n time = ctx[3][-1]\n qlist = ctx[0]\n \n time_adjust = time%30\n if time_adjust == 0:\n for i in range(ctx[1]):\n ypos_adjust = i + (math.floor(time/30)-1)*(ctx[1]+1)\n qlist[i].append({'num': flg, 'gate': '', 'angle': '', 'xpos': 30, 'ypos': ypos_adjust, 'type': 'dummy'})\n flg += 1\n time += 1\n \n for i in range(ctx[1]):\n ypos_adjust = i + math.floor(time/30)*(ctx[1]+1)\n qlist[i].append({'num': flg, 'gate': '', 'angle': '', 'xpos': 0, 'ypos': ypos_adjust, 'type': 'dummy'})\n flg += 1\n ctx[5].append((flg-1, flg-1-ctx[1]))\n \n time_adjust = time%30\n for idx in gate.target_iter(ctx[1]):\n ypos_adjust = idx + math.floor(time/30)*(ctx[1]+1)\n qlist[idx].append({'num': flg, 'gate': gate.lowername.upper(), 'angle': round(gate.theta, 2), 'xpos': time_adjust, 'ypos': ypos_adjust, 'type': 'gate'})\n flg += 1\n ctx[2].append(flg)\n ctx[3].append(time+1)\n return ctx\n\n gate_rx = _one_qubit_gate_args_theta\n gate_ry = _one_qubit_gate_args_theta\n gate_rz = _one_qubit_gate_args_theta\n gate_phase = _one_qubit_gate_args_theta\n\n def gate_i(self, gate, ctx):\n time = ctx[3][-1]\n ctx[3].append(time+1)\n return ctx\n \n def _two_qubit_gate_noargs(self, gate, ctx):\n flg = ctx[2][-1]\n time = ctx[3][-1]\n qlist = ctx[0]\n \n tg = ''\n if gate.lowername == 'cx':\n tg = 'x'\n elif gate.lowername == 'cy':\n tg = 'y'\n elif gate.lowername == 'cz':\n tg = 'z'\n\n time_adjust = time%30\n if time_adjust == 0:\n for i in range(ctx[1]):\n ypos_adjust = i + (math.floor(time/30)-1)*(ctx[1]+1)\n qlist[i].append({'num': flg, 'gate': '', 'angle': '', 'xpos': 30, 'ypos': ypos_adjust, 'type': 'dummy'})\n flg += 1\n time += 1\n \n for i in range(ctx[1]):\n ypos_adjust = i + math.floor(time/30)*(ctx[1]+1)\n qlist[i].append({'num': flg, 'gate': '', 'angle': '', 'xpos': 0, 'ypos': ypos_adjust, 'type': 'dummy'})\n flg += 1\n ctx[5].append((flg-1, flg-1-ctx[1]))\n\n time_adjust = time%30 \n for control, target in gate.control_target_iter(ctx[1]):\n qlist[target].append({'num': flg, 'gate': tg.upper(), 'angle': '', 'xpos': time_adjust, 'ypos': target + math.floor(time/30)*(ctx[1]+1), 'type': 'gate'})\n flg += 1\n qlist[control].append({'num': flg, 'gate': '', 'angle': '', 'xpos': 
time_adjust, 'ypos': control + math.floor(time/30)*(ctx[1]+1), 'type': 'gate'})\n flg += 1\n ctx[4].append((flg-2, flg-1))\n ctx[2].append(flg)\n ctx[3].append(time+1)\n return ctx\n \n gate_cx = gate_cy = gate_cz = _two_qubit_gate_noargs\n\n def _three_qubit_gate_noargs(self, gate, ctx):\n return ctx\n\n gate_ccx = _three_qubit_gate_noargs\n gate_cswap = _three_qubit_gate_noargs\n\n def gate_measure(self, gate, ctx):\n return ctx\n\n gate_reset = _one_qubit_gate_noargs",
"# Copyright 2019 The Blueqat Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import Counter\nimport typing\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, cast\nimport math\nimport cmath\nimport random\nimport warnings\n\nimport numpy as np\n\nfrom ..gate import *\nfrom ..utils import ignore_global_phase\nfrom .backendbase import Backend\n\nDEFAULT_DTYPE = complex\n\nOPS_DISABLE_CTX_CACHE = (Measurement, Reset)\n\nDEFAULT_SHOTS: int = 1024\n\n\nclass _NumPyBackendContext:\n \"\"\"This class is internally used in NumPyBackend\"\"\"\n def __init__(self,\n n_qubits: int,\n cache: Optional[np.ndarray] = None,\n cache_idx: int = -1) -> None:\n self.n_qubits = n_qubits\n self.qubits = np.zeros(2**n_qubits, dtype=DEFAULT_DTYPE)\n self.qubits_buf = np.zeros(2**n_qubits, dtype=DEFAULT_DTYPE)\n self.indices = np.arange(2**n_qubits, dtype=np.uint32)\n self.save_ctx_cache = True\n self.cache = cache\n self.cache_idx = cache_idx\n self.shots_result = Counter()\n self.cregs = [0] * self.n_qubits\n\n def prepare(self, initial: Optional[np.ndarray]) -> None:\n \"\"\"Prepare to run next shot.\"\"\"\n if self.cache is not None:\n np.copyto(self.qubits, self.cache)\n elif initial is not None:\n np.copyto(self.qubits, initial)\n else:\n self.qubits.fill(0.0)\n self.qubits[0] = 1.0\n self.cregs = [0] * self.n_qubits\n self.sample = {}\n\n def store_shot(self) -> None:\n \"\"\"Store current cregs to shots_result\"\"\"\n def to_str(cregs: List[int]) -> str:\n return ''.join(str(b) for b in cregs)\n\n key = to_str(self.cregs)\n self.shots_result[key] = self.shots_result.get(key, 0) + 1\n\n\nclass _NumPyBackendOperations:\n @staticmethod\n def gate_x(gate: XGate, ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of X gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n for target in gate.target_iter(n_qubits):\n t0 = (i & (1 << target)) == 0\n t1 = (i & (1 << target)) != 0\n newq[t0] = qubits[t1]\n newq[t1] = qubits[t0]\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_y(gate: YGate, ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of Y gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n for target in gate.target_iter(n_qubits):\n t0 = (i & (1 << target)) == 0\n t1 = (i & (1 << target)) != 0\n newq[t0] = -1.0j * qubits[t1]\n newq[t1] = 1.0j * qubits[t0]\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_z(gate: ZGate, ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of Z gate.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n for target in gate.target_iter(n_qubits):\n qubits[(i & (1 << target)) != 0] *= -1\n return ctx\n\n @staticmethod\n def gate_h(gate: HGate, ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n 
\"\"\"Implementation of H gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n for target in gate.target_iter(n_qubits):\n t0 = (i & (1 << target)) == 0\n t1 = (i & (1 << target)) != 0\n newq[t0] = qubits[t0] + qubits[t1]\n newq[t1] = qubits[t0] - qubits[t1]\n newq *= 1.0 / math.sqrt(2)\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_rx(gate: RXGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of RX gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n halftheta = gate.theta * 0.5\n a00 = a11 = math.cos(halftheta)\n a01 = a10 = -1j * math.sin(halftheta)\n for target in gate.target_iter(n_qubits):\n t0 = (i & (1 << target)) == 0\n t1 = (i & (1 << target)) != 0\n newq[t0] = a00 * qubits[t0] + a01 * qubits[t1]\n newq[t1] = a10 * qubits[t0] + a11 * qubits[t1]\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_ry(gate: RYGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of RY gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n halftheta = gate.theta * 0.5\n a00 = a11 = math.cos(halftheta)\n a10 = math.sin(halftheta)\n a01 = -a10\n for target in gate.target_iter(n_qubits):\n t0 = (i & (1 << target)) == 0\n t1 = (i & (1 << target)) != 0\n newq[t0] = a00 * qubits[t0] + a01 * qubits[t1]\n newq[t1] = a10 * qubits[t0] + a11 * qubits[t1]\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_rz(gate: RZGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of RZ gate.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n halftheta = gate.theta * 0.5\n a0 = complex(math.cos(halftheta), -math.sin(halftheta))\n a1 = complex(math.cos(halftheta), math.sin(halftheta))\n for target in gate.target_iter(n_qubits):\n qubits[(i & (1 << target)) == 0] *= a0\n qubits[(i & (1 << target)) != 0] *= a1\n return ctx\n\n @staticmethod\n def gate_phase(gate: PhaseGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of Phase gate.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n theta = gate.theta\n a = complex(math.cos(theta), math.sin(theta))\n for target in gate.target_iter(n_qubits):\n qubits[(i & (1 << target)) != 0] *= a\n return ctx\n\n @staticmethod\n def gate_t(gate: PhaseGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of T gate.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n\n sqrt2_inv = 1 / math.sqrt(2)\n factor = complex(sqrt2_inv, sqrt2_inv)\n for target in gate.target_iter(n_qubits):\n qubits[(i & (1 << target)) != 0] *= factor\n return ctx\n\n @staticmethod\n def gate_s(gate: SGate, ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of S gate.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n for target in gate.target_iter(n_qubits):\n qubits[(i & (1 << target)) != 0] *= 1.j\n return ctx\n\n @staticmethod\n def gate_cz(gate: CZGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of CZ gate.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n for control, target in gate.control_target_iter(n_qubits):\n qubits[((i & (1 << control)) != 
0)\n & ((i & (1 << target)) != 0)] *= -1\n return ctx\n\n @staticmethod\n def gate_cx(gate: CXGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of CX gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n for control, target in gate.control_target_iter(n_qubits):\n np.copyto(newq, qubits)\n c1 = (i & (1 << control)) != 0\n t0 = (i & (1 << target)) == 0\n t1 = (i & (1 << target)) != 0\n newq[c1 & t0] = qubits[c1 & t1]\n newq[c1 & t1] = qubits[c1 & t0]\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_cu(gate: CUGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of CU gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n theta = gate.theta\n phi = gate.phi\n lam = gate.lam\n gamma = gate.gamma\n globalphase = cmath.exp(1j * gamma)\n a00 = math.cos(theta * 0.5) * globalphase\n a11 = a00 * cmath.exp(1j * (phi + lam))\n a01 = a10 = math.sin(theta * 0.5) * globalphase\n a01 *= -cmath.exp(1j * lam)\n a10 *= cmath.exp(1j * phi)\n for control, target in gate.control_target_iter(n_qubits):\n np.copyto(newq, qubits)\n c1 = (i & (1 << control)) != 0\n c1t0 = ((i & (1 << target)) == 0) & c1\n c1t1 = ((i & (1 << target)) != 0) & c1\n newq[c1t0] = a00 * qubits[c1t0] + a01 * qubits[c1t1]\n newq[c1t1] = a10 * qubits[c1t0] + a11 * qubits[c1t1]\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_crx(gate: CRXGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of CRX gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n halftheta = gate.theta * 0.5\n a00 = a11 = math.cos(halftheta)\n a01 = a10 = -1j * math.sin(halftheta)\n for control, target in gate.control_target_iter(n_qubits):\n np.copyto(newq, qubits)\n c1 = (i & (1 << control)) != 0\n c1t0 = ((i & (1 << target)) == 0) & c1\n c1t1 = ((i & (1 << target)) != 0) & c1\n newq[c1t0] = a00 * qubits[c1t0] + a01 * qubits[c1t1]\n newq[c1t1] = a10 * qubits[c1t0] + a11 * qubits[c1t1]\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_cry(gate: CRYGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of CRY gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n halftheta = gate.theta * 0.5\n a00 = a11 = math.cos(halftheta)\n a10 = math.sin(halftheta)\n a01 = -a10\n for control, target in gate.control_target_iter(n_qubits):\n np.copyto(newq, qubits)\n c1 = (i & (1 << control)) != 0\n c1t0 = ((i & (1 << target)) == 0) & c1\n c1t1 = ((i & (1 << target)) != 0) & c1\n newq[c1t0] = a00 * qubits[c1t0] + a01 * qubits[c1t1]\n newq[c1t1] = a10 * qubits[c1t0] + a11 * qubits[c1t1]\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_crz(gate: CRZGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of CRZ gate.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n halftheta = gate.theta * 0.5\n a0 = complex(math.cos(halftheta), -math.sin(halftheta))\n a1 = complex(math.cos(halftheta), math.sin(halftheta))\n for control, target in gate.control_target_iter(n_qubits):\n c1t0 = ((i & (1 << control)) != 0) & ((i & (1 << target)) == 0)\n c1t1 = ((i & (1 << 
control)) != 0) & ((i & (1 << target)) != 0)\n qubits[c1t0] *= a0\n qubits[c1t1] *= a1\n return ctx\n\n @staticmethod\n def gate_cphase(gate: CPhaseGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of CPhase gate.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n theta = gate.theta\n a = complex(math.cos(theta), math.sin(theta))\n for control, target in gate.control_target_iter(n_qubits):\n c1t1 = ((i & (1 << control)) != 0) & ((i & (1 << target)) != 0)\n qubits[c1t1] *= a\n return ctx\n\n @staticmethod\n def gate_ccz(gate: CCZGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of CCZ gate.\"\"\"\n c1, c2, t = cast(Tuple[int, int, int], gate.targets)\n qubits = ctx.qubits\n i = ctx.indices\n indices = (i & (1 << c1)) != 0\n indices &= (i & (1 << c2)) != 0\n indices &= (i & (1 << t)) != 0\n qubits[indices] *= -1\n return ctx\n\n @staticmethod\n def gate_ccx(gate: ToffoliGate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of Toffoli gate.\"\"\"\n _, _, t = cast(Tuple[int, int, int], gate.targets)\n ctx = _NumPyBackendOperations.gate_h(HGate(t), ctx)\n ctx = _NumPyBackendOperations.gate_ccz(CCZGate(gate.targets), ctx)\n ctx = _NumPyBackendOperations.gate_h(HGate(t), ctx)\n return ctx\n\n @staticmethod\n def gate_u(gate: UGate, ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of U gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n theta = gate.theta\n phi = gate.phi\n lam = gate.lam\n gamma = gate.gamma\n globalphase = cmath.exp(1j * gamma)\n a00 = math.cos(theta * 0.5) * globalphase\n a11 = a00 * cmath.exp(1j * (phi + lam))\n a01 = a10 = math.sin(theta * 0.5) * globalphase\n a01 *= -cmath.exp(1j * lam)\n a10 *= cmath.exp(1j * phi)\n for target in gate.target_iter(n_qubits):\n np.copyto(newq, qubits)\n t0 = (i & (1 << target)) == 0\n t1 = (i & (1 << target)) != 0\n newq[t0] = qubits[t0] * a00\n newq[t0] += qubits[t1] * a01\n newq[t1] = qubits[t0] * a10\n newq[t1] += qubits[t1] * a11\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_mat1(gate: Mat1Gate,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of U3 gate.\"\"\"\n qubits = ctx.qubits\n newq = ctx.qubits_buf\n n_qubits = ctx.n_qubits\n i = ctx.indices\n mat = gate.mat\n for target in gate.target_iter(n_qubits):\n np.copyto(newq, qubits)\n t0 = (i & (1 << target)) == 0\n t1 = (i & (1 << target)) != 0\n newq[t0] = mat[0, 0] * qubits[t0]\n newq[t0] += mat[0, 1] * qubits[t1]\n newq[t1] = mat[1, 0] * qubits[t0]\n newq[t1] += mat[1, 1] * qubits[t1]\n qubits, newq = newq, qubits\n ctx.qubits = qubits\n ctx.qubits_buf = newq\n return ctx\n\n @staticmethod\n def gate_measure(gate: Measurement,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of measurement operation.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n measured = []\n for target in gate.target_iter(n_qubits):\n p_zero = np.linalg.norm(qubits[(i & (1 << target)) == 0])**2\n rand = random.random()\n if rand < p_zero:\n qubits[(i & (1 << target)) != 0] = 0.0\n qubits /= math.sqrt(p_zero)\n ctx.cregs[target] = 0\n measured.append(0)\n else:\n qubits[(i & (1 << target)) == 0] = 0.0\n qubits /= math.sqrt(1.0 - p_zero)\n ctx.cregs[target] = 1\n measured.append(1)\n if gate.key is not None:\n if gate.key in ctx.sample:\n if gate.duplicated == 
\"replace\":\n ctx.sample[gate.key] = measured\n elif gate.duplicated == \"append\":\n ctx.sample[gate.key] += measured\n else:\n raise ValueError(\"Measurement key is duplicated.\")\n else:\n ctx.sample[gate.key] = measured\n return ctx\n\n @staticmethod\n def gate_reset(gate: Reset,\n ctx: _NumPyBackendContext) -> _NumPyBackendContext:\n \"\"\"Implementation of measurement operation.\"\"\"\n qubits = ctx.qubits\n n_qubits = ctx.n_qubits\n i = ctx.indices\n for target in gate.target_iter(n_qubits):\n p_zero = np.linalg.norm(qubits[(i & (1 << target)) == 0])**2\n rand = random.random()\n t1 = (i & (1 << target)) != 0\n if rand < p_zero:\n qubits[t1] = 0.0\n qubits /= math.sqrt(p_zero)\n else:\n qubits[(i & (1 << target)) == 0] = qubits[t1]\n qubits[t1] = 0.0\n qubits /= math.sqrt(1.0 - p_zero)\n return ctx\n\n\ndef _check_and_transform_initial(initial: np.ndarray,\n n_qubits: int) -> np.ndarray:\n \"\"\"Check the shape and transform the type of initial.\"\"\"\n if not isinstance(initial, np.ndarray):\n raise ValueError(\n f\"`initial` must be a np.ndarray, but {type(initial)}\")\n if initial.shape != (2**n_qubits, ):\n raise ValueError(\n f\"`initial.shape` is not matched. Expected: {(2**n_qubits,)}, Actual: {initial.shape}\"\n )\n if initial.dtype != DEFAULT_DTYPE:\n initial = initial.astype(DEFAULT_DTYPE)\n return initial\n\n\nclass NumPyBackend(Backend):\n \"\"\"Simulator backend which uses numpy. This backend is Blueqat's default backend.\"\"\"\n __return_type: Dict[str, Callable[[_NumPyBackendContext], Any]] = {\n \"statevector\": lambda ctx: ctx.qubits,\n \"shots\": lambda ctx: ctx.shots_result,\n \"statevector_and_shots\": lambda ctx: (ctx.qubits, ctx.shots_result),\n \"_inner_ctx\": lambda ctx: ctx,\n \"samples\": lambda _: _, # dummy\n }\n\n def __init__(self) -> None:\n self.cache = None\n self.cache_idx = -1\n\n def __clear_cache(self) -> None:\n self.cache = None\n self.cache_idx = -1\n\n def __clear_cache_if_invalid(self, n_qubits: int, dtype: type) -> None:\n if self.cache is None:\n self.__clear_cache()\n return\n if len(self.cache) != 2**n_qubits:\n self.__clear_cache()\n return\n if self.cache.dtype != dtype:\n self.__clear_cache()\n return\n\n @staticmethod\n def _run_inner(ctx: _NumPyBackendContext, gates: List[Operation],\n n_qubits: int,\n initial: Optional[np.ndarray]) -> _NumPyBackendContext:\n def run_single_gate(ctx: _NumPyBackendContext,\n gate: Operation) -> _NumPyBackendContext:\n try:\n action = getattr(_NumPyBackendOperations,\n 'gate_' + gate.lowername)\n ctx = action(gate, ctx)\n except AttributeError:\n if isinstance(gate, IFallbackOperation):\n for g in gate.fallback(n_qubits):\n ctx = run_single_gate(ctx, g)\n else:\n raise ValueError(f\"Unknown operation {gate.lowername}.\") # pylint: disable=raise-missing-from\n return ctx\n\n ctx.prepare(initial)\n for i, gate in enumerate(gates[ctx.cache_idx + 1:]):\n if isinstance(gate, OPS_DISABLE_CTX_CACHE):\n if ctx.save_ctx_cache:\n ctx.cache = ctx.qubits.copy()\n ctx.cache_idx += i\n ctx.save_ctx_cache = False\n ctx = run_single_gate(ctx, gate)\n return ctx\n\n def run(self,\n gates: List[Operation],\n n_qubits,\n shots: Optional[int] = None,\n initial: Optional[np.ndarray] = None,\n returns: Optional[str] = None,\n ignore_global: bool = False,\n save_cache: bool = False,\n **kwargs) -> Any:\n def __parse_run_args(shots: Optional[int],\n returns: Optional[str]) -> Tuple[int, str]:\n if returns is None:\n if shots is None:\n returns = \"statevector\"\n else:\n returns = \"shots\"\n if returns not in 
self.__return_type.keys():\n raise ValueError(f\"Unknown returns type '{returns}'\")\n if shots is None:\n if returns in (\"statevector\", \"_inner_ctx\"):\n shots = 1\n else:\n shots = DEFAULT_SHOTS\n if returns == \"statevector\" and shots > 1:\n shots = 1\n warnings.warn(\n \"When `returns` = 'statevector', `shots` is ignored.\")\n return shots, returns\n\n shots, returns = __parse_run_args(shots, returns)\n if kwargs:\n warnings.warn(f\"Unknown run arguments: {kwargs}\")\n if returns == \"samples\":\n # Remarks: This feature is experimental.\n return self.samples(gates, n_qubits, shots, initial)\n\n if initial is not None:\n initial = _check_and_transform_initial(initial, n_qubits)\n if save_cache:\n warnings.warn(\n \"When initial is not None, saving cache is disabled.\")\n save_cache = False\n self.__clear_cache()\n else:\n self.__clear_cache_if_invalid(n_qubits, DEFAULT_DTYPE)\n\n ctx = _NumPyBackendContext(n_qubits, self.cache, self.cache_idx)\n\n for _ in range(shots):\n ctx = NumPyBackend._run_inner(ctx, gates, n_qubits, initial)\n if ctx.cregs:\n ctx.store_shot()\n if save_cache:\n self.cache = ctx.cache\n self.cache_idx = ctx.cache_idx\n\n if ignore_global:\n ignore_global_phase(ctx.qubits)\n return self.__return_type[returns](ctx)\n\n @staticmethod\n def statevector(gates: List[Operation],\n n_qubits,\n initial: Optional[np.ndarray] = None) -> np.ndarray:\n if initial is not None:\n initial = _check_and_transform_initial(initial, n_qubits)\n ctx = _NumPyBackendContext(n_qubits)\n ctx = NumPyBackend._run_inner(ctx, gates, n_qubits, initial)\n return ctx.qubits\n\n @staticmethod\n def shots(gates: List[Operation],\n n_qubits,\n shots: int,\n initial: Optional[np.ndarray] = None) -> typing.Counter[str]:\n if initial is not None:\n initial = _check_and_transform_initial(initial, n_qubits)\n ctx = _NumPyBackendContext(n_qubits)\n for _ in range(shots):\n ctx = NumPyBackend._run_inner(ctx, gates, n_qubits, initial)\n if ctx.cregs:\n ctx.store_shot()\n return ctx.shots_result\n\n @staticmethod\n def oneshot(\n gates: List[Operation],\n n_qubits,\n initial: Optional[np.ndarray] = None) -> Tuple[np.ndarray, str]:\n if initial is not None:\n initial = _check_and_transform_initial(initial, n_qubits)\n ctx = _NumPyBackendContext(n_qubits)\n ctx = NumPyBackend._run_inner(ctx, gates, n_qubits, initial)\n if ctx.cregs:\n ctx.store_shot()\n return ctx.qubits, ctx.shots_result.most_common()[0][0]\n\n @staticmethod\n def samples(gates: List[Operation],\n n_qubits,\n shots: int,\n initial: Optional[np.ndarray] = None) -> List[Dict[str, List[int]]]:\n \"\"\"This feature is experimental.\"\"\"\n if initial is not None:\n initial = _check_and_transform_initial(initial, n_qubits)\n ctx = _NumPyBackendContext(n_qubits)\n samples = []\n for _ in range(shots):\n ctx = NumPyBackend._run_inner(ctx, gates, n_qubits, initial)\n samples.append(ctx.sample)\n return samples\n\n def make_cache(self, gates: List[Operation], n_qubits: int) -> None:\n self.run(gates, n_qubits, save_cache=True)\n"
] |
[
[
"matplotlib.pyplot.show"
],
[
"numpy.copyto",
"numpy.arange",
"numpy.zeros",
"numpy.linalg.norm"
]
] |
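The `gate_*` methods in the row above all share one state-vector technique: the n-qubit state is a flat complex array of length 2**n_qubits, and a gate is applied by boolean-masking the index array on the bit(s) of its target qubit(s), e.g. `(i & (1 << target)) != 0`. The following standalone sketch (illustrative only, not part of the dataset row; the qubit count and target index are arbitrary choices for the example) reproduces that technique for a Hadamard gate using the same NumPy APIs listed in this row's `apis` field.

import math
import numpy as np

n_qubits = 3
target = 1  # hypothetical target qubit for this sketch

# |00...0> initial state and the flat index array the masks are built from.
qubits = np.zeros(2**n_qubits, dtype=np.complex128)
qubits[0] = 1.0
indices = np.arange(2**n_qubits)

# Split the basis states on the target bit; the i-th element of each half
# pairs with the i-th of the other, since paired indices differ only in
# that single bit and the relative order of the rest is preserved.
t0 = (indices & (1 << target)) == 0
t1 = (indices & (1 << target)) != 0

# Buffer-swap pattern as in gate_u / gate_mat1: copy into the buffer,
# rewrite both halves, then swap the roles of the two arrays.
newq = np.zeros_like(qubits)
np.copyto(newq, qubits)
h = 1.0 / math.sqrt(2.0)
newq[t0] = h * qubits[t0] + h * qubits[t1]
newq[t1] = h * qubits[t0] - h * qubits[t1]
qubits, newq = newq, qubits

# gate_measure relies on the state staying normalized, which this preserves.
assert abs(np.linalg.norm(qubits) - 1.0) < 1e-12

Note the design split visible in the row's code: diagonal gates (Z, CZ, CPhase, CCZ) multiply the masked slice in place and never touch the buffer, so the backend only swaps `qubits`/`qubits_buf` for the dense 2x2 gates such as `gate_u` and `gate_mat1`.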
ucgmsim/Empirical_Engine
|
[
"fa990da352c5615bcaf300142fad6907024e917b"
] |
[
"empirical/GMM_models/AfshariStewart_2016_Ds.py"
] |
[
"\"\"\"\nAfshariStewart_2016_Ds.py\nJason Motha\n29/3/16\n\nProvides the ground motion prediction equation for Significant duration\ndefined at the time from 5-95% of the Arias intensity\n\nReference: Kempton JJ, Stewart JP. Prediction equations for significant duration\nof earthquake ground motions considering site and near-source effects. \nEarthquake Spectra 2006, 22(4), pp985-1013.\n\nThis implementation only considers the acceleration-based measures, Ds5-75\nand Ds5-95, i.e. the times between the 5% and 75%/95% of the arias\nintensity buildup.\n\nThe constant values in the lists below are arranged as:\n 1st value Ds575\n 2nd value Ds595\n 3rd value Ds2080\n\nInput Variables:\n \n R = Source-to-site distance (km) (closest distance)\n siteprop = properties of site (soil etc)\n Rrup - Source-to-site distance\n Vs30 - shear wave velocity of upper 30m\n z1.5 - depth to 1500m/s shear wave velocity (in m)\n defn =0 sign duration 5-75% arias intenstiy integral\n =1 sign duration 5-95% arias intensity integral (default)\n =2 sign duration 20-80% arias intensity integral\n faultprop = properties of fault\n Mw - Moment magnitude (Mw)\n\n\nOutput Variables:\n Ds = median Ds \n sigma_Ds = lognormal standard deviation in Ds\n %sigma_Ds(1) = total std\n %sigma_Ds(2) = interevent std\n %sigma_Ds(3) = intraevent std\n\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\"\"\"\nimport math\nimport numpy as np\n\nfrom empirical.util.classdef import FaultStyle\n\nbeta = 3.2\nMstar = 6.0\n\nM1 = [5.35, 5.2, 5.2]\nM2 = [7.15, 7.4, 7.4]\n\nb2 = [0.9011, 0.9443, 0.7414]\nb3 = [-1.684, -3.911, -3.164]\n\nc1 = [0.1159, 0.3165, 0.0646]\nc2 = [0.1065, 0.2539, 0.0865]\nc3 = [0.0682, 0.0932, 0.0373]\n\nc4 = [-0.2246, -0.3183, -0.4237]\nc5 = [0.0006, 0.0006, 0.0005]\n\nR1 = 10\nR2 = 50\n\nV1 = 600\nVref = [368.2, 369.9, 369.6]\nsigma_z1ref = 200\n\ntau1 = [0.28, 0.25, 0.30]\ntau2 = [0.25, 0.19, 0.19]\nphi1 = [0.54, 0.43, 0.56]\nphi2 = [0.41, 0.35, 0.45]\n\n\ndef Afshari_Stewart_2016_Ds(siteprop, faultprop, im):\n M = faultprop.Mw\n R = siteprop.Rrup\n v30 = siteprop.vs30\n Z1p0 = siteprop.z1p0\n\n if im == \"Ds575\":\n i = 0\n elif im == \"Ds595\":\n i = 1\n elif im == \"Ds2080\":\n i = 2\n else:\n print(\"Invalid IM specified\")\n exit()\n\n M0 = 10 ** (1.5 * M + 16.05)\n\n if faultprop.faultstyle == FaultStyle.NORMAL:\n b0 = [1.555, 2.541, 1.409]\n b1 = [4.992, 3.170, 4.778]\n elif faultprop.faultstyle == FaultStyle.REVERSE:\n b0 = [0.7806, 1.612, 0.7729]\n b1 = [7.061, 4.536, 6.579]\n elif faultprop.faultstyle == FaultStyle.STRIKESLIP:\n b0 = [1.279, 2.302, 0.8804]\n b1 = [5.578, 3.467, 6.188]\n else:\n b0 = [1.28, 2.182, 0.8822]\n b1 = [5.576, 3.628, 6.182]\n\n # stress_drop = math.exp(b1[i] + b2[i] * M-Mstar)\n if M <= M2[i]:\n delta_sigma = np.exp(b1[i] + b2[i] * (M - Mstar))\n else:\n delta_sigma = np.exp(b1[i] + b2[i] * (M2[i] - Mstar) + b3[i] * (M - M2[i]))\n\n f0 = 4.9 * 10 ** 6 * beta * (delta_sigma / M0) ** (1.0 / 3.0)\n\n if M > M1[i]:\n Fe = 1 / f0\n else:\n Fe = b0[i]\n\n if R <= R1:\n Fp = c1[i] * R\n elif R <= R2:\n Fp = c1[i] * R1 + c2[i] * (R - R1)\n else:\n Fp = c1[i] * R1 + c2[i] * (R2 - R1) + c3[i] * (R - R2)\n\n # Japan\n MuZ1 = np.exp(\n -5.23 / 2 * np.log((v30 ** 2 + 412.39 ** 2) / (1360 ** 2 + 412.39 ** 2))\n - np.log(1000)\n )\n # California\n MuZ1 = np.exp(\n -7.15 / 4 * np.log((v30 ** 4 + 570.94 ** 4) / (1360 ** 4 + 570.94 ** 4))\n - np.log(1000)\n )\n\n delta_z1 = Z1p0 - MuZ1\n\n # default value\n # delta_z1 = 0\n\n if delta_z1 <= sigma_z1ref:\n FsigmaZ1 = 
c5[i] * delta_z1\n else:\n FsigmaZ1 = c5[i] * sigma_z1ref\n\n if v30 <= V1:\n Fs = c4[i] * np.log(v30 / Vref[i]) + FsigmaZ1\n else:\n Fs = c4[i] * np.log(V1 / Vref[i]) + FsigmaZ1\n\n Ds = np.exp(np.log(Fe + Fp) + Fs)\n\n if M < 6.5:\n tau_M = tau1[i]\n elif M < 7:\n tau_M = tau1[i] + (tau2[i] - tau1[i]) * ((M - 6.5) / (7 - 6.5))\n else:\n tau_M = tau2[i]\n\n if M < 5.5:\n phi_M = phi1[i]\n elif M < 5.75:\n phi_M = phi1[i] + (phi2[i] - phi1[i]) * ((M - 5.5) / (5.75 - 5.5))\n else:\n phi_M = phi2[i]\n\n total_sigma_Ds = np.sqrt(tau_M ** 2 + phi_M ** 2)\n sigma_Ds = [total_sigma_Ds, tau_M, phi_M]\n\n return Ds, sigma_Ds\n"
] |
[
[
"numpy.log",
"numpy.exp",
"numpy.sqrt"
]
] |
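The Afshari-Stewart implementation above is mostly piecewise algebra, which is easy to sanity-check in isolation. The sketch below is illustrative only, not part of the dataset row: the coefficients are copied from the Ds595/normal-fault entries of the file's constant lists, while the magnitude and distance inputs are made up. It reproduces the stress-drop source term, the three-segment path term, and the quadrature combination of the standard deviations, using the numpy.exp/numpy.sqrt APIs listed in this row's `apis` field.

import numpy as np

# Ds595 (index 1) coefficients from the file's constant lists.
c1, c2, c3 = 0.3165, 0.2539, 0.0932
R1, R2 = 10.0, 50.0
b1, b2 = 3.170, 0.9443  # normal-fault b1, Ds595 b2
Mstar = 6.0

M, R = 5.0, 75.0  # hypothetical magnitude and rupture distance (km)

# Stress drop, valid on the M <= M2 branch of the original code.
delta_sigma = np.exp(b1 + b2 * (M - Mstar))

# Path term: slope c1 out to R1, c2 from R1 to R2, c3 beyond R2.
if R <= R1:
    Fp = c1 * R
elif R <= R2:
    Fp = c1 * R1 + c2 * (R - R1)
else:
    Fp = c1 * R1 + c2 * (R2 - R1) + c3 * (R - R2)

# Total lognormal sigma: between-event (tau) and within-event (phi) terms
# combined in quadrature; with M < 5.5 both tau1 and phi1 apply directly.
tau_M, phi_M = 0.25, 0.43
total_sigma = np.sqrt(tau_M**2 + phi_M**2)

print(delta_sigma, Fp, total_sigma)

One detail worth noting when reading the original file: `MuZ1` is assigned twice in a row (a "Japan" expression immediately overwritten by a "California" one, both via numpy.log), so only the California basin-depth model actually feeds `delta_z1`.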
TromsFylkestrafikk/models
|
[
"028eecfbe3e23a73228110bff8900e50c64a7bed"
] |
[
"research/object_detection/meta_architectures/center_net_meta_arch.py"
] |
[
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The CenterNet meta architecture as described in the \"Objects as Points\" paper [1].\n\n[1]: https://arxiv.org/abs/1904.07850\n\n\"\"\"\n\nimport abc\nimport collections\nimport functools\nimport tensorflow.compat.v1 as tf\nimport tensorflow.compat.v2 as tf2\n\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import keypoint_ops\nfrom object_detection.core import model\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.core import target_assigner as cn_assigner\nfrom object_detection.utils import shape_utils\nfrom object_detection.utils import target_assigner_utils as ta_utils\nfrom object_detection.utils import tf_version\n\n\n# Number of channels needed to predict size and offsets.\nNUM_OFFSET_CHANNELS = 2\nNUM_SIZE_CHANNELS = 2\n\n# Error range for detecting peaks.\nPEAK_EPSILON = 1e-6\n\n\nclass CenterNetFeatureExtractor(tf.keras.Model):\n \"\"\"Base class for feature extractors for the CenterNet meta architecture.\n\n Child classes are expected to override the _output_model property which will\n return 1 or more tensors predicted by the feature extractor.\n\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, name=None, channel_means=(0., 0., 0.),\n channel_stds=(1., 1., 1.), bgr_ordering=False):\n \"\"\"Initializes a CenterNet feature extractor.\n\n Args:\n name: str, the name used for the underlying keras model.\n channel_means: A tuple of floats, denoting the mean of each channel\n which will be subtracted from it. If None or empty, we use 0s.\n channel_stds: A tuple of floats, denoting the standard deviation of each\n channel. 
Each channel will be divided by its standard deviation value.\n If None or empty, we use 1s.\n bgr_ordering: bool, if set will change the channel ordering to be in the\n [blue, red, green] order.\n \"\"\"\n super(CenterNetFeatureExtractor, self).__init__(name=name)\n\n if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test\n channel_means = [0., 0., 0.]\n\n if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test\n channel_stds = [1., 1., 1.]\n\n self._channel_means = channel_means\n self._channel_stds = channel_stds\n self._bgr_ordering = bgr_ordering\n\n def preprocess(self, inputs):\n \"\"\"Converts a batch of unscaled images to a scale suitable for the model.\n\n This method normalizes the image using the given `channel_means` and\n `channels_stds` values at initialization time while optionally flipping\n the channel order if `bgr_ordering` is set.\n\n Args:\n inputs: a [batch, height, width, channels] float32 tensor\n\n Returns:\n outputs: a [batch, height, width, channels] float32 tensor\n\n \"\"\"\n\n if self._bgr_ordering:\n red, green, blue = tf.unstack(inputs, axis=3)\n inputs = tf.stack([blue, green, red], axis=3)\n\n channel_means = tf.reshape(tf.constant(self._channel_means),\n [1, 1, 1, -1])\n channel_stds = tf.reshape(tf.constant(self._channel_stds),\n [1, 1, 1, -1])\n\n return (inputs - channel_means)/channel_stds\n\n @property\n @abc.abstractmethod\n def out_stride(self):\n \"\"\"The stride in the output image of the network.\"\"\"\n pass\n\n @property\n @abc.abstractmethod\n def num_feature_outputs(self):\n \"\"\"Ther number of feature outputs returned by the feature extractor.\"\"\"\n pass\n\n @property\n def classification_backbone(self):\n raise NotImplementedError(\n 'Classification backbone not supported for {}'.format(type(self)))\n\n\ndef make_prediction_net(num_out_channels, kernel_sizes=(3), num_filters=(256),\n bias_fill=None, use_depthwise=False, name=None,\n unit_height_conv=True):\n \"\"\"Creates a network to predict the given number of output channels.\n\n This function is intended to make the prediction heads for the CenterNet\n meta architecture.\n\n Args:\n num_out_channels: Number of output channels.\n kernel_sizes: A list representing the sizes of the conv kernel in the\n intermediate layer. Note that the length of the list indicates the number\n of intermediate conv layers and it must be the same as the length of the\n num_filters.\n num_filters: A list representing the number of filters in the intermediate\n conv layer. 
Note that the length of the list indicates the number of\n intermediate conv layers.\n bias_fill: If not None, is used to initialize the bias in the final conv\n layer.\n use_depthwise: If true, use SeparableConv2D to construct the Sequential\n layers instead of Conv2D.\n name: Optional name for the prediction net.\n unit_height_conv: If True, Conv2Ds have asymmetric kernels with height=1.\n\n Returns:\n net: A keras module which when called on an input tensor of size\n [batch_size, height, width, num_in_channels] returns an output\n of size [batch_size, height, width, num_out_channels]\n \"\"\"\n if isinstance(kernel_sizes, int) and isinstance(num_filters, int):\n kernel_sizes = [kernel_sizes]\n num_filters = [num_filters]\n assert len(kernel_sizes) == len(num_filters)\n if use_depthwise:\n conv_fn = tf.keras.layers.SeparableConv2D\n else:\n conv_fn = tf.keras.layers.Conv2D\n\n # We name the convolution operations explicitly because Keras, by default,\n # uses different names during training and evaluation. By setting the names\n # here, we avoid unexpected pipeline breakage in TF1.\n out_conv = tf.keras.layers.Conv2D(\n num_out_channels,\n kernel_size=1,\n name='conv1' if tf_version.is_tf1() else None)\n\n if bias_fill is not None:\n out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)\n\n layers = []\n for idx, (kernel_size,\n num_filter) in enumerate(zip(kernel_sizes, num_filters)):\n layers.append(\n conv_fn(\n num_filter,\n kernel_size=[1, kernel_size] if unit_height_conv else kernel_size,\n padding='same',\n name='conv2_%d' % idx if tf_version.is_tf1() else None))\n layers.append(tf.keras.layers.ReLU())\n layers.append(out_conv)\n net = tf.keras.Sequential(layers, name=name)\n return net\n\n\ndef _to_float32(x):\n return tf.cast(x, tf.float32)\n\n\ndef _get_shape(tensor, num_dims):\n assert len(tensor.shape.as_list()) == num_dims\n return shape_utils.combined_static_and_dynamic_shape(tensor)\n\n\ndef _flatten_spatial_dimensions(batch_images):\n batch_size, height, width, channels = _get_shape(batch_images, 4)\n return tf.reshape(batch_images, [batch_size, height * width,\n channels])\n\n\ndef _multi_range(limit,\n value_repetitions=1,\n range_repetitions=1,\n dtype=tf.int32):\n \"\"\"Creates a sequence with optional value duplication and range repetition.\n\n As an example (see the Args section for more details),\n _multi_range(limit=2, value_repetitions=3, range_repetitions=4) returns:\n\n [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]\n\n Args:\n limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive.\n value_repetitions: Integer. The number of times a value in the sequence is\n repeated. With value_repetitions=3, the result is [0, 0, 0, 1, 1, 1, ..].\n range_repetitions: Integer. The number of times the range is repeated. With\n range_repetitions=3, the result is [0, 1, 2, .., 0, 1, 2, ..].\n dtype: The type of the elements of the resulting tensor.\n\n Returns:\n A 1-D tensor of type `dtype` and size\n [`limit` * `value_repetitions` * `range_repetitions`] that contains the\n specified range with given repetitions.\n \"\"\"\n return tf.reshape(\n tf.tile(\n tf.expand_dims(tf.range(limit, dtype=dtype), axis=-1),\n multiples=[range_repetitions, value_repetitions]), [-1])\n\n\ndef top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100,\n per_channel=False):\n \"\"\"Returns the top k scores and their locations in a feature map.\n\n Given a feature map, the top k values (based on activation) are returned. 
If\n `per_channel` is True, the top k values **per channel** are returned. Note\n that when k equals to 1, ths function uses reduce_max and argmax instead of\n top_k to make the logics more efficient.\n\n The `max_pool_kernel_size` argument allows for selecting local peaks in a\n region. This filtering is done per channel, so nothing prevents two values at\n the same location to be returned.\n\n Args:\n feature_map: [batch, height, width, channels] float32 feature map.\n max_pool_kernel_size: integer, the max pool kernel size to use to pull off\n peak score locations in a neighborhood (independently for each channel).\n For example, to make sure no two neighboring values (in the same channel)\n are returned, set max_pool_kernel_size=3. If None or 1, will not apply max\n pooling.\n k: The number of highest scoring locations to return.\n per_channel: If True, will return the top k scores and locations per\n feature map channel. If False, the top k across the entire feature map\n (height x width x channels) are returned.\n\n Returns:\n Tuple of\n scores: A [batch, N] float32 tensor with scores from the feature map in\n descending order. If per_channel is False, N = k. Otherwise,\n N = k * channels, and the first k elements correspond to channel 0, the\n second k correspond to channel 1, etc.\n y_indices: A [batch, N] int tensor with y indices of the top k feature map\n locations. If per_channel is False, N = k. Otherwise,\n N = k * channels.\n x_indices: A [batch, N] int tensor with x indices of the top k feature map\n locations. If per_channel is False, N = k. Otherwise,\n N = k * channels.\n channel_indices: A [batch, N] int tensor with channel indices of the top k\n feature map locations. If per_channel is False, N = k. Otherwise,\n N = k * channels.\n \"\"\"\n if not max_pool_kernel_size or max_pool_kernel_size == 1:\n feature_map_peaks = feature_map\n else:\n feature_map_max_pool = tf.nn.max_pool(\n feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME')\n\n feature_map_peak_mask = tf.math.abs(\n feature_map - feature_map_max_pool) < PEAK_EPSILON\n\n # Zero out everything that is not a peak.\n feature_map_peaks = (\n feature_map * _to_float32(feature_map_peak_mask))\n\n batch_size, _, width, num_channels = _get_shape(feature_map, 4)\n\n if per_channel:\n if k == 1:\n feature_map_flattened = tf.reshape(\n feature_map_peaks, [batch_size, -1, num_channels])\n scores = tf.math.reduce_max(feature_map_flattened, axis=1)\n peak_flat_indices = tf.math.argmax(\n feature_map_flattened, axis=1, output_type=tf.dtypes.int32)\n peak_flat_indices = tf.expand_dims(peak_flat_indices, axis=-1)\n else:\n # Perform top k over batch and channels.\n feature_map_peaks_transposed = tf.transpose(feature_map_peaks,\n perm=[0, 3, 1, 2])\n feature_map_peaks_transposed = tf.reshape(\n feature_map_peaks_transposed, [batch_size, num_channels, -1])\n scores, peak_flat_indices = tf.math.top_k(\n feature_map_peaks_transposed, k=k)\n # Convert the indices such that they represent the location in the full\n # (flattened) feature map of size [batch, height * width * channels].\n channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis]\n peak_flat_indices = num_channels * peak_flat_indices + channel_idx\n scores = tf.reshape(scores, [batch_size, -1])\n peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1])\n else:\n if k == 1:\n feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])\n scores = tf.math.reduce_max(feature_map_peaks_flat, axis=1, keepdims=True)\n 
peak_flat_indices = tf.expand_dims(tf.math.argmax(\n feature_map_peaks_flat, axis=1, output_type=tf.dtypes.int32), axis=-1)\n else:\n feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])\n safe_k = tf.minimum(k, tf.shape(feature_map_peaks_flat)[1])\n scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat,\n k=safe_k)\n\n # Get x, y and channel indices corresponding to the top indices in the flat\n # array.\n y_indices, x_indices, channel_indices = (\n row_col_channel_indices_from_flattened_indices(\n peak_flat_indices, width, num_channels))\n return scores, y_indices, x_indices, channel_indices\n\n\ndef prediction_tensors_to_boxes(y_indices, x_indices, height_width_predictions,\n offset_predictions):\n \"\"\"Converts CenterNet class-center, offset and size predictions to boxes.\n\n Args:\n y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to\n object center locations (expressed in output coordinate frame).\n x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to\n object center locations (expressed in output coordinate frame).\n height_width_predictions: A float tensor of shape [batch_size, height,\n width, 2] representing the height and width of a box centered at each\n pixel.\n offset_predictions: A float tensor of shape [batch_size, height, width, 2]\n representing the y and x offsets of a box centered at each pixel. This\n helps reduce the error from downsampling.\n\n Returns:\n detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the\n the raw bounding box coordinates of boxes.\n \"\"\"\n batch_size, num_boxes = _get_shape(y_indices, 2)\n _, height, width, _ = _get_shape(height_width_predictions, 4)\n height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)\n\n # TF Lite does not support tf.gather with batch_dims > 0, so we need to use\n # tf_gather_nd instead and here we prepare the indices for that.\n combined_indices = tf.stack([\n _multi_range(batch_size, value_repetitions=num_boxes),\n tf.reshape(y_indices, [-1]),\n tf.reshape(x_indices, [-1])\n ], axis=1)\n new_height_width = tf.gather_nd(height_width_predictions, combined_indices)\n new_height_width = tf.reshape(new_height_width, [batch_size, num_boxes, 2])\n\n new_offsets = tf.gather_nd(offset_predictions, combined_indices)\n offsets = tf.reshape(new_offsets, [batch_size, num_boxes, 2])\n\n y_indices = _to_float32(y_indices)\n x_indices = _to_float32(x_indices)\n\n height_width = tf.maximum(new_height_width, 0)\n heights, widths = tf.unstack(height_width, axis=2)\n y_offsets, x_offsets = tf.unstack(offsets, axis=2)\n\n ymin = y_indices + y_offsets - heights / 2.0\n xmin = x_indices + x_offsets - widths / 2.0\n ymax = y_indices + y_offsets + heights / 2.0\n xmax = x_indices + x_offsets + widths / 2.0\n\n ymin = tf.clip_by_value(ymin, 0., height)\n xmin = tf.clip_by_value(xmin, 0., width)\n ymax = tf.clip_by_value(ymax, 0., height)\n xmax = tf.clip_by_value(xmax, 0., width)\n boxes = tf.stack([ymin, xmin, ymax, xmax], axis=2)\n\n return boxes\n\n\ndef prediction_tensors_to_temporal_offsets(\n y_indices, x_indices, offset_predictions):\n \"\"\"Converts CenterNet temporal offset map predictions to batched format.\n\n This function is similar to the box offset conversion function, as both\n temporal offsets and box offsets are size-2 vectors.\n\n Args:\n y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to\n object center locations (expressed in output coordinate frame).\n x_indices: A [batch, 
num_boxes] int32 tensor with x indices corresponding to\n object center locations (expressed in output coordinate frame).\n offset_predictions: A float tensor of shape [batch_size, height, width, 2]\n representing the y and x offsets of a box's center across adjacent frames.\n\n Returns:\n offsets: A tensor of shape [batch_size, num_boxes, 2] holding the\n the object temporal offsets of (y, x) dimensions.\n\n \"\"\"\n batch_size, num_boxes = _get_shape(y_indices, 2)\n\n # TF Lite does not support tf.gather with batch_dims > 0, so we need to use\n # tf_gather_nd instead and here we prepare the indices for that.\n combined_indices = tf.stack([\n _multi_range(batch_size, value_repetitions=num_boxes),\n tf.reshape(y_indices, [-1]),\n tf.reshape(x_indices, [-1])\n ], axis=1)\n\n new_offsets = tf.gather_nd(offset_predictions, combined_indices)\n offsets = tf.reshape(new_offsets, [batch_size, num_boxes, -1])\n\n return offsets\n\n\ndef prediction_tensors_to_keypoint_candidates(keypoint_heatmap_predictions,\n keypoint_heatmap_offsets,\n keypoint_score_threshold=0.1,\n max_pool_kernel_size=1,\n max_candidates=20,\n keypoint_depths=None):\n \"\"\"Convert keypoint heatmap predictions and offsets to keypoint candidates.\n\n Args:\n keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,\n width, num_keypoints] representing the per-keypoint heatmaps.\n keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,\n width, 2] (or [batch_size, height, width, 2 * num_keypoints] if\n 'per_keypoint_offset' is set True) representing the per-keypoint offsets.\n keypoint_score_threshold: float, the threshold for considering a keypoint a\n candidate.\n max_pool_kernel_size: integer, the max pool kernel size to use to pull off\n peak score locations in a neighborhood. For example, to make sure no two\n neighboring values for the same keypoint are returned, set\n max_pool_kernel_size=3. If None or 1, will not apply any local filtering.\n max_candidates: integer, maximum number of keypoint candidates per keypoint\n type.\n keypoint_depths: (optional) A float tensor of shape [batch_size, height,\n width, 1] (or [batch_size, height, width, num_keypoints] if\n 'per_keypoint_depth' is set True) representing the per-keypoint depths.\n\n Returns:\n keypoint_candidates: A tensor of shape\n [batch_size, max_candidates, num_keypoints, 2] holding the\n location of keypoint candidates in [y, x] format (expressed in absolute\n coordinates in the output coordinate frame).\n keypoint_scores: A float tensor of shape\n [batch_size, max_candidates, num_keypoints] with the scores for each\n keypoint candidate. The scores come directly from the heatmap predictions.\n num_keypoint_candidates: An integer tensor of shape\n [batch_size, num_keypoints] with the number of candidates for each\n keypoint type, as it's possible to filter some candidates due to the score\n threshold.\n depth_candidates: A tensor of shape [batch_size, max_candidates,\n num_keypoints] representing the estimated depth of each keypoint\n candidate. 
Return None if the input keypoint_depths is None.\n \"\"\"\n batch_size, _, _, num_keypoints = _get_shape(keypoint_heatmap_predictions, 4)\n # Get x, y and channel indices corresponding to the top indices in the\n # keypoint heatmap predictions.\n # Note that the top k candidates are produced for **each keypoint type**.\n # Might be worth eventually trying top k in the feature map, independent of\n # the keypoint type.\n keypoint_scores, y_indices, x_indices, channel_indices = (\n top_k_feature_map_locations(keypoint_heatmap_predictions,\n max_pool_kernel_size=max_pool_kernel_size,\n k=max_candidates,\n per_channel=True))\n\n # TF Lite does not support tf.gather with batch_dims > 0, so we need to use\n # tf_gather_nd instead and here we prepare the indices for that.\n _, num_indices = _get_shape(y_indices, 2)\n combined_indices = tf.stack([\n _multi_range(batch_size, value_repetitions=num_indices),\n tf.reshape(y_indices, [-1]),\n tf.reshape(x_indices, [-1])\n ], axis=1)\n\n selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets,\n combined_indices)\n selected_offsets = tf.reshape(selected_offsets_flat,\n [batch_size, num_indices, -1])\n\n y_indices = _to_float32(y_indices)\n x_indices = _to_float32(x_indices)\n\n _, _, num_channels = _get_shape(selected_offsets, 3)\n if num_channels > 2:\n # Offsets are per keypoint and the last dimension of selected_offsets\n # contains all those offsets, so reshape the offsets to make sure that the\n # last dimension contains (y_offset, x_offset) for a single keypoint.\n reshaped_offsets = tf.reshape(selected_offsets,\n [batch_size, num_indices, -1, 2])\n\n # TF Lite does not support tf.gather with batch_dims > 0, so we need to use\n # tf_gather_nd instead and here we prepare the indices for that. In this\n # case, channel_indices indicates which keypoint to use the offset from.\n channel_combined_indices = tf.stack([\n _multi_range(batch_size, value_repetitions=num_indices),\n _multi_range(num_indices, range_repetitions=batch_size),\n tf.reshape(channel_indices, [-1])\n ], axis=1)\n\n offsets = tf.gather_nd(reshaped_offsets, channel_combined_indices)\n offsets = tf.reshape(offsets, [batch_size, num_indices, -1])\n else:\n offsets = selected_offsets\n y_offsets, x_offsets = tf.unstack(offsets, axis=2)\n\n keypoint_candidates = tf.stack([y_indices + y_offsets,\n x_indices + x_offsets], axis=2)\n keypoint_candidates = tf.reshape(\n keypoint_candidates,\n [batch_size, num_keypoints, max_candidates, 2])\n keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3])\n keypoint_scores = tf.reshape(\n keypoint_scores,\n [batch_size, num_keypoints, max_candidates])\n keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1])\n num_candidates = tf.reduce_sum(\n tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1)\n\n depth_candidates = None\n if keypoint_depths is not None:\n selected_depth_flat = tf.gather_nd(keypoint_depths, combined_indices)\n selected_depth = tf.reshape(selected_depth_flat,\n [batch_size, num_indices, -1])\n _, _, num_depth_channels = _get_shape(selected_depth, 3)\n if num_depth_channels > 1:\n combined_indices = tf.stack([\n _multi_range(batch_size, value_repetitions=num_indices),\n _multi_range(num_indices, range_repetitions=batch_size),\n tf.reshape(channel_indices, [-1])\n ], axis=1)\n depth = tf.gather_nd(selected_depth, combined_indices)\n depth = tf.reshape(depth, [batch_size, num_indices, -1])\n else:\n depth = selected_depth\n depth_candidates = tf.reshape(depth,\n [batch_size, num_keypoints, 
max_candidates])\n depth_candidates = tf.transpose(depth_candidates, [0, 2, 1])\n\n return keypoint_candidates, keypoint_scores, num_candidates, depth_candidates\n\n\ndef argmax_feature_map_locations(feature_map):\n \"\"\"Returns the peak locations in the feature map.\"\"\"\n batch_size, _, width, num_channels = _get_shape(feature_map, 4)\n\n feature_map_flattened = tf.reshape(\n feature_map, [batch_size, -1, num_channels])\n peak_flat_indices = tf.math.argmax(\n feature_map_flattened, axis=1, output_type=tf.dtypes.int32)\n # Get x and y indices corresponding to the top indices in the flat array.\n y_indices, x_indices = (\n row_col_indices_from_flattened_indices(peak_flat_indices, width))\n channel_indices = tf.tile(\n tf.range(num_channels)[tf.newaxis, :], [batch_size, 1])\n return y_indices, x_indices, channel_indices\n\n\ndef prediction_tensors_to_single_instance_kpts(\n keypoint_heatmap_predictions,\n keypoint_heatmap_offsets,\n keypoint_score_heatmap=None):\n \"\"\"Convert keypoint heatmap predictions and offsets to keypoint candidates.\n\n Args:\n keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,\n width, num_keypoints] representing the per-keypoint heatmaps which is\n used for finding the best keypoint candidate locations.\n keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,\n width, 2] (or [batch_size, height, width, 2 * num_keypoints] if\n 'per_keypoint_offset' is set True) representing the per-keypoint offsets.\n keypoint_score_heatmap: (optional) A float tensor of shape [batch_size,\n height, width, num_keypoints] representing the heatmap which is used for\n reporting the confidence scores. If not provided, then the values in the\n keypoint_heatmap_predictions will be used.\n\n Returns:\n keypoint_candidates: A tensor of shape\n [batch_size, max_candidates, num_keypoints, 2] holding the\n location of keypoint candidates in [y, x] format (expressed in absolute\n coordinates in the output coordinate frame).\n keypoint_scores: A float tensor of shape\n [batch_size, max_candidates, num_keypoints] with the scores for each\n keypoint candidate. 
The scores come directly from the heatmap predictions.\n num_keypoint_candidates: An integer tensor of shape\n [batch_size, num_keypoints] with the number of candidates for each\n keypoint type, as it's possible to filter some candidates due to the score\n threshold.\n \"\"\"\n batch_size, _, _, num_keypoints = _get_shape(\n keypoint_heatmap_predictions, 4)\n # Get x, y and channel indices corresponding to the top indices in the\n # keypoint heatmap predictions.\n y_indices, x_indices, channel_indices = argmax_feature_map_locations(\n keypoint_heatmap_predictions)\n\n # TF Lite does not support tf.gather with batch_dims > 0, so we need to use\n # tf_gather_nd instead and here we prepare the indices for that.\n _, num_keypoints = _get_shape(y_indices, 2)\n combined_indices = tf.stack([\n _multi_range(batch_size, value_repetitions=num_keypoints),\n tf.reshape(y_indices, [-1]),\n tf.reshape(x_indices, [-1]),\n ], axis=1)\n\n # shape: [num_keypoints, num_keypoints * 2]\n selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets,\n combined_indices)\n # shape: [num_keypoints, num_keypoints, 2].\n selected_offsets_flat = tf.reshape(\n selected_offsets_flat, [num_keypoints, num_keypoints, -1])\n # shape: [num_keypoints].\n channel_indices = tf.keras.backend.flatten(channel_indices)\n # shape: [num_keypoints, 2].\n retrieve_indices = tf.stack([channel_indices, channel_indices], axis=1)\n # shape: [num_keypoints, 2]\n selected_offsets = tf.gather_nd(selected_offsets_flat, retrieve_indices)\n y_offsets, x_offsets = tf.unstack(selected_offsets, axis=1)\n\n keypoint_candidates = tf.stack([\n tf.cast(y_indices, dtype=tf.float32) + tf.expand_dims(y_offsets, axis=0),\n tf.cast(x_indices, dtype=tf.float32) + tf.expand_dims(x_offsets, axis=0)\n ], axis=2)\n keypoint_candidates = tf.expand_dims(keypoint_candidates, axis=0)\n\n # Append the channel indices back to retrieve the keypoint scores from the\n # heatmap.\n combined_indices = tf.concat(\n [combined_indices, tf.expand_dims(channel_indices, axis=-1)], axis=1)\n if keypoint_score_heatmap is None:\n keypoint_scores = tf.gather_nd(\n keypoint_heatmap_predictions, combined_indices)\n else:\n keypoint_scores = tf.gather_nd(keypoint_score_heatmap, combined_indices)\n keypoint_scores = tf.expand_dims(\n tf.expand_dims(keypoint_scores, axis=0), axis=0)\n return keypoint_candidates, keypoint_scores\n\n\ndef _score_to_distance_map(y_grid, x_grid, heatmap, points_y, points_x,\n score_distance_offset):\n \"\"\"Rescores heatmap using the distance information.\n\n Rescore the heatmap scores using the formula:\n score / (d + score_distance_offset), where the d is the distance from each\n pixel location to the target point location.\n\n Args:\n y_grid: A float tensor with shape [height, width] representing the\n y-coordinate of each pixel grid.\n x_grid: A float tensor with shape [height, width] representing the\n x-coordinate of each pixel grid.\n heatmap: A float tensor with shape [1, height, width, channel]\n representing the heatmap to be rescored.\n points_y: A float tensor with shape [channel] representing the y\n coordinates of the target points for each channel.\n points_x: A float tensor with shape [channel] representing the x\n coordinates of the target points for each channel.\n score_distance_offset: A constant used in the above formula.\n\n Returns:\n A float tensor with shape [1, height, width, channel] representing the\n rescored heatmap.\n \"\"\"\n y_diff = y_grid[:, :, tf.newaxis] - points_y\n x_diff = x_grid[:, :, tf.newaxis] - points_x\n 
distance = tf.math.sqrt(y_diff**2 + x_diff**2)\n return tf.math.divide(heatmap, distance + score_distance_offset)\n\n\ndef prediction_to_single_instance_keypoints(\n object_heatmap,\n keypoint_heatmap,\n keypoint_offset,\n keypoint_regression,\n kp_params,\n keypoint_depths=None):\n \"\"\"Postprocess function to predict single instance keypoints.\n\n This is a simplified postprocessing function based on the assumption that\n there is only one instance in the image. If there are multiple instances in\n the image, the model prefers to predict the one that is closest to the image\n center. Here is a high-level description of what this function does:\n 1) Object heatmap re-weighted by the distance between each pixel to the\n image center is used to determine the instance center.\n 2) Regressed keypoint locations are retrieved from the instance center. The\n Gaussian kernel is applied to the regressed keypoint locations to\n re-weight the keypoint heatmap. This is to select the keypoints that are\n associated with the center instance without using top_k op.\n 3) The keypoint locations are computed by the re-weighted keypoint heatmap\n and the keypoint offset.\n\n Args:\n object_heatmap: A float tensor of shape [1, height, width, 1] representing\n the heapmap of the class.\n keypoint_heatmap: A float tensor of shape [1, height, width, num_keypoints]\n representing the per-keypoint heatmaps.\n keypoint_offset: A float tensor of shape [1, height, width, 2] (or [1,\n height, width, 2 * num_keypoints] if 'per_keypoint_offset' is set True)\n representing the per-keypoint offsets.\n keypoint_regression: A float tensor of shape [1, height, width, 2 *\n num_keypoints] representing the joint regression prediction.\n kp_params: A `KeypointEstimationParams` object with parameters for a single\n keypoint class.\n keypoint_depths: (optional) A float tensor of shape [batch_size, height,\n width, 1] (or [batch_size, height, width, num_keypoints] if\n 'per_keypoint_depth' is set True) representing the per-keypoint depths.\n\n Returns:\n A tuple of two tensors:\n keypoint_candidates: A float tensor with shape [1, 1, num_keypoints, 2]\n representing the yx-coordinates of the keypoints in the output feature\n map space.\n keypoint_scores: A float tensor with shape [1, 1, num_keypoints]\n representing the keypoint prediction scores.\n\n Raises:\n ValueError: if the input keypoint_std_dev doesn't have valid number of\n elements (1 or num_keypoints).\n \"\"\"\n # TODO(yuhuic): add the keypoint depth prediction logics in the browser\n # postprocessing back.\n del keypoint_depths\n\n num_keypoints = len(kp_params.keypoint_std_dev)\n batch_size, height, width, _ = _get_shape(keypoint_heatmap, 4)\n\n # Create the image center location.\n image_center_y = tf.convert_to_tensor([0.5 * height], dtype=tf.float32)\n image_center_x = tf.convert_to_tensor([0.5 * width], dtype=tf.float32)\n (y_grid, x_grid) = ta_utils.image_shape_to_grids(height, width)\n # Rescore the object heatmap by the distnace to the image center.\n object_heatmap = _score_to_distance_map(\n y_grid, x_grid, object_heatmap, image_center_y,\n image_center_x, kp_params.score_distance_offset)\n\n # Pick the highest score and location of the weighted object heatmap.\n y_indices, x_indices, _ = argmax_feature_map_locations(object_heatmap)\n _, num_indices = _get_shape(y_indices, 2)\n combined_indices = tf.stack([\n _multi_range(batch_size, value_repetitions=num_indices),\n tf.reshape(y_indices, [-1]),\n tf.reshape(x_indices, [-1])\n ], axis=1)\n\n # Select the 
regression vectors from the object center.\n selected_regression_flat = tf.gather_nd(keypoint_regression, combined_indices)\n # shape: [num_keypoints, 2]\n regression_offsets = tf.reshape(selected_regression_flat, [num_keypoints, -1])\n (y_reg, x_reg) = tf.unstack(regression_offsets, axis=1)\n y_regressed = tf.cast(y_indices, dtype=tf.float32) + y_reg\n x_regressed = tf.cast(x_indices, dtype=tf.float32) + x_reg\n\n if kp_params.candidate_ranking_mode == 'score_distance_ratio':\n reweighted_keypoint_heatmap = _score_to_distance_map(\n y_grid, x_grid, keypoint_heatmap, y_regressed, x_regressed,\n kp_params.score_distance_offset)\n else:\n raise ValueError('Unsupported candidate_ranking_mode: %s' %\n kp_params.candidate_ranking_mode)\n\n # Get the keypoint locations/scores:\n # keypoint_candidates: [1, 1, num_keypoints, 2]\n # keypoint_scores: [1, 1, num_keypoints]\n # depth_candidates: [1, 1, num_keypoints]\n (keypoint_candidates, keypoint_scores\n ) = prediction_tensors_to_single_instance_kpts(\n reweighted_keypoint_heatmap,\n keypoint_offset,\n keypoint_score_heatmap=keypoint_heatmap)\n return keypoint_candidates, keypoint_scores, None\n\n\ndef _gaussian_weighted_map_const_multi(\n y_grid, x_grid, heatmap, points_y, points_x, boxes,\n gaussian_denom_ratio):\n \"\"\"Rescores heatmap using the distance information.\n\n The function is called when the candidate_ranking_mode in the\n KeypointEstimationParams is set to be 'gaussian_weighted_const'. The\n keypoint candidates are ranked using the formula:\n heatmap_score * exp((-distances^2) / (gaussian_denom))\n\n where 'gaussian_denom' is determined by:\n min(output_feature_height, output_feature_width) * gaussian_denom_ratio\n\n the 'distances' are the distances between the grid coordinates and the target\n points.\n\n Note that the postfix 'const' refers to the fact that the denominator is a\n constant given the input image size, not scaled by the size of each of the\n instances.\n\n Args:\n y_grid: A float tensor with shape [height, width] representing the\n y-coordinate of each pixel grid.\n x_grid: A float tensor with shape [height, width] representing the\n x-coordinate of each pixel grid.\n heatmap: A float tensor with shape [height, width, num_keypoints]\n representing the heatmap to be rescored.\n points_y: A float tensor with shape [num_instances, num_keypoints]\n representing the y coordinates of the target points for each channel.\n points_x: A float tensor with shape [num_instances, num_keypoints]\n representing the x coordinates of the target points for each channel.\n boxes: A tensor of shape [num_instances, 4] with predicted bounding boxes\n for each instance, expressed in the output coordinate frame.\n gaussian_denom_ratio: A constant used in the above formula that determines\n the denominator of the Gaussian kernel.\n\n Returns:\n A float tensor with shape [height, width, channel] representing\n the rescored heatmap.\n \"\"\"\n num_instances, _ = _get_shape(boxes, 2)\n height, width, num_keypoints = _get_shape(heatmap, 3)\n\n # [height, width, num_instances, num_keypoints].\n # Note that we intentionally avoid using tf.newaxis as TfLite converter\n # doesn't like it.\n y_diff = (\n tf.reshape(y_grid, [height, width, 1, 1]) -\n tf.reshape(points_y, [1, 1, num_instances, num_keypoints]))\n x_diff = (\n tf.reshape(x_grid, [height, width, 1, 1]) -\n tf.reshape(points_x, [1, 1, num_instances, num_keypoints]))\n distance_square = y_diff * y_diff + x_diff * x_diff\n\n y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=1)\n\n # Make 
the mask with all 1.0 in the box regions.\n # Shape: [height, width, num_instances]\n in_boxes = tf.math.logical_and(\n tf.math.logical_and(\n tf.reshape(y_grid, [height, width, 1]) >= tf.reshape(\n y_min, [1, 1, num_instances]),\n tf.reshape(y_grid, [height, width, 1]) < tf.reshape(\n y_max, [1, 1, num_instances])),\n tf.math.logical_and(\n tf.reshape(x_grid, [height, width, 1]) >= tf.reshape(\n x_min, [1, 1, num_instances]),\n tf.reshape(x_grid, [height, width, 1]) < tf.reshape(\n x_max, [1, 1, num_instances])))\n in_boxes = tf.cast(in_boxes, dtype=tf.float32)\n\n gaussian_denom = tf.cast(\n tf.minimum(height, width), dtype=tf.float32) * gaussian_denom_ratio\n # shape: [height, width, num_instances, num_keypoints]\n gaussian_map = tf.exp((-1 * distance_square) / gaussian_denom)\n return tf.expand_dims(heatmap, axis=2) * gaussian_map * tf.reshape(\n in_boxes, [height, width, num_instances, 1])\n\n\ndef prediction_tensors_to_multi_instance_kpts(\n keypoint_heatmap_predictions,\n keypoint_heatmap_offsets,\n keypoint_score_heatmap=None):\n \"\"\"Converts keypoint heatmap predictions and offsets to keypoint candidates.\n\n This function is similar to the 'prediction_tensors_to_single_instance_kpts'\n function except that the input keypoint_heatmap_predictions is prepared to\n have an additional 'num_instances' dimension for multi-instance prediction.\n\n Args:\n keypoint_heatmap_predictions: A float tensor of shape [height,\n width, num_instances, num_keypoints] representing the per-keypoint and\n per-instance heatmaps which is used for finding the best keypoint\n candidate locations.\n keypoint_heatmap_offsets: A float tensor of shape [height,\n width, 2 * num_keypoints] representing the per-keypoint offsets.\n keypoint_score_heatmap: (optional) A float tensor of shape [height, width,\n num_keypoints] representing the heatmap which is used for reporting the\n confidence scores. If not provided, then the values in the\n keypoint_heatmap_predictions will be used.\n\n Returns:\n keypoint_candidates: A tensor of shape\n [1, max_candidates, num_keypoints, 2] holding the\n location of keypoint candidates in [y, x] format (expressed in absolute\n coordinates in the output coordinate frame).\n keypoint_scores: A float tensor of shape\n [1, max_candidates, num_keypoints] with the scores for each\n keypoint candidate. 
The scores come directly from the heatmap predictions.\n \"\"\"\n height, width, num_instances, num_keypoints = _get_shape(\n keypoint_heatmap_predictions, 4)\n\n # [height * width, num_instances * num_keypoints].\n feature_map_flattened = tf.reshape(\n keypoint_heatmap_predictions,\n [-1, num_instances * num_keypoints])\n\n # [num_instances * num_keypoints].\n peak_flat_indices = tf.math.argmax(\n feature_map_flattened, axis=0, output_type=tf.dtypes.int32)\n\n # Get x and y indices corresponding to the top indices in the flat array.\n y_indices, x_indices = (\n row_col_indices_from_flattened_indices(peak_flat_indices, width))\n # [num_instances * num_keypoints].\n y_indices = tf.reshape(y_indices, [-1])\n x_indices = tf.reshape(x_indices, [-1])\n\n # Prepare the indices to gather the offsets from the keypoint_heatmap_offsets.\n kpts_idx = _multi_range(\n limit=num_keypoints, value_repetitions=1,\n range_repetitions=num_instances)\n combined_indices = tf.stack([\n y_indices,\n x_indices,\n kpts_idx\n ], axis=1)\n\n keypoint_heatmap_offsets = tf.reshape(\n keypoint_heatmap_offsets, [height, width, num_keypoints, 2])\n # Retrieve the keypoint offsets: shape:\n # [num_instance * num_keypoints, 2].\n selected_offsets_flat = tf.gather_nd(keypoint_heatmap_offsets,\n combined_indices)\n y_offsets, x_offsets = tf.unstack(selected_offsets_flat, axis=1)\n\n keypoint_candidates = tf.stack([\n tf.cast(y_indices, dtype=tf.float32) + tf.expand_dims(y_offsets, axis=0),\n tf.cast(x_indices, dtype=tf.float32) + tf.expand_dims(x_offsets, axis=0)\n ], axis=2)\n keypoint_candidates = tf.reshape(\n keypoint_candidates, [num_instances, num_keypoints, 2])\n\n if keypoint_score_heatmap is None:\n keypoint_scores = tf.gather_nd(\n tf.reduce_max(keypoint_heatmap_predictions, axis=2), combined_indices)\n else:\n keypoint_scores = tf.gather_nd(keypoint_score_heatmap, combined_indices)\n return tf.expand_dims(keypoint_candidates, axis=0), tf.reshape(\n keypoint_scores, [1, num_instances, num_keypoints])\n\n\ndef prediction_to_keypoints_argmax(\n prediction_dict,\n object_y_indices,\n object_x_indices,\n boxes,\n task_name,\n kp_params):\n \"\"\"Postprocess function to predict multi instance keypoints with argmax op.\n\n This is a different implementation of the original keypoint postprocessing\n function such that it avoids using topk op (replaced by argmax) as it runs\n much slower in the browser. Note that in this function, we assume the\n batch_size to be 1 to avoid using 5D tensors which cause issues when\n converting to the TfLite model.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors, returned from the\n predict() method. 
This dictionary should contain keypoint prediction\n feature maps for each keypoint task.\n object_y_indices: A float tensor of shape [batch_size, max_instances]\n representing the location indices of the object centers.\n object_x_indices: A float tensor of shape [batch_size, max_instances]\n representing the location indices of the object centers.\n boxes: A tensor of shape [batch_size, num_instances, 4] with predicted\n bounding boxes for each instance, expressed in the output coordinate\n frame.\n task_name: string, the name of the task this namedtuple corresponds to.\n Note that it should be an unique identifier of the task.\n kp_params: A `KeypointEstimationParams` object with parameters for a single\n keypoint class.\n\n Returns:\n A tuple of two tensors:\n keypoint_candidates: A float tensor with shape [batch_size,\n num_instances, num_keypoints, 2] representing the yx-coordinates of\n the keypoints in the output feature map space.\n keypoint_scores: A float tensor with shape [batch_size, num_instances,\n num_keypoints] representing the keypoint prediction scores.\n\n Raises:\n ValueError: if the candidate_ranking_mode is not supported.\n \"\"\"\n keypoint_heatmap = tf.squeeze(tf.nn.sigmoid(prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]), axis=0)\n keypoint_offset = tf.squeeze(prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1], axis=0)\n keypoint_regression = tf.squeeze(prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1], axis=0)\n height, width, num_keypoints = _get_shape(keypoint_heatmap, 3)\n\n # Create the y,x grids: [height, width]\n (y_grid, x_grid) = ta_utils.image_shape_to_grids(height, width)\n\n # Prepare the indices to retrieve the information from object centers.\n num_instances = _get_shape(object_y_indices, 2)[1]\n combined_obj_indices = tf.stack([\n tf.reshape(object_y_indices, [-1]),\n tf.reshape(object_x_indices, [-1])\n ], axis=1)\n\n # Select the regression vectors from the object center.\n selected_regression_flat = tf.gather_nd(\n keypoint_regression, combined_obj_indices)\n selected_regression = tf.reshape(\n selected_regression_flat, [num_instances, num_keypoints, 2])\n (y_reg, x_reg) = tf.unstack(selected_regression, axis=2)\n\n # shape: [num_instances, num_keypoints].\n y_regressed = tf.cast(\n tf.reshape(object_y_indices, [num_instances, 1]),\n dtype=tf.float32) + y_reg\n x_regressed = tf.cast(\n tf.reshape(object_x_indices, [num_instances, 1]),\n dtype=tf.float32) + x_reg\n\n if kp_params.candidate_ranking_mode == 'gaussian_weighted_const':\n rescored_heatmap = _gaussian_weighted_map_const_multi(\n y_grid, x_grid, keypoint_heatmap, y_regressed, x_regressed,\n tf.squeeze(boxes, axis=0), kp_params.gaussian_denom_ratio)\n\n # shape: [height, width, num_keypoints].\n keypoint_score_heatmap = tf.math.reduce_max(rescored_heatmap, axis=2)\n else:\n raise ValueError(\n 'Unsupported ranking mode in the multipose no topk method: %s' %\n kp_params.candidate_ranking_mode)\n (keypoint_candidates,\n keypoint_scores) = prediction_tensors_to_multi_instance_kpts(\n keypoint_heatmap_predictions=rescored_heatmap,\n keypoint_heatmap_offsets=keypoint_offset,\n keypoint_score_heatmap=keypoint_score_heatmap)\n return keypoint_candidates, keypoint_scores\n\n\ndef regressed_keypoints_at_object_centers(regressed_keypoint_predictions,\n y_indices, x_indices):\n \"\"\"Returns the regressed keypoints at specified object centers.\n\n The original keypoint predictions are regressed relative to each feature map\n location. 
The returned keypoints are expressed in absolute coordinates in the\n output frame (i.e. the center offsets are added to each individual regressed\n set of keypoints).\n\n Args:\n regressed_keypoint_predictions: A float tensor of shape\n [batch_size, height, width, 2 * num_keypoints] holding regressed\n keypoints. The last dimension has keypoint coordinates ordered as follows:\n [y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.\n y_indices: A [batch, num_instances] int tensor holding y indices for object\n centers. These indices correspond to locations in the output feature map.\n x_indices: A [batch, num_instances] int tensor holding x indices for object\n centers. These indices correspond to locations in the output feature map.\n\n Returns:\n A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where\n regressed keypoints are gathered at the provided locations, and converted\n to absolute coordinates in the output coordinate frame.\n \"\"\"\n batch_size, num_instances = _get_shape(y_indices, 2)\n\n # TF Lite does not support tf.gather with batch_dims > 0, so we need to use\n # tf_gather_nd instead and here we prepare the indices for that.\n combined_indices = tf.stack([\n _multi_range(batch_size, value_repetitions=num_instances),\n tf.reshape(y_indices, [-1]),\n tf.reshape(x_indices, [-1])\n ], axis=1)\n\n relative_regressed_keypoints = tf.gather_nd(regressed_keypoint_predictions,\n combined_indices)\n relative_regressed_keypoints = tf.reshape(\n relative_regressed_keypoints,\n [batch_size, num_instances, -1, 2])\n relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack(\n relative_regressed_keypoints, axis=3)\n y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1))\n x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1))\n absolute_regressed_keypoints = tf.stack(\n [y_indices + relative_regressed_keypoints_y,\n x_indices + relative_regressed_keypoints_x],\n axis=3)\n return tf.reshape(absolute_regressed_keypoints,\n [batch_size, num_instances, -1])\n\n\ndef sdr_scaled_ranking_score(\n keypoint_scores, distances, bboxes, score_distance_multiplier):\n \"\"\"Score-to-distance-ratio method to rank keypoint candidates.\n\n This corresponds to the ranking method: 'score_scaled_distance_ratio'. The\n keypoint candidates are ranked using the formula:\n ranking_score = score / (distance + offset)\n\n where 'score' is the keypoint heatmap scores, 'distance' is the distance\n between the heatmap peak location and the regressed joint location,\n 'offset' is a function of the predicted bounding box:\n offset = max(bbox height, bbox width) * score_distance_multiplier\n\n The ranking score is used to find the best keypoint candidate for snapping\n regressed joints.\n\n Args:\n keypoint_scores: A float tensor of shape\n [batch_size, max_candidates, num_keypoints] indicating the scores for\n keypoint candidates.\n distances: A float tensor of shape\n [batch_size, num_instances, max_candidates, num_keypoints] indicating the\n distances between the keypoint candidates and the joint regression\n locations of each instances.\n bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted\n bounding boxes for each instance, expressed in the output coordinate\n frame. 
If not provided, boxes will be computed from regressed keypoints.\n score_distance_multiplier: A scalar used to multiply the bounding box size\n to be the offset in the score-to-distance-ratio formula.\n\n Returns:\n A float tensor of shape [batch_size, num_instances, max_candidates,\n num_keypoints] representing the ranking scores of each keypoint candidates.\n \"\"\"\n # Get ymin, xmin, ymax, xmax bounding box coordinates.\n # Shape: [batch_size, num_instances]\n ymin, xmin, ymax, xmax = tf.unstack(bboxes, axis=2)\n\n # Shape: [batch_size, num_instances].\n offsets = tf.math.maximum(\n ymax - ymin, xmax - xmin) * score_distance_multiplier\n\n # Shape: [batch_size, num_instances, max_candidates, num_keypoints]\n ranking_scores = keypoint_scores[:, tf.newaxis, :, :] / (\n distances + offsets[:, :, tf.newaxis, tf.newaxis])\n return ranking_scores\n\n\ndef gaussian_weighted_score(\n keypoint_scores, distances, keypoint_std_dev, bboxes):\n \"\"\"Gaussian weighted method to rank keypoint candidates.\n\n This corresponds to the ranking method: 'gaussian_weighted'. The\n keypoint candidates are ranked using the formula:\n score * exp((-distances^2) / (2 * sigma^2))\n\n where 'score' is the keypoint heatmap score, 'distances' is the distance\n between the heatmap peak location and the regressed joint location and 'sigma'\n is a Gaussian standard deviation used in generating the Gausian heatmap target\n multiplied by the 'std_dev_multiplier'.\n\n The ranking score is used to find the best keypoint candidate for snapping\n regressed joints.\n\n Args:\n keypoint_scores: A float tensor of shape\n [batch_size, max_candidates, num_keypoints] indicating the scores for\n keypoint candidates.\n distances: A float tensor of shape\n [batch_size, num_instances, max_candidates, num_keypoints] indicating the\n distances between the keypoint candidates and the joint regression\n locations of each instances.\n keypoint_std_dev: A list of float represent the standard deviation of the\n Gaussian kernel used to generate the keypoint heatmap. It is to provide\n the flexibility of using different sizes of Gaussian kernel for each\n keypoint class.\n bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted\n bounding boxes for each instance, expressed in the output coordinate\n frame. 
If not provided, boxes will be computed from regressed keypoints.\n\n Returns:\n A float tensor of shape [batch_size, num_instances, max_candidates,\n num_keypoints] representing the ranking scores of each keypoint candidates.\n \"\"\"\n # Get ymin, xmin, ymax, xmax bounding box coordinates.\n # Shape: [batch_size, num_instances]\n ymin, xmin, ymax, xmax = tf.unstack(bboxes, axis=2)\n\n # shape: [num_keypoints]\n keypoint_std_dev = tf.constant(keypoint_std_dev)\n\n # shape: [batch_size, num_instances]\n sigma = cn_assigner._compute_std_dev_from_box_size( # pylint: disable=protected-access\n ymax - ymin, xmax - xmin, min_overlap=0.7)\n # shape: [batch_size, num_instances, num_keypoints]\n sigma = keypoint_std_dev[tf.newaxis, tf.newaxis, :] * sigma[:, :, tf.newaxis]\n (_, _, max_candidates, _) = _get_shape(distances, 4)\n # shape: [batch_size, num_instances, max_candidates, num_keypoints]\n sigma = tf.tile(\n sigma[:, :, tf.newaxis, :], multiples=[1, 1, max_candidates, 1])\n\n gaussian_map = tf.exp((-1 * distances * distances) / (2 * sigma * sigma))\n return keypoint_scores[:, tf.newaxis, :, :] * gaussian_map\n\n\ndef refine_keypoints(regressed_keypoints,\n keypoint_candidates,\n keypoint_scores,\n num_keypoint_candidates,\n bboxes=None,\n unmatched_keypoint_score=0.1,\n box_scale=1.2,\n candidate_search_scale=0.3,\n candidate_ranking_mode='min_distance',\n score_distance_offset=1e-6,\n keypoint_depth_candidates=None,\n keypoint_score_threshold=0.1,\n score_distance_multiplier=0.1,\n keypoint_std_dev=None):\n \"\"\"Refines regressed keypoints by snapping to the nearest candidate keypoints.\n\n The initial regressed keypoints represent a full set of keypoints regressed\n from the centers of the objects. The keypoint candidates are estimated\n independently from heatmaps, and are not associated with any object instances.\n This function refines the regressed keypoints by \"snapping\" to the\n nearest/highest score/highest score-distance ratio (depending on the\n candidate_ranking_mode) candidate of the same keypoint type (e.g. \"nose\").\n If no candidates are nearby, the regressed keypoint remains unchanged.\n\n In order to snap a regressed keypoint to a candidate keypoint, the following\n must be satisfied:\n - the candidate keypoint must be of the same type as the regressed keypoint\n - the candidate keypoint must not lie outside the predicted boxes (or the\n boxes which encloses the regressed keypoints for the instance if `bboxes` is\n not provided). 
Note that the box is scaled by\n `box_scale` in height and width, to provide some margin around the\n keypoints\n - the distance to the closest candidate keypoint cannot exceed\n candidate_search_scale * max(height, width), where height and width refer to\n the bounding box for the instance.\n\n Note that the same candidate keypoint is allowed to snap to regressed\n keypoints in different instances.\n\n Args:\n regressed_keypoints: A float tensor of shape\n [batch_size, num_instances, num_keypoints, 2] with the initial regressed\n keypoints.\n keypoint_candidates: A tensor of shape\n [batch_size, max_candidates, num_keypoints, 2] holding the location of\n keypoint candidates in [y, x] format (expressed in absolute coordinates in\n the output coordinate frame).\n keypoint_scores: A float tensor of shape\n [batch_size, max_candidates, num_keypoints] indicating the scores for\n keypoint candidates.\n num_keypoint_candidates: An integer tensor of shape\n [batch_size, num_keypoints] indicating the number of valid candidates for\n each keypoint type, as there may be padding (dim 1) of\n `keypoint_candidates` and `keypoint_scores`.\n bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted\n bounding boxes for each instance, expressed in the output coordinate\n frame. If not provided, boxes will be computed from regressed keypoints.\n unmatched_keypoint_score: float, the default score to use for regressed\n keypoints that are not successfully snapped to a nearby candidate.\n box_scale: float, the multiplier to expand the bounding boxes (either the\n provided boxes or those which tightly cover the regressed keypoints) for\n an instance. This scale is typically larger than 1.0 when not providing\n `bboxes`.\n candidate_search_scale: float, the scale parameter that multiplies the\n largest dimension of a bounding box. The resulting distance becomes a\n search radius for candidates in the vicinity of each regressed keypoint.\n candidate_ranking_mode: A string as one of ['min_distance',\n 'score_distance_ratio', 'score_scaled_distance_ratio',\n 'gaussian_weighted'] indicating how to select the candidate. If an\n invalid value is provided, a ValueError will be raised.\n score_distance_offset: The distance offset to apply in the denominator when\n candidate_ranking_mode is 'score_distance_ratio'. The metric to maximize\n in this scenario is score / (distance + score_distance_offset). Larger\n values of score_distance_offset make the keypoint score gain more relative\n importance.\n keypoint_depth_candidates: (optional) A float tensor of shape\n [batch_size, max_candidates, num_keypoints] indicating the depths for\n keypoint candidates.\n keypoint_score_threshold: float, The heatmap score threshold for\n a keypoint to become a valid candidate.\n score_distance_multiplier: A scalar used to multiply the bounding box size\n to be used as the offset in the score-to-distance-ratio formula.\n keypoint_std_dev: A list of floats representing the standard deviations of\n the Gaussian kernel used to rank the keypoint candidates. It offers the\n flexibility of using different sizes of Gaussian kernel for each keypoint\n class.
Only applicable when the candidate_ranking_mode equals\n 'gaussian_weighted'.\n\n Returns:\n A tuple with:\n refined_keypoints: A float tensor of shape\n [batch_size, num_instances, num_keypoints, 2] with the final, refined\n keypoints.\n refined_scores: A float tensor of shape\n [batch_size, num_instances, num_keypoints] with scores associated with all\n instances and keypoints in `refined_keypoints`.\n\n Raises:\n ValueError: if the provided candidate_ranking_mode is not one of\n ['min_distance', 'score_distance_ratio', 'score_scaled_distance_ratio',\n 'gaussian_weighted']\n \"\"\"\n batch_size, num_instances, num_keypoints, _ = (\n shape_utils.combined_static_and_dynamic_shape(regressed_keypoints))\n max_candidates = keypoint_candidates.shape[1]\n\n # Mark all invalid (i.e. padded) keypoint candidates so that they can be\n # excluded from the distance computations below.\n range_tiled = tf.tile(\n tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]),\n [batch_size, 1, num_keypoints])\n num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1),\n [1, max_candidates, 1])\n invalid_candidates = range_tiled >= num_candidates_tiled\n\n # Pairwise squared distances between regressed keypoints and candidate\n # keypoints (for a single keypoint type).\n # Shape [batch_size, num_instances, 1, num_keypoints, 2].\n regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints,\n axis=2)\n # Shape [batch_size, 1, max_candidates, num_keypoints, 2].\n keypoint_candidates_expanded = tf.expand_dims(\n keypoint_candidates, axis=1)\n # Use explicit tensor shape broadcasting (since the tensor dimensions are\n # expanded to 5D) to make it tf.lite compatible.\n regressed_keypoint_expanded = tf.tile(\n regressed_keypoint_expanded, multiples=[1, 1, max_candidates, 1, 1])\n keypoint_candidates_expanded = tf.tile(\n keypoint_candidates_expanded, multiples=[1, num_instances, 1, 1, 1])\n # Replace tf.math.squared_difference by the \"-\" operator and tf.multiply ops\n # since the tf.lite converter doesn't support squared_difference with\n # undetermined dimensions.\n diff = regressed_keypoint_expanded - keypoint_candidates_expanded\n sqrd_distances = tf.math.reduce_sum(tf.multiply(diff, diff), axis=-1)\n distances = tf.math.sqrt(sqrd_distances)\n\n # Replace the invalid candidates with a large constant (1e5) to make sure the\n # following reduce_min/argmin behaves properly.\n max_dist = 1e5\n distances = tf.where(\n tf.tile(\n tf.expand_dims(invalid_candidates, axis=1),\n multiples=[1, num_instances, 1, 1]),\n tf.ones_like(distances) * max_dist,\n distances\n )\n\n # Determine the candidates that have the minimum distance to the regressed\n # keypoints.
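Since padded candidates were assigned the max_dist sentinel above, they\n # never win the argmin/argmax reductions below.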
Shape [batch_size, num_instances, num_keypoints].\n min_distances = tf.math.reduce_min(distances, axis=2)\n if candidate_ranking_mode == 'min_distance':\n nearby_candidate_inds = tf.math.argmin(distances, axis=2)\n elif candidate_ranking_mode == 'score_distance_ratio':\n # tiled_keypoint_scores:\n # Shape [batch_size, num_instances, max_candidates, num_keypoints].\n tiled_keypoint_scores = tf.tile(\n tf.expand_dims(keypoint_scores, axis=1),\n multiples=[1, num_instances, 1, 1])\n ranking_scores = tiled_keypoint_scores / (distances + score_distance_offset)\n nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)\n elif candidate_ranking_mode == 'score_scaled_distance_ratio':\n ranking_scores = sdr_scaled_ranking_score(\n keypoint_scores, distances, bboxes, score_distance_multiplier)\n nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)\n elif candidate_ranking_mode == 'gaussian_weighted':\n ranking_scores = gaussian_weighted_score(\n keypoint_scores, distances, keypoint_std_dev, bboxes)\n nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)\n weighted_scores = tf.math.reduce_max(ranking_scores, axis=2)\n else:\n raise ValueError('Not recognized candidate_ranking_mode: %s' %\n candidate_ranking_mode)\n\n # Gather the coordinates and scores corresponding to the closest candidates.\n # Shapes of the tensors are [batch_size, num_instances, num_keypoints, 2] and\n # [batch_size, num_instances, num_keypoints], respectively.\n (nearby_candidate_coords, nearby_candidate_scores,\n nearby_candidate_depths) = (\n _gather_candidates_at_indices(keypoint_candidates, keypoint_scores,\n nearby_candidate_inds,\n keypoint_depth_candidates))\n\n # If the ranking mode is 'gaussian_weighted', we use the ranking scores as the\n # final keypoint confidence since their values are within [0, 1].\n if candidate_ranking_mode == 'gaussian_weighted':\n nearby_candidate_scores = weighted_scores\n\n if bboxes is None:\n # Filter out the chosen candidates with scores lower than the keypoint\n # score threshold.\n mask = tf.cast(nearby_candidate_scores <\n keypoint_score_threshold, tf.int32)\n else:\n bboxes_flattened = tf.reshape(bboxes, [-1, 4])\n\n # Scale the bounding boxes.\n # Shape [batch_size, num_instances, 4].\n boxlist = box_list.BoxList(bboxes_flattened)\n boxlist_scaled = box_list_ops.scale_height_width(\n boxlist, box_scale, box_scale)\n bboxes_scaled = boxlist_scaled.get()\n bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4])\n\n # Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint.\n # Shape [batch_size, num_instances, num_keypoints].\n bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1])\n ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3)\n\n # Produce a mask that indicates whether the original regressed keypoint\n # should be used instead of a candidate keypoint.\n # Shape [batch_size, num_instances, num_keypoints].\n search_radius = (\n tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale)\n mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) +\n tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) +\n tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) +\n tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) +\n # Filter out the chosen candidates with scores lower than the keypoint\n # score threshold.\n tf.cast(nearby_candidate_scores <\n keypoint_score_threshold, tf.int32) +\n tf.cast(min_distances > search_radius, tf.int32))\n mask = mask > 0\n\n # Create
refined keypoints where candidate keypoints replace original\n # regressed keypoints if they are in the vicinity of the regressed keypoints.\n # Shape [batch_size, num_instances, num_keypoints, 2].\n refined_keypoints = tf.where(\n tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]),\n regressed_keypoints,\n nearby_candidate_coords)\n\n # Update keypoints scores. In the case where we use the original regressed\n # keypoints, we use a default score of `unmatched_keypoint_score`.\n # Shape [batch_size, num_instances, num_keypoints].\n refined_scores = tf.where(\n mask,\n unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores),\n nearby_candidate_scores)\n\n refined_depths = None\n if nearby_candidate_depths is not None:\n refined_depths = tf.where(mask, tf.zeros_like(nearby_candidate_depths),\n nearby_candidate_depths)\n\n return refined_keypoints, refined_scores, refined_depths\n\n\ndef _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds,\n num_total_keypoints):\n \"\"\"Scatter keypoint elements into tensors with full keypoints dimension.\n\n Args:\n keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32\n tensor.\n keypoint_scores: a [batch_size, num_instances, num_keypoints] float32\n tensor.\n keypoint_inds: a list of integers that indicate the keypoint indices for\n this specific keypoint class. These indices are used to scatter into\n tensors that have a `num_total_keypoints` dimension.\n num_total_keypoints: The total number of keypoints that this model predicts.\n\n Returns:\n A tuple with\n keypoint_coords_padded: a\n [batch_size, num_instances, num_total_keypoints,2] float32 tensor.\n keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints]\n float32 tensor.\n \"\"\"\n batch_size, num_instances, _, _ = (\n shape_utils.combined_static_and_dynamic_shape(keypoint_coords))\n kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])\n kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])\n kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1)\n kpt_coords_scattered = tf.scatter_nd(\n indices=kpt_inds_tensor,\n updates=kpt_coords_transposed,\n shape=[num_total_keypoints, batch_size, num_instances, 2])\n kpt_scores_scattered = tf.scatter_nd(\n indices=kpt_inds_tensor,\n updates=kpt_scores_transposed,\n shape=[num_total_keypoints, batch_size, num_instances])\n keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])\n keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])\n return keypoint_coords_padded, keypoint_scores_padded\n\n\ndef _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds,\n max_instances):\n \"\"\"Scatter keypoint elements into tensors with full instance dimension.\n\n Args:\n keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32\n tensor.\n keypoint_scores: a [batch_size, num_instances, num_keypoints] float32\n tensor.\n instance_inds: a list of integers that indicate the instance indices for\n these keypoints. 
These indices are used to scatter into tensors\n that have a `max_instances` dimension.\n max_instances: The maximum number of instances detected by the model.\n\n Returns:\n A tuple with\n keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2]\n float32 tensor.\n keypoint_scores_padded: a [batch_size, max_instances, num_keypoints]\n float32 tensor.\n \"\"\"\n batch_size, _, num_keypoints, _ = (\n shape_utils.combined_static_and_dynamic_shape(keypoint_coords))\n kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])\n kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])\n instance_inds = tf.expand_dims(instance_inds, axis=-1)\n kpt_coords_scattered = tf.scatter_nd(\n indices=instance_inds,\n updates=kpt_coords_transposed,\n shape=[max_instances, batch_size, num_keypoints, 2])\n kpt_scores_scattered = tf.scatter_nd(\n indices=instance_inds,\n updates=kpt_scores_transposed,\n shape=[max_instances, batch_size, num_keypoints])\n keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])\n keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])\n return keypoint_coords_padded, keypoint_scores_padded\n\n\ndef _gather_candidates_at_indices(keypoint_candidates,\n keypoint_scores,\n indices,\n keypoint_depth_candidates=None):\n \"\"\"Gathers keypoint candidate coordinates and scores at indices.\n\n Args:\n keypoint_candidates: a float tensor of shape [batch_size, max_candidates,\n num_keypoints, 2] with candidate coordinates.\n keypoint_scores: a float tensor of shape [batch_size, max_candidates,\n num_keypoints] with keypoint scores.\n indices: an integer tensor of shape [batch_size, num_indices, num_keypoints]\n with indices.\n keypoint_depth_candidates: (optional) a float tensor of shape [batch_size,\n max_candidates, num_keypoints] with keypoint depths.\n\n Returns:\n A tuple with\n gathered_keypoint_candidates: a float tensor of shape [batch_size,\n num_indices, num_keypoints, 2] with gathered coordinates.\n gathered_keypoint_scores: a float tensor of shape [batch_size,\n num_indices, num_keypoints].\n gathered_keypoint_depths: a float tensor of shape [batch_size,\n num_indices, num_keypoints]. 
Returns None if the input\n keypoint_depth_candidates is None.\n \"\"\"\n batch_size, num_indices, num_keypoints = _get_shape(indices, 3)\n\n # Transpose tensors so that all batch dimensions are up front.\n keypoint_candidates_transposed = tf.transpose(keypoint_candidates,\n [0, 2, 1, 3])\n keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1])\n nearby_candidate_inds_transposed = tf.transpose(indices, [0, 2, 1])\n\n # TF Lite does not support tf.gather with batch_dims > 0, so we need to use\n # tf.gather_nd instead, and here we prepare the indices for that.\n combined_indices = tf.stack([\n _multi_range(\n batch_size,\n value_repetitions=num_keypoints * num_indices,\n dtype=tf.int64),\n _multi_range(\n num_keypoints,\n value_repetitions=num_indices,\n range_repetitions=batch_size,\n dtype=tf.int64),\n tf.reshape(nearby_candidate_inds_transposed, [-1])\n ], axis=1)\n\n nearby_candidate_coords_transposed = tf.gather_nd(\n keypoint_candidates_transposed, combined_indices)\n nearby_candidate_coords_transposed = tf.reshape(\n nearby_candidate_coords_transposed,\n [batch_size, num_keypoints, num_indices, -1])\n\n nearby_candidate_scores_transposed = tf.gather_nd(keypoint_scores_transposed,\n combined_indices)\n nearby_candidate_scores_transposed = tf.reshape(\n nearby_candidate_scores_transposed,\n [batch_size, num_keypoints, num_indices])\n\n gathered_keypoint_candidates = tf.transpose(\n nearby_candidate_coords_transposed, [0, 2, 1, 3])\n # The reshape operation above may result in a singleton last dimension, but\n # downstream code requires it to always be at least 2-valued.\n original_shape = tf.shape(gathered_keypoint_candidates)\n new_shape = tf.concat((original_shape[:3],\n [tf.maximum(original_shape[3], 2)]), 0)\n gathered_keypoint_candidates = tf.reshape(gathered_keypoint_candidates,\n new_shape)\n gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed,\n [0, 2, 1])\n\n gathered_keypoint_depths = None\n if keypoint_depth_candidates is not None:\n keypoint_depths_transposed = tf.transpose(keypoint_depth_candidates,\n [0, 2, 1])\n nearby_candidate_depths_transposed = tf.gather_nd(\n keypoint_depths_transposed, combined_indices)\n nearby_candidate_depths_transposed = tf.reshape(\n nearby_candidate_depths_transposed,\n [batch_size, num_keypoints, num_indices])\n gathered_keypoint_depths = tf.transpose(nearby_candidate_depths_transposed,\n [0, 2, 1])\n return (gathered_keypoint_candidates, gathered_keypoint_scores,\n gathered_keypoint_depths)\n\n\ndef flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):\n \"\"\"Get the index in a flattened array given row and column indices.\"\"\"\n return (row_indices * num_cols) + col_indices\n\n\ndef row_col_channel_indices_from_flattened_indices(indices, num_cols,\n num_channels):\n \"\"\"Computes row, column and channel indices from flattened indices.\n\n Args:\n indices: An integer tensor of any shape holding the indices in the flattened\n space.\n num_cols: Number of columns in the image (width).\n num_channels: Number of channels in the image.\n\n Returns:\n row_indices: The row indices corresponding to each of the input indices.\n Same shape as indices.\n col_indices: The column indices corresponding to each of the input indices.\n Same shape as indices.\n channel_indices: The channel indices corresponding to each of the input\n indices.\n\n \"\"\"\n # Be careful with this function when running a model in float16 precision\n # (e.g.
TF.js with WebGL) because the array indices may not be represented\n # accurately if they are too large, resulting in incorrect channel indices.\n # See:\n # https://en.wikipedia.org/wiki/Half-precision_floating-point_format#Precision_limitations_on_integer_values\n #\n # Avoid using the mod operator to make the ops easier to keep compatible\n # with different environments, e.g. WASM.\n row_indices = (indices // num_channels) // num_cols\n col_indices = (indices // num_channels) - row_indices * num_cols\n channel_indices_temp = indices // num_channels\n channel_indices = indices - channel_indices_temp * num_channels\n\n return row_indices, col_indices, channel_indices\n\n\ndef row_col_indices_from_flattened_indices(indices, num_cols):\n \"\"\"Computes row and column indices from flattened indices.\n\n Args:\n indices: An integer tensor of any shape holding the indices in the flattened\n space.\n num_cols: Number of columns in the image (width).\n\n Returns:\n row_indices: The row indices corresponding to each of the input indices.\n Same shape as indices.\n col_indices: The column indices corresponding to each of the input indices.\n Same shape as indices.\n\n \"\"\"\n # Avoid using the mod operator to make the ops easier to keep compatible\n # with different environments, e.g. WASM.\n row_indices = indices // num_cols\n col_indices = indices - row_indices * num_cols\n\n return row_indices, col_indices\n\n\ndef get_valid_anchor_weights_in_flattened_image(true_image_shapes, height,\n width):\n \"\"\"Computes valid anchor weights for an image assuming pixels will be flattened.\n\n This function is useful when we only want to penalize valid areas in the\n image in the case when padding is used. The function assumes that the loss\n function will be applied after flattening the spatial dimensions and returns\n anchor weights accordingly.\n\n Args:\n true_image_shapes: An integer tensor of shape [batch_size, 3] representing\n the true image shape (without padding) for each sample in the batch.\n height: height of the prediction from the network.\n width: width of the prediction from the network.\n\n Returns:\n valid_anchor_weights: a float tensor of shape [batch_size, height * width]\n with 1s in locations where the spatial coordinates fall within the height\n and width in true_image_shapes.\n \"\"\"\n\n indices = tf.reshape(tf.range(height * width), [1, -1])\n batch_size = tf.shape(true_image_shapes)[0]\n batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices\n\n y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices(\n batch_indices, width, 1)\n\n max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1]\n max_x = _to_float32(tf.expand_dims(max_x, 1))\n max_y = _to_float32(tf.expand_dims(max_y, 1))\n\n x_coords = _to_float32(x_coords)\n y_coords = _to_float32(y_coords)\n\n valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y)\n\n return _to_float32(valid_mask)\n\n\ndef convert_strided_predictions_to_normalized_boxes(boxes, stride,\n true_image_shapes):\n \"\"\"Converts predictions in the output space to normalized boxes.\n\n Boxes falling outside the valid image boundary are clipped to be on the\n boundary.\n\n Args:\n boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw\n coordinates of boxes in the model's output space.\n stride: The stride in the output space.\n true_image_shapes: A tensor of shape [batch_size, 3] representing the true\n shape of the input not considering padding.\n\n Returns:\n boxes: A tensor of shape [batch_size,
num_boxes, 4] representing the\n coordinates of the normalized boxes.\n \"\"\"\n # Note: We use tf ops instead of functions in box_list_ops to make this\n # function compatible with dynamic batch size.\n boxes = boxes * stride\n true_image_shapes = tf.tile(true_image_shapes[:, tf.newaxis, :2], [1, 1, 2])\n boxes = boxes / tf.cast(true_image_shapes, tf.float32)\n boxes = tf.clip_by_value(boxes, 0.0, 1.0)\n return boxes\n\n\ndef convert_strided_predictions_to_normalized_keypoints(\n keypoint_coords, keypoint_scores, stride, true_image_shapes,\n clip_out_of_frame_keypoints=False):\n \"\"\"Converts predictions in the output space to normalized keypoints.\n\n If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside\n the valid image boundary are normalized but not clipped; If\n clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the\n valid image boundary are clipped to the closest image boundary and the scores\n will be set to 0.0.\n\n Args:\n keypoint_coords: A tensor of shape\n [batch_size, num_instances, num_keypoints, 2] holding the raw coordinates\n of keypoints in the model's output space.\n keypoint_scores: A tensor of shape\n [batch_size, num_instances, num_keypoints] holding the keypoint scores.\n stride: The stride in the output space.\n true_image_shapes: A tensor of shape [batch_size, 3] representing the true\n shape of the input not considering padding.\n clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside\n the image boundary should be clipped. If True, keypoint coords will be\n clipped to image boundary. If False, keypoints are normalized but not\n filtered based on their location.\n\n Returns:\n keypoint_coords_normalized: A tensor of shape\n [batch_size, num_instances, num_keypoints, 2] representing the coordinates\n of the normalized keypoints.\n keypoint_scores: A tensor of shape\n [batch_size, num_instances, num_keypoints] representing the updated\n keypoint scores.\n \"\"\"\n # Flatten keypoints and scores.\n batch_size, _, _, _ = (\n shape_utils.combined_static_and_dynamic_shape(keypoint_coords))\n\n # Scale and normalize keypoints.\n true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)\n yscale = float(stride) / tf.cast(true_heights, tf.float32)\n xscale = float(stride) / tf.cast(true_widths, tf.float32)\n yx_scale = tf.stack([yscale, xscale], axis=1)\n keypoint_coords_normalized = keypoint_coords * tf.reshape(\n yx_scale, [batch_size, 1, 1, 2])\n\n if clip_out_of_frame_keypoints:\n # Determine the keypoints that are in the true image regions.\n valid_indices = tf.logical_and(\n tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0,\n keypoint_coords_normalized[:, :, :, 0] <= 1.0),\n tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0,\n keypoint_coords_normalized[:, :, :, 1] <= 1.0))\n batch_window = tf.tile(\n tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32),\n multiples=[batch_size, 1])\n def clip_to_window(inputs):\n keypoints, window = inputs\n return keypoint_ops.clip_to_window(keypoints, window)\n\n keypoint_coords_normalized = shape_utils.static_or_dynamic_map_fn(\n clip_to_window, [keypoint_coords_normalized, batch_window],\n dtype=tf.float32, back_prop=False)\n keypoint_scores = tf.where(valid_indices, keypoint_scores,\n tf.zeros_like(keypoint_scores))\n return keypoint_coords_normalized, keypoint_scores\n\n\ndef convert_strided_predictions_to_instance_masks(\n boxes, classes, masks, true_image_shapes,\n densepose_part_heatmap=None, densepose_surface_coords=None, 
stride=4,\n mask_height=256, mask_width=256, score_threshold=0.5,\n densepose_class_index=-1):\n \"\"\"Converts predicted full-image masks into instance masks.\n\n For each predicted detection box:\n * Crop and resize the predicted mask (and optionally DensePose coordinates)\n based on the detected bounding box coordinates and class prediction. Uses\n bilinear resampling.\n * Binarize the mask using the provided score threshold.\n\n Args:\n boxes: A tensor of shape [batch, max_detections, 4] holding the predicted\n boxes, in normalized coordinates (relative to the true image dimensions).\n classes: An integer tensor of shape [batch, max_detections] containing the\n detected class for each box (0-indexed).\n masks: A [batch, output_height, output_width, num_classes] float32\n tensor with class probabilities.\n true_image_shapes: A tensor of shape [batch, 3] representing the true\n shape of the inputs not considering padding.\n densepose_part_heatmap: (Optional) A [batch, output_height, output_width,\n num_parts] float32 tensor with part scores (i.e. logits).\n densepose_surface_coords: (Optional) A [batch, output_height, output_width,\n 2 * num_parts] float32 tensor with predicted part coordinates (in\n vu-format).\n stride: The stride in the output space.\n mask_height: The desired resized height for instance masks.\n mask_width: The desired resized width for instance masks.\n score_threshold: The threshold at which to convert predicted mask\n scores into foreground pixels.\n densepose_class_index: The class index (0-indexed) corresponding to the\n class which has DensePose labels (e.g. person class).\n\n Returns:\n A tuple of masks and surface_coords.\n instance_masks: A [batch_size, max_detections, mask_height, mask_width]\n uint8 tensor with predicted foreground mask for each\n instance. If DensePose tensors are provided, then each pixel value in the\n mask encodes the 1-indexed part.\n surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2]\n float32 tensor with (v, u) coordinates. Note that v, u coordinates are\n only defined on instance masks, and the coordinates at each location of\n the foreground mask correspond to coordinates on a local part coordinate\n system (the specific part can be inferred from the `instance_masks`\n output).
If DensePose feature maps are not passed to this function, this\n output will be None.\n\n Raises:\n ValueError: If one but not both of `densepose_part_heatmap` and\n `densepose_surface_coords` is provided.\n \"\"\"\n batch_size, output_height, output_width, _ = (\n shape_utils.combined_static_and_dynamic_shape(masks))\n input_height = stride * output_height\n input_width = stride * output_width\n\n true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)\n # If necessary, create dummy DensePose tensors to simplify the map function.\n densepose_present = True\n if ((densepose_part_heatmap is not None) ^\n (densepose_surface_coords is not None)):\n raise ValueError('To use DensePose, both `densepose_part_heatmap` and '\n '`densepose_surface_coords` must be provided')\n if densepose_part_heatmap is None and densepose_surface_coords is None:\n densepose_present = False\n densepose_part_heatmap = tf.zeros(\n (batch_size, output_height, output_width, 1), dtype=tf.float32)\n densepose_surface_coords = tf.zeros(\n (batch_size, output_height, output_width, 2), dtype=tf.float32)\n crop_and_threshold_fn = functools.partial(\n crop_and_threshold_masks, input_height=input_height,\n input_width=input_width, mask_height=mask_height, mask_width=mask_width,\n score_threshold=score_threshold,\n densepose_class_index=densepose_class_index)\n\n instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn(\n crop_and_threshold_fn,\n elems=[boxes, classes, masks, densepose_part_heatmap,\n densepose_surface_coords, true_heights, true_widths],\n dtype=[tf.uint8, tf.float32],\n back_prop=False)\n surface_coords = surface_coords if densepose_present else None\n return instance_masks, surface_coords\n\n\ndef crop_and_threshold_masks(elems, input_height, input_width, mask_height=256,\n mask_width=256, score_threshold=0.5,\n densepose_class_index=-1):\n \"\"\"Crops and thresholds masks based on detection boxes.\n\n Args:\n elems: A tuple of\n boxes - float32 tensor of shape [max_detections, 4]\n classes - int32 tensor of shape [max_detections] (0-indexed)\n masks - float32 tensor of shape [output_height, output_width, num_classes]\n part_heatmap - float32 tensor of shape [output_height, output_width,\n num_parts]\n surf_coords - float32 tensor of shape [output_height, output_width,\n 2 * num_parts]\n true_height - scalar int tensor\n true_width - scalar int tensor\n input_height: Input height to network.\n input_width: Input width to network.\n mask_height: Height for resizing mask crops.\n mask_width: Width for resizing mask crops.\n score_threshold: The threshold at which to convert predicted mask\n scores into foreground pixels.\n densepose_class_index: scalar int tensor with the class index (0-indexed)\n for DensePose.\n\n Returns:\n A tuple of\n all_instances: A [max_detections, mask_height, mask_width] uint8 tensor\n with a predicted foreground mask for each instance. Background is encoded\n as 0, and foreground is encoded as a positive integer. Specific part\n indices are encoded as 1-indexed parts (for classes that have part\n information).\n surface_coords: A [max_detections, mask_height, mask_width, 2]\n float32 tensor with (v, u) coordinates for each part.\n \"\"\"\n (boxes, classes, masks, part_heatmap, surf_coords, true_height,\n true_width) = elems\n # Boxes are in normalized coordinates relative to true image shapes.
Convert\n # coordinates to be normalized relative to input image shapes (since masks\n # may still have padding).\n boxlist = box_list.BoxList(boxes)\n y_scale = true_height / input_height\n x_scale = true_width / input_width\n boxlist = box_list_ops.scale(boxlist, y_scale, x_scale)\n boxes = boxlist.get()\n # Convert masks from [output_height, output_width, num_classes] to\n # [num_classes, output_height, output_width, 1].\n num_classes = tf.shape(masks)[-1]\n masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis]\n # Tile part and surface coordinate masks for all classes.\n part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :],\n multiples=[num_classes, 1, 1, 1])\n surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :],\n multiples=[num_classes, 1, 1, 1])\n feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d],\n axis=-1)\n # The following tensor has shape\n # [max_detections, mask_height, mask_width, 1 + 3 * num_parts].\n cropped_masks = tf2.image.crop_and_resize(\n feature_maps_concat,\n boxes=boxes,\n box_indices=classes,\n crop_size=[mask_height, mask_width],\n method='bilinear')\n\n # Split the cropped masks back into instance masks, part masks, and surface\n # coordinates.\n num_parts = tf.shape(part_heatmap)[-1]\n instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split(\n cropped_masks, [1, num_parts, 2 * num_parts], axis=-1)\n\n # Threshold the instance masks. Resulting tensor has shape\n # [max_detections, mask_height, mask_width, 1].\n instance_masks_int = tf.cast(\n tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32)\n\n # Produce a binary mask that is 1.0 only:\n # - in the foreground region for an instance\n # - in detections corresponding to the DensePose class\n det_with_parts = tf.equal(classes, densepose_class_index)\n det_with_parts = tf.cast(\n tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32)\n instance_masks_with_parts = tf.math.multiply(instance_masks_int,\n det_with_parts)\n\n # Similarly, produce a binary mask that holds the foreground masks only for\n # instances without parts (i.e. 
non-DensePose classes).\n det_without_parts = 1 - det_with_parts\n instance_masks_without_parts = tf.math.multiply(instance_masks_int,\n det_without_parts)\n\n # Assemble a tensor that has standard instance segmentation masks for\n # non-DensePose classes (with values in [0, 1]), and part segmentation masks\n # for DensePose classes (with values in [0, 1, ..., num_parts]).\n part_mask_int_zero_indexed = tf.math.argmax(\n part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis]\n part_mask_int_one_indexed = part_mask_int_zero_indexed + 1\n all_instances = (instance_masks_without_parts +\n instance_masks_with_parts * part_mask_int_one_indexed)\n\n # Gather the surface coordinates for the parts.\n surface_coords_cropped = tf.reshape(\n surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2])\n surface_coords = gather_surface_coords_for_parts(surface_coords_cropped,\n part_mask_int_zero_indexed)\n surface_coords = (\n surface_coords * tf.cast(instance_masks_with_parts, tf.float32))\n\n return [tf.squeeze(all_instances, axis=3), surface_coords]\n\n\ndef gather_surface_coords_for_parts(surface_coords_cropped,\n highest_scoring_part):\n \"\"\"Gathers the (v, u) coordinates for the highest scoring DensePose parts.\n\n Args:\n surface_coords_cropped: A [max_detections, height, width, num_parts, 2]\n float32 tensor with (v, u) surface coordinates.\n highest_scoring_part: A [max_detections, height, width] integer tensor with\n the highest scoring part (0-indexed) indices for each location.\n\n Returns:\n A [max_detections, height, width, 2] float32 tensor with the (v, u)\n coordinates selected from the highest scoring parts.\n \"\"\"\n max_detections, height, width, num_parts, _ = (\n shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped))\n flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2])\n flattened_part_ids = tf.reshape(highest_scoring_part, [-1])\n\n # Produce lookup indices that represent the locations of the highest scoring\n # parts in the `flattened_surface_coords` tensor.\n flattened_lookup_indices = (\n num_parts * tf.range(max_detections * height * width) +\n flattened_part_ids)\n\n vu_coords_flattened = tf.gather(flattened_surface_coords,\n flattened_lookup_indices, axis=0)\n return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])\n\n\ndef predicted_embeddings_at_object_centers(embedding_predictions,\n y_indices, x_indices):\n \"\"\"Returns the predicted embeddings at specified object centers.\n\n Args:\n embedding_predictions: A float tensor of shape [batch_size, height, width,\n reid_embed_size] holding predicted embeddings.\n y_indices: A [batch, num_instances] int tensor holding y indices for object\n centers. These indices correspond to locations in the output feature map.\n x_indices: A [batch, num_instances] int tensor holding x indices for object\n centers.
These indices correspond to locations in the output feature map.\n\n Returns:\n A float tensor of shape [batch_size, num_objects, reid_embed_size] where\n predicted embeddings are gathered at the provided locations.\n \"\"\"\n batch_size, _, width, _ = _get_shape(embedding_predictions, 4)\n flattened_indices = flattened_indices_from_row_col_indices(\n y_indices, x_indices, width)\n _, num_instances = _get_shape(flattened_indices, 2)\n embeddings_flat = _flatten_spatial_dimensions(embedding_predictions)\n embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1)\n embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1])\n\n return embeddings\n\n\ndef mask_from_true_image_shape(data_shape, true_image_shapes):\n \"\"\"Get a binary mask based on the true_image_shape.\n\n Args:\n data_shape: a possibly static (4,) tensor for the shape of the feature\n map.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is of\n the form [height, width, channels] indicating the shapes of true\n images in the resized images, as resized images can be padded with\n zeros.\n Returns:\n a [batch, data_height, data_width, 1] tensor with 1.0 wherever the row\n index is less than the true image height and the column index is less\n than the true image width, and 0.0 elsewhere.\n \"\"\"\n mask_h = tf.cast(\n tf.range(data_shape[1]) < true_image_shapes[:, tf.newaxis, 0],\n tf.float32)\n mask_w = tf.cast(\n tf.range(data_shape[2]) < true_image_shapes[:, tf.newaxis, 1],\n tf.float32)\n mask = tf.expand_dims(\n mask_h[:, :, tf.newaxis] * mask_w[:, tf.newaxis, :], 3)\n return mask\n\n\nclass ObjectDetectionParams(\n collections.namedtuple('ObjectDetectionParams', [\n 'localization_loss', 'scale_loss_weight', 'offset_loss_weight',\n 'task_loss_weight', 'scale_head_num_filters',\n 'scale_head_kernel_sizes', 'offset_head_num_filters',\n 'offset_head_kernel_sizes'\n ])):\n \"\"\"Namedtuple to host object detection related parameters.\n\n This is a wrapper class over the fields that are either the hyper-parameters\n or the loss functions needed for the object detection task. The class is\n immutable after construction. Please see the __new__ function for detailed\n information on each field.\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls,\n localization_loss,\n scale_loss_weight,\n offset_loss_weight,\n task_loss_weight=1.0,\n scale_head_num_filters=(256),\n scale_head_kernel_sizes=(3),\n offset_head_num_filters=(256),\n offset_head_kernel_sizes=(3)):\n \"\"\"Constructor with default values for ObjectDetectionParams.\n\n Args:\n localization_loss: an object_detection.core.losses.Loss object to compute\n the loss for the center offset and height/width predictions in\n CenterNet.\n scale_loss_weight: float, The weight for localizing box size. Note that\n the scale loss is dependent on the input image size, since we penalize\n the raw height and width.
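For example, doubling the input resolution roughly doubles the raw\n height/width targets and hence the magnitude of this loss.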
This constant may need to be adjusted\n depending on the input size.\n offset_loss_weight: float, The weight for localizing center offsets.\n task_loss_weight: float, the weight of the object detection loss.\n scale_head_num_filters: filter numbers of the convolutional layers used\n by the object detection box scale prediction head.\n scale_head_kernel_sizes: kernel size of the convolutional layers used\n by the object detection box scale prediction head.\n offset_head_num_filters: filter numbers of the convolutional layers used\n by the object detection box offset prediction head.\n offset_head_kernel_sizes: kernel size of the convolutional layers used\n by the object detection box offset prediction head.\n\n Returns:\n An initialized ObjectDetectionParams namedtuple.\n \"\"\"\n return super(ObjectDetectionParams,\n cls).__new__(cls, localization_loss, scale_loss_weight,\n offset_loss_weight, task_loss_weight,\n scale_head_num_filters, scale_head_kernel_sizes,\n offset_head_num_filters, offset_head_kernel_sizes)\n\n\nclass KeypointEstimationParams(\n collections.namedtuple('KeypointEstimationParams', [\n 'task_name', 'class_id', 'keypoint_indices', 'classification_loss',\n 'localization_loss', 'keypoint_labels', 'keypoint_std_dev',\n 'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight',\n 'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold',\n 'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight',\n 'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale',\n 'candidate_search_scale', 'candidate_ranking_mode',\n 'offset_peak_radius', 'per_keypoint_offset', 'predict_depth',\n 'per_keypoint_depth', 'keypoint_depth_loss_weight',\n 'score_distance_offset', 'clip_out_of_frame_keypoints',\n 'rescore_instances', 'heatmap_head_num_filters',\n 'heatmap_head_kernel_sizes', 'offset_head_num_filters',\n 'offset_head_kernel_sizes', 'regress_head_num_filters',\n 'regress_head_kernel_sizes', 'score_distance_multiplier',\n 'std_dev_multiplier', 'rescoring_threshold', 'gaussian_denom_ratio',\n 'argmax_postprocessing'\n ])):\n \"\"\"Namedtuple to host keypoint estimation related parameters.\n\n This is a wrapper class over the fields that are either the hyper-parameters\n or the loss functions needed for the keypoint estimation task. The class is\n immutable after construction.
Please see the __new__ function for detailed\n information on each field.\n \"\"\"\n\n __slots__ = ()\n\n def __new__(cls,\n task_name,\n class_id,\n keypoint_indices,\n classification_loss,\n localization_loss,\n keypoint_labels=None,\n keypoint_std_dev=None,\n keypoint_heatmap_loss_weight=1.0,\n keypoint_offset_loss_weight=1.0,\n keypoint_regression_loss_weight=1.0,\n keypoint_candidate_score_threshold=0.1,\n heatmap_bias_init=-2.19,\n num_candidates_per_keypoint=100,\n task_loss_weight=1.0,\n peak_max_pool_kernel_size=3,\n unmatched_keypoint_score=0.1,\n box_scale=1.2,\n candidate_search_scale=0.3,\n candidate_ranking_mode='min_distance',\n offset_peak_radius=0,\n per_keypoint_offset=False,\n predict_depth=False,\n per_keypoint_depth=False,\n keypoint_depth_loss_weight=1.0,\n score_distance_offset=1e-6,\n clip_out_of_frame_keypoints=False,\n rescore_instances=False,\n heatmap_head_num_filters=(256),\n heatmap_head_kernel_sizes=(3),\n offset_head_num_filters=(256),\n offset_head_kernel_sizes=(3),\n regress_head_num_filters=(256),\n regress_head_kernel_sizes=(3),\n score_distance_multiplier=0.1,\n std_dev_multiplier=1.0,\n rescoring_threshold=0.0,\n argmax_postprocessing=False,\n gaussian_denom_ratio=0.1):\n \"\"\"Constructor with default values for KeypointEstimationParams.\n\n Args:\n task_name: string, the name of the task this namedtuple corresponds to.\n Note that it should be a unique identifier of the task.\n class_id: int, the ID of the class that contains the target keypoints to\n be considered in this task. For example, if the task is human pose\n estimation, the class id should correspond to the \"human\" class. Note\n that the ID is 0-based, meaning that class 0 corresponds to the first\n non-background object class.\n keypoint_indices: A list of integers representing the indices of the\n keypoints to be considered in this task. This is used to retrieve the\n subset of the keypoints from gt_keypoints that should be considered in\n this task.\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the class predictions in CenterNet.\n localization_loss: an object_detection.core.losses.Loss object to compute\n the loss for the center offset and height/width predictions in\n CenterNet.\n keypoint_labels: A list of strings representing the label text of each\n keypoint, e.g. \"nose\", \"left_shoulder\". Note that the length of this\n list should be equal to the length of keypoint_indices.\n keypoint_std_dev: A list of floats representing the standard deviations of\n the Gaussian kernel used to generate the keypoint heatmap. It provides\n the flexibility of using different sizes of Gaussian kernel for each\n keypoint class.\n keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.\n keypoint_offset_loss_weight: float, The weight for the keypoint offsets\n loss.\n keypoint_regression_loss_weight: float, The weight for keypoint regression\n loss. Note that the loss is dependent on the input image size, since we\n penalize the raw height and width. This constant may need to be adjusted\n depending on the input size.\n keypoint_candidate_score_threshold: float, The heatmap score threshold for\n a keypoint to become a valid candidate.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the class prediction head.
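The default of -2.19 makes the initial sigmoid output roughly 0.1\n (sigmoid(-2.19) is approximately 0.1), which keeps early heatmap\n predictions close to a rare-foreground prior.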
If set to None, the bias is\n initialized with zeros.\n num_candidates_per_keypoint: The maximum number of candidates to retrieve\n for each keypoint.\n task_loss_weight: float, the weight of the keypoint estimation loss.\n peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak\n score locations in a neighborhood (independently for each keypoint\n type).\n unmatched_keypoint_score: The default score to use for regressed keypoints\n that are not successfully snapped to a nearby candidate.\n box_scale: The multiplier to expand the bounding boxes (either the\n provided boxes or those which tightly cover the regressed keypoints).\n candidate_search_scale: The scale parameter that multiplies the largest\n dimension of a bounding box. The resulting distance becomes a search\n radius for candidates in the vicinity of each regressed keypoint.\n candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio',\n 'score_scaled_distance_ratio', 'gaussian_weighted'] indicating how to\n select the keypoint candidate.\n offset_peak_radius: The radius (in the unit of output pixel) around\n groundtruth heatmap peak to assign the offset targets. If set to 0, then\n the offset target will only be assigned to the heatmap peak (same\n behavior as the original paper).\n per_keypoint_offset: A bool indicating whether to assign offsets for each\n keypoint channel separately. If set to False, the output offset target has\n the shape [batch_size, out_height, out_width, 2] (same behavior as the\n original paper). If set to True, the output offset target has the shape\n [batch_size, out_height, out_width, 2 * num_keypoints] (recommended when\n the offset_peak_radius is not zero).\n predict_depth: A bool indicating whether to predict the depth of each\n keypoint.\n per_keypoint_depth: A bool indicating whether the model predicts the depth\n of each keypoint in independent channels. Similar to\n per_keypoint_offset but for the keypoint depth.\n keypoint_depth_loss_weight: The weight of the keypoint depth loss.\n score_distance_offset: The distance offset to apply in the denominator\n when candidate_ranking_mode is 'score_distance_ratio'. The metric to\n maximize in this scenario is score / (distance + score_distance_offset).\n Larger values of score_distance_offset make the keypoint score gain more\n relative importance.\n clip_out_of_frame_keypoints: Whether keypoints outside the image frame\n should be clipped back to the image boundary.
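Clipping is applied after the keypoints have been converted to\n normalized coordinates.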
If True, the keypoints\n that are clipped have scores set to 0.0.\n rescore_instances: Whether to rescore instances based on a combination of\n detection score and keypoint scores.\n heatmap_head_num_filters: filter numbers of the convolutional layers used\n by the keypoint heatmap prediction head.\n heatmap_head_kernel_sizes: kernel size of the convolutional layers used\n by the keypoint heatmap prediction head.\n offset_head_num_filters: filter numbers of the convolutional layers used\n by the keypoint offset prediction head.\n offset_head_kernel_sizes: kernel size of the convolutional layers used\n by the keypoint offset prediction head.\n regress_head_num_filters: filter numbers of the convolutional layers used\n by the keypoint regression prediction head.\n regress_head_kernel_sizes: kernel size of the convolutional layers used\n by the keypoint regression prediction head.\n score_distance_multiplier: A scalar used to multiply the bounding box size\n to be used as the offset in the score-to-distance-ratio formula.\n std_dev_multiplier: A scalar used to multiply the standard deviation to\n control the Gaussian kernel which is used to weight the candidates.\n rescoring_threshold: A scalar used when \"rescore_instances\" is set to\n True. The detection score of an instance is set to be the average over\n the scores of the keypoints whose scores are higher than the\n threshold.\n argmax_postprocessing: Whether to use the keypoint postprocessing logic\n that replaces the topk op with argmax. Usually used when exporting the\n model for predicting keypoints of multiple instances in the browser.\n gaussian_denom_ratio: The ratio used to multiply the image size to\n determine the denominator of the Gaussian formula. Only applicable when\n the candidate_ranking_mode is set to 'gaussian_weighted_const'.\n\n Returns:\n An initialized KeypointEstimationParams namedtuple.\n \"\"\"\n return super(KeypointEstimationParams, cls).__new__(\n cls, task_name, class_id, keypoint_indices, classification_loss,\n localization_loss, keypoint_labels, keypoint_std_dev,\n keypoint_heatmap_loss_weight, keypoint_offset_loss_weight,\n keypoint_regression_loss_weight, keypoint_candidate_score_threshold,\n heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight,\n peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale,\n candidate_search_scale, candidate_ranking_mode, offset_peak_radius,\n per_keypoint_offset, predict_depth, per_keypoint_depth,\n keypoint_depth_loss_weight, score_distance_offset,\n clip_out_of_frame_keypoints, rescore_instances,\n heatmap_head_num_filters, heatmap_head_kernel_sizes,\n offset_head_num_filters, offset_head_kernel_sizes,\n regress_head_num_filters, regress_head_kernel_sizes,\n score_distance_multiplier, std_dev_multiplier, rescoring_threshold,\n argmax_postprocessing, gaussian_denom_ratio)\n\n\nclass ObjectCenterParams(\n collections.namedtuple('ObjectCenterParams', [\n 'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init',\n 'min_box_overlap_iou', 'max_box_predictions', 'use_labeled_classes',\n 'keypoint_weights_for_center', 'center_head_num_filters',\n 'center_head_kernel_sizes', 'peak_max_pool_kernel_size'\n ])):\n \"\"\"Namedtuple to store object center prediction related parameters.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls,\n classification_loss,\n object_center_loss_weight,\n heatmap_bias_init=-2.19,\n min_box_overlap_iou=0.7,\n max_box_predictions=100,\n use_labeled_classes=False,\n keypoint_weights_for_center=None,\n
center_head_num_filters=(256),\n center_head_kernel_sizes=(3),\n peak_max_pool_kernel_size=3):\n \"\"\"Constructor with default values for ObjectCenterParams.\n\n Args:\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the class predictions in CenterNet.\n object_center_loss_weight: float, The weight for the object center loss.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the object center prediction head. If set to None, the bias is\n initialized with zeros.\n min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes\n need to have with groundtruth boxes to not be penalized. This is used for\n computing the class-specific center heatmaps.\n max_box_predictions: int, the maximum number of boxes to predict.\n use_labeled_classes: boolean, whether to compute the loss only for labeled\n classes.\n keypoint_weights_for_center: (optional) The keypoint weights used for\n calculating the location of the object center. If provided, the number of\n weights needs to be the same as the number of keypoints. The object\n center is calculated by the weighted mean of the keypoint locations. If\n not provided, the object center is determined by the center of the\n bounding box (default behavior).\n center_head_num_filters: filter numbers of the convolutional layers used\n by the object center prediction head.\n center_head_kernel_sizes: kernel size of the convolutional layers used\n by the object center prediction head.\n peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak\n score locations in a neighborhood for the object detection heatmap.\n Returns:\n An initialized ObjectCenterParams namedtuple.\n \"\"\"\n return super(ObjectCenterParams,\n cls).__new__(cls, classification_loss,\n object_center_loss_weight, heatmap_bias_init,\n min_box_overlap_iou, max_box_predictions,\n use_labeled_classes, keypoint_weights_for_center,\n center_head_num_filters, center_head_kernel_sizes,\n peak_max_pool_kernel_size)\n\n\nclass MaskParams(\n collections.namedtuple('MaskParams', [\n 'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width',\n 'score_threshold', 'heatmap_bias_init', 'mask_head_num_filters',\n 'mask_head_kernel_sizes'\n ])):\n \"\"\"Namedtuple to store mask prediction related parameters.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls,\n classification_loss,\n task_loss_weight=1.0,\n mask_height=256,\n mask_width=256,\n score_threshold=0.5,\n heatmap_bias_init=-2.19,\n mask_head_num_filters=(256),\n mask_head_kernel_sizes=(3)):\n \"\"\"Constructor with default values for MaskParams.\n\n Args:\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the semantic segmentation predictions in CenterNet.\n task_loss_weight: float, The loss weight for the segmentation task.\n mask_height: The height of the resized instance segmentation mask.\n mask_width: The width of the resized instance segmentation mask.\n score_threshold: The threshold at which to convert predicted mask\n probabilities (after passing through sigmoid) into foreground pixels.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the semantic segmentation prediction head.
If set to None, the\n bias is initialized with zeros.\n mask_head_num_filters: filter numbers of the convolutional layers used\n by the mask prediction head.\n mask_head_kernel_sizes: kernel size of the convolutional layers used\n by the mask prediction head.\n\n Returns:\n An initialized MaskParams namedtuple.\n \"\"\"\n return super(MaskParams,\n cls).__new__(cls, classification_loss,\n task_loss_weight, mask_height, mask_width,\n score_threshold, heatmap_bias_init,\n mask_head_num_filters, mask_head_kernel_sizes)\n\n\nclass DensePoseParams(\n collections.namedtuple('DensePoseParams', [\n 'class_id', 'classification_loss', 'localization_loss',\n 'part_loss_weight', 'coordinate_loss_weight', 'num_parts',\n 'task_loss_weight', 'upsample_to_input_res', 'upsample_method',\n 'heatmap_bias_init'\n ])):\n \"\"\"Namedtuple to store DensePose prediction related parameters.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls,\n class_id,\n classification_loss,\n localization_loss,\n part_loss_weight=1.0,\n coordinate_loss_weight=1.0,\n num_parts=24,\n task_loss_weight=1.0,\n upsample_to_input_res=True,\n upsample_method='bilinear',\n heatmap_bias_init=-2.19):\n \"\"\"Constructor with default values for DensePoseParams.\n\n Args:\n class_id: the ID of the class that contains the DensePose groundtruth.\n This should typically correspond to the \"person\" class. Note that the ID\n is 0-based, meaning that class 0 corresponds to the first non-background\n object class.\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the body part predictions in CenterNet.\n localization_loss: an object_detection.core.losses.Loss object to compute\n the loss for the surface coordinate regression in CenterNet.\n part_loss_weight: The loss weight to apply to part prediction.\n coordinate_loss_weight: The loss weight to apply to surface coordinate\n prediction.\n num_parts: The number of DensePose parts to predict.\n task_loss_weight: float, the loss weight for the DensePose task.\n upsample_to_input_res: Whether to upsample the DensePose feature maps to\n the input resolution before applying the loss. Note that the prediction\n outputs are still at the standard CenterNet output stride.\n upsample_method: Method for upsampling DensePose feature maps. Options are\n either 'bilinear' or 'nearest'. This has no effect when\n `upsample_to_input_res` is False.\n heatmap_bias_init: float, the initial value of bias in the convolutional\n kernel of the part prediction head. If set to None, the\n bias is initialized with zeros.\n\n Returns:\n An initialized DensePoseParams namedtuple.\n \"\"\"\n return super(DensePoseParams,\n cls).__new__(cls, class_id, classification_loss,\n localization_loss, part_loss_weight,\n coordinate_loss_weight, num_parts,\n task_loss_weight, upsample_to_input_res,\n upsample_method, heatmap_bias_init)\n\n\nclass TrackParams(\n collections.namedtuple('TrackParams', [\n 'num_track_ids', 'reid_embed_size', 'num_fc_layers',\n 'classification_loss', 'task_loss_weight'\n ])):\n \"\"\"Namedtuple to store tracking prediction related parameters.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls,\n num_track_ids,\n reid_embed_size,\n num_fc_layers,\n classification_loss,\n task_loss_weight=1.0):\n \"\"\"Constructor with default values for TrackParams.\n\n Args:\n num_track_ids: int. The maximum track ID in the dataset. Used for the\n ReID embedding classification task.\n reid_embed_size: int. The embedding size for the ReID task.\n num_fc_layers: int.
The number of (fully-connected, batch-norm, relu)\n layers for the track ID classification head.\n classification_loss: an object_detection.core.losses.Loss object to\n compute the loss for the ReID embedding in CenterNet.\n task_loss_weight: float, the loss weight for the tracking task.\n\n Returns:\n An initialized TrackParams namedtuple.\n \"\"\"\n return super(TrackParams,\n cls).__new__(cls, num_track_ids, reid_embed_size,\n num_fc_layers, classification_loss,\n task_loss_weight)\n\n\nclass TemporalOffsetParams(\n collections.namedtuple('TemporalOffsetParams', [\n 'localization_loss', 'task_loss_weight'\n ])):\n \"\"\"Namedtuple to store temporal offset related parameters.\"\"\"\n\n __slots__ = ()\n\n def __new__(cls,\n localization_loss,\n task_loss_weight=1.0):\n \"\"\"Constructor with default values for TemporalOffsetParams.\n\n Args:\n localization_loss: an object_detection.core.losses.Loss object to\n compute the loss for the temporal offset in CenterNet.\n task_loss_weight: float, the loss weight for the temporal offset\n task.\n\n Returns:\n An initialized TemporalOffsetParams namedtuple.\n \"\"\"\n return super(TemporalOffsetParams,\n cls).__new__(cls, localization_loss, task_loss_weight)\n\n# The following constants are used to generate the keys of the\n# (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch\n# class.\nDETECTION_TASK = 'detection_task'\nOBJECT_CENTER = 'object_center'\nBOX_SCALE = 'box/scale'\nBOX_OFFSET = 'box/offset'\nKEYPOINT_REGRESSION = 'keypoint/regression'\nKEYPOINT_HEATMAP = 'keypoint/heatmap'\nKEYPOINT_OFFSET = 'keypoint/offset'\nKEYPOINT_DEPTH = 'keypoint/depth'\nSEGMENTATION_TASK = 'segmentation_task'\nSEGMENTATION_HEATMAP = 'segmentation/heatmap'\nDENSEPOSE_TASK = 'densepose_task'\nDENSEPOSE_HEATMAP = 'densepose/heatmap'\nDENSEPOSE_REGRESSION = 'densepose/regression'\nLOSS_KEY_PREFIX = 'Loss'\nTRACK_TASK = 'track_task'\nTRACK_REID = 'track/reid'\nTEMPORALOFFSET_TASK = 'temporal_offset_task'\nTEMPORAL_OFFSET = 'track/offset'\n\n\ndef get_keypoint_name(task_name, head_name):\n return '%s/%s' % (task_name, head_name)\n\n\ndef get_num_instances_from_weights(groundtruth_weights_list):\n \"\"\"Computes the number of instances/boxes from the weights in a batch.\n\n Args:\n groundtruth_weights_list: A list of float tensors with shape\n [max_num_instances] representing whether there is an actual instance in\n the image (with non-zero value) or is padded to match the\n max_num_instances (with value 0.0). The list represents the batch\n dimension.\n\n Returns:\n A scalar integer tensor indicating how many instances/boxes are in the\n images in the batch. 
Note that this function is usually used to normalize\n the loss so the minimum return value is 1 to avoid division by zero.\n \"\"\"\n num_instances = tf.reduce_sum(\n [tf.math.count_nonzero(w) for w in groundtruth_weights_list])\n num_instances = tf.maximum(num_instances, 1)\n return num_instances\n\n\nclass CenterNetMetaArch(model.DetectionModel):\n \"\"\"The CenterNet meta architecture [1].\n\n [1]: https://arxiv.org/abs/1904.07850\n \"\"\"\n\n def __init__(self,\n is_training,\n add_summaries,\n num_classes,\n feature_extractor,\n image_resizer_fn,\n object_center_params,\n object_detection_params=None,\n keypoint_params_dict=None,\n mask_params=None,\n densepose_params=None,\n track_params=None,\n temporal_offset_params=None,\n use_depthwise=False,\n compute_heatmap_sparse=False,\n non_max_suppression_fn=None,\n unit_height_conv=False):\n \"\"\"Initializes a CenterNet model.\n\n Args:\n is_training: Set to True if this model is being built for training.\n add_summaries: Whether to add tf summaries in the model.\n num_classes: int, The number of classes that the model should predict.\n feature_extractor: A CenterNetFeatureExtractor to use to extract features\n from an image.\n image_resizer_fn: a callable for image resizing. This callable always\n takes a rank-3 image tensor (corresponding to a single image) and\n returns a rank-3 image tensor, possibly with new spatial dimensions and\n a 1-D tensor of shape [3] indicating the shape of the true image within the\n resized image tensor, as the resized image tensor could be padded. See\n builders/image_resizer_builder.py.\n object_center_params: An ObjectCenterParams namedtuple. This object holds\n the hyper-parameters for object center prediction. This is required by\n either object detection or keypoint estimation tasks.\n object_detection_params: An ObjectDetectionParams namedtuple. This object\n holds the hyper-parameters necessary for object detection. Please see\n the class definition for more details.\n keypoint_params_dict: A dictionary that maps from task name to the\n corresponding KeypointEstimationParams namedtuple. This object holds the\n hyper-parameters necessary for multiple keypoint estimations. Please\n see the class definition for more details.\n mask_params: A MaskParams namedtuple. This object\n holds the hyper-parameters for segmentation. Please see the class\n definition for more details.\n densepose_params: A DensePoseParams namedtuple. This object holds the\n hyper-parameters for DensePose prediction. Please see the class\n definition for more details. Note that if this is provided, it is\n expected that `mask_params` is also provided.\n track_params: A TrackParams namedtuple. This object\n holds the hyper-parameters for tracking. Please see the class\n definition for more details.\n temporal_offset_params: A TemporalOffsetParams namedtuple. This object\n holds the hyper-parameters for offset prediction based tracking.\n use_depthwise: If true, all task heads will be constructed using\n separable_conv. Otherwise, standard convolutions will be used.\n compute_heatmap_sparse: bool, whether or not to use the sparse version of\n the Op that computes the center heatmaps. The sparse version scales\n better with number of channels in the heatmap, but in some cases is\n known to cause an OOM error. 
See b/170989061.\n non_max_suppression_fn: Optional Non Max Suppression function to apply.\n unit_height_conv: If True, Conv2Ds in prediction heads have asymmetric\n kernels with height=1.\n \"\"\"\n assert object_detection_params or keypoint_params_dict\n # Shorten the name for convenience and better formatting.\n self._is_training = is_training\n # The Objects as Points paper attaches loss functions to multiple\n # (`num_feature_outputs`) feature maps in the backbone. E.g.\n # for the hourglass backbone, `num_feature_outputs` is 2.\n self._num_classes = num_classes\n self._feature_extractor = feature_extractor\n self._num_feature_outputs = feature_extractor.num_feature_outputs\n self._stride = self._feature_extractor.out_stride\n self._image_resizer_fn = image_resizer_fn\n self._center_params = object_center_params\n self._od_params = object_detection_params\n self._kp_params_dict = keypoint_params_dict\n self._mask_params = mask_params\n if densepose_params is not None and mask_params is None:\n raise ValueError('To run DensePose prediction, `mask_params` must also '\n 'be supplied.')\n self._densepose_params = densepose_params\n self._track_params = track_params\n self._temporal_offset_params = temporal_offset_params\n\n self._use_depthwise = use_depthwise\n self._compute_heatmap_sparse = compute_heatmap_sparse\n\n # Subclasses may not implement the unit_height_conv arg, so only provide it\n # as a kwarg if it is True.\n kwargs = {'unit_height_conv': unit_height_conv} if unit_height_conv else {}\n # Construct the prediction head nets.\n self._prediction_head_dict = self._construct_prediction_heads(\n num_classes,\n self._num_feature_outputs,\n class_prediction_bias_init=self._center_params.heatmap_bias_init,\n **kwargs)\n # Initialize the target assigners.\n self._target_assigner_dict = self._initialize_target_assigners(\n stride=self._stride,\n min_box_overlap_iou=self._center_params.min_box_overlap_iou)\n\n # Will be used in VOD single_frame_meta_arch for tensor reshape.\n self._batched_prediction_tensor_names = []\n self._non_max_suppression_fn = non_max_suppression_fn\n\n super(CenterNetMetaArch, self).__init__(num_classes)\n\n def set_trainability_by_layer_traversal(self, trainable):\n \"\"\"Sets trainability layer by layer.\n\n The commonly-seen `model.trainable = False` method does not traverse\n the child layers. 
For example, if the parent is not trainable, we won't\n be able to set individual layers as trainable/non-trainable differentially.\n\n Args:\n trainable: (bool) Setting this for the model layer by layer except for\n the parent itself.\n \"\"\"\n for layer in self._flatten_layers(include_self=False):\n layer.trainable = trainable\n\n @property\n def prediction_head_dict(self):\n return self._prediction_head_dict\n\n @property\n def batched_prediction_tensor_names(self):\n if not self._batched_prediction_tensor_names:\n raise RuntimeError('Must call predict() method to get batched prediction '\n 'tensor names.')\n return self._batched_prediction_tensor_names\n\n def _make_prediction_net_list(self, num_feature_outputs, num_out_channels,\n kernel_sizes=(3), num_filters=(256),\n bias_fill=None, name=None,\n unit_height_conv=False):\n prediction_net_list = []\n for i in range(num_feature_outputs):\n prediction_net_list.append(\n make_prediction_net(\n num_out_channels,\n kernel_sizes=kernel_sizes,\n num_filters=num_filters,\n bias_fill=bias_fill,\n use_depthwise=self._use_depthwise,\n name='{}_{}'.format(name, i) if name else name,\n unit_height_conv=unit_height_conv))\n return prediction_net_list\n\n def _construct_prediction_heads(self, num_classes, num_feature_outputs,\n class_prediction_bias_init,\n unit_height_conv=False):\n \"\"\"Constructs the prediction heads based on the specific parameters.\n\n Args:\n num_classes: An integer indicating how many classes in total to predict.\n num_feature_outputs: An integer indicating how many feature outputs to use\n for calculating the loss. The Objects as Points paper attaches loss\n functions to multiple (`num_feature_outputs`) feature maps in the\n backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.\n class_prediction_bias_init: float, the initial value of bias in the\n convolutional kernel of the class prediction head. If set to None, the\n bias is initialized with zeros.\n unit_height_conv: If True, Conv2Ds have asymmetric kernels with height=1.\n\n Returns:\n A dictionary of keras modules generated by calling make_prediction_net\n function. 
It will also create and set a private member of the class when\n learning the tracking task.\n \"\"\"\n prediction_heads = {}\n prediction_heads[OBJECT_CENTER] = self._make_prediction_net_list(\n num_feature_outputs,\n num_classes,\n kernel_sizes=self._center_params.center_head_kernel_sizes,\n num_filters=self._center_params.center_head_num_filters,\n bias_fill=class_prediction_bias_init,\n name='center',\n unit_height_conv=unit_height_conv)\n\n if self._od_params is not None:\n prediction_heads[BOX_SCALE] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_SIZE_CHANNELS,\n kernel_sizes=self._od_params.scale_head_kernel_sizes,\n num_filters=self._od_params.scale_head_num_filters,\n name='box_scale',\n unit_height_conv=unit_height_conv)\n prediction_heads[BOX_OFFSET] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS,\n kernel_sizes=self._od_params.offset_head_kernel_sizes,\n num_filters=self._od_params.offset_head_num_filters,\n name='box_offset',\n unit_height_conv=unit_height_conv)\n\n if self._kp_params_dict is not None:\n for task_name, kp_params in self._kp_params_dict.items():\n num_keypoints = len(kp_params.keypoint_indices)\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_HEATMAP)] = self._make_prediction_net_list(\n num_feature_outputs,\n num_keypoints,\n kernel_sizes=kp_params.heatmap_head_kernel_sizes,\n num_filters=kp_params.heatmap_head_num_filters,\n bias_fill=kp_params.heatmap_bias_init,\n name='kpt_heatmap',\n unit_height_conv=unit_height_conv)\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_REGRESSION)] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS * num_keypoints,\n kernel_sizes=kp_params.regress_head_kernel_sizes,\n num_filters=kp_params.regress_head_num_filters,\n name='kpt_regress',\n unit_height_conv=unit_height_conv)\n\n if kp_params.per_keypoint_offset:\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS * num_keypoints,\n kernel_sizes=kp_params.offset_head_kernel_sizes,\n num_filters=kp_params.offset_head_num_filters,\n name='kpt_offset',\n unit_height_conv=unit_height_conv)\n else:\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_OFFSET)] = self._make_prediction_net_list(\n num_feature_outputs,\n NUM_OFFSET_CHANNELS,\n kernel_sizes=kp_params.offset_head_kernel_sizes,\n num_filters=kp_params.offset_head_num_filters,\n name='kpt_offset',\n unit_height_conv=unit_height_conv)\n\n if kp_params.predict_depth:\n num_depth_channel = (\n num_keypoints if kp_params.per_keypoint_depth else 1)\n prediction_heads[get_keypoint_name(\n task_name, KEYPOINT_DEPTH)] = self._make_prediction_net_list(\n num_feature_outputs, num_depth_channel, name='kpt_depth',\n unit_height_conv=unit_height_conv)\n\n if self._mask_params is not None:\n prediction_heads[SEGMENTATION_HEATMAP] = self._make_prediction_net_list(\n num_feature_outputs,\n num_classes,\n kernel_sizes=self._mask_params.mask_head_kernel_sizes,\n num_filters=self._mask_params.mask_head_num_filters,\n bias_fill=self._mask_params.heatmap_bias_init,\n name='seg_heatmap',\n unit_height_conv=unit_height_conv)\n\n if self._densepose_params is not None:\n prediction_heads[DENSEPOSE_HEATMAP] = self._make_prediction_net_list(\n num_feature_outputs,\n self._densepose_params.num_parts,\n bias_fill=self._densepose_params.heatmap_bias_init,\n name='dense_pose_heatmap',\n unit_height_conv=unit_height_conv)\n 
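# The DensePose regression head below follows the same pattern as the\n # heatmap head above: one prediction net per backbone feature output. As a\n # rough usage sketch of the helper (assuming two feature outputs and the\n # default 24 DensePose parts):\n #\n #   part_heads = self._make_prediction_net_list(\n #       num_feature_outputs=2, num_out_channels=24,\n #       name='dense_pose_heatmap')\n #\n # which yields a list of two Keras prediction nets, one per feature map.\n 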
prediction_heads[DENSEPOSE_REGRESSION] = self._make_prediction_net_list(\n num_feature_outputs,\n 2 * self._densepose_params.num_parts,\n name='dense_pose_regress',\n unit_height_conv=unit_height_conv)\n\n if self._track_params is not None:\n prediction_heads[TRACK_REID] = self._make_prediction_net_list(\n num_feature_outputs,\n self._track_params.reid_embed_size,\n name='track_reid',\n unit_height_conv=unit_height_conv)\n\n # Creates a classification network to train object embeddings by learning\n # a projection from embedding space to object track ID space.\n self.track_reid_classification_net = tf.keras.Sequential()\n for _ in range(self._track_params.num_fc_layers - 1):\n self.track_reid_classification_net.add(\n tf.keras.layers.Dense(self._track_params.reid_embed_size))\n self.track_reid_classification_net.add(\n tf.keras.layers.BatchNormalization())\n self.track_reid_classification_net.add(tf.keras.layers.ReLU())\n self.track_reid_classification_net.add(\n tf.keras.layers.Dense(self._track_params.num_track_ids))\n if self._temporal_offset_params is not None:\n prediction_heads[TEMPORAL_OFFSET] = self._make_prediction_net_list(\n num_feature_outputs, NUM_OFFSET_CHANNELS, name='temporal_offset',\n unit_height_conv=unit_height_conv)\n return prediction_heads\n\n def _initialize_target_assigners(self, stride, min_box_overlap_iou):\n \"\"\"Initializes the target assigners and puts them in a dictionary.\n\n Args:\n stride: An integer indicating the output stride of the feature extractor.\n min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes\n need to have with groundtruth boxes to not be penalized. This is used for\n computing the class specific center heatmaps.\n\n Returns:\n A dictionary of initialized target assigners for each task.\n \"\"\"\n target_assigners = {}\n keypoint_weights_for_center = (\n self._center_params.keypoint_weights_for_center)\n if not keypoint_weights_for_center:\n target_assigners[OBJECT_CENTER] = (\n cn_assigner.CenterNetCenterHeatmapTargetAssigner(\n stride, min_box_overlap_iou, self._compute_heatmap_sparse))\n self._center_from_keypoints = False\n else:\n # Determining the object center location by keypoint location is only\n # supported when there is exactly one keypoint prediction task and no\n # object detection task is specified.\n assert len(self._kp_params_dict) == 1 and self._od_params is None\n kp_params = next(iter(self._kp_params_dict.values()))\n # The number of keypoint_weights_for_center needs to be the same as the\n # number of keypoints.\n assert len(keypoint_weights_for_center) == len(kp_params.keypoint_indices)\n target_assigners[OBJECT_CENTER] = (\n cn_assigner.CenterNetCenterHeatmapTargetAssigner(\n stride,\n min_box_overlap_iou,\n self._compute_heatmap_sparse,\n keypoint_class_id=kp_params.class_id,\n keypoint_indices=kp_params.keypoint_indices,\n keypoint_weights_for_center=keypoint_weights_for_center))\n self._center_from_keypoints = True\n if self._od_params is not None:\n target_assigners[DETECTION_TASK] = (\n cn_assigner.CenterNetBoxTargetAssigner(stride))\n if self._kp_params_dict is not None:\n for task_name, kp_params in self._kp_params_dict.items():\n target_assigners[task_name] = (\n cn_assigner.CenterNetKeypointTargetAssigner(\n stride=stride,\n class_id=kp_params.class_id,\n keypoint_indices=kp_params.keypoint_indices,\n keypoint_std_dev=kp_params.keypoint_std_dev,\n peak_radius=kp_params.offset_peak_radius,\n per_keypoint_offset=kp_params.per_keypoint_offset,\n compute_heatmap_sparse=self._compute_heatmap_sparse,\n 
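# Note: `per_keypoint_depth` below mirrors the depth head layout chosen in\n # _construct_prediction_heads (one depth channel per keypoint rather than\n # a single shared channel).\n 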
per_keypoint_depth=kp_params.per_keypoint_depth))\n if self._mask_params is not None:\n target_assigners[SEGMENTATION_TASK] = (\n cn_assigner.CenterNetMaskTargetAssigner(stride, boxes_scale=1.05))\n if self._densepose_params is not None:\n dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride\n target_assigners[DENSEPOSE_TASK] = (\n cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride))\n if self._track_params is not None:\n target_assigners[TRACK_TASK] = (\n cn_assigner.CenterNetTrackTargetAssigner(\n stride, self._track_params.num_track_ids))\n if self._temporal_offset_params is not None:\n target_assigners[TEMPORALOFFSET_TASK] = (\n cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride))\n\n return target_assigners\n\n def _compute_object_center_loss(self, input_height, input_width,\n object_center_predictions, per_pixel_weights,\n maximum_normalized_coordinate=1.1):\n \"\"\"Computes the object center loss.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n object_center_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, num_classes] representing the object center\n feature maps.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n maximum_normalized_coordinate: Maximum coordinate value to be considered\n as normalized, defaults to 1.1. This is used to check bounds when\n converting normalized coordinates to absolute coordinates.\n\n Returns:\n A float scalar tensor representing the object center loss per instance.\n \"\"\"\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n\n if self._center_params.use_labeled_classes:\n gt_labeled_classes_list = self.groundtruth_lists(\n fields.InputDataFields.groundtruth_labeled_classes)\n batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)\n batch_labeled_classes_shape = tf.shape(batch_labeled_classes)\n batch_labeled_classes = tf.reshape(\n batch_labeled_classes,\n [batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]])\n per_pixel_weights = per_pixel_weights * batch_labeled_classes\n\n # Convert the groundtruth to targets.\n assigner = self._target_assigner_dict[OBJECT_CENTER]\n if self._center_from_keypoints:\n gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)\n heatmap_targets = assigner.assign_center_targets_from_keypoints(\n height=input_height,\n width=input_width,\n gt_classes_list=gt_classes_list,\n gt_keypoints_list=gt_keypoints_list,\n gt_weights_list=gt_weights_list,\n maximum_normalized_coordinate=maximum_normalized_coordinate)\n else:\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n heatmap_targets = assigner.assign_center_targets_from_boxes(\n height=input_height,\n width=input_width,\n gt_boxes_list=gt_boxes_list,\n gt_classes_list=gt_classes_list,\n gt_weights_list=gt_weights_list,\n maximum_normalized_coordinate=maximum_normalized_coordinate)\n\n flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)\n num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))\n\n loss = 0.0\n object_center_loss = self._center_params.classification_loss\n # Loop through each feature output head.\n for pred in object_center_predictions:\n pred = 
_flatten_spatial_dimensions(pred)\n loss += object_center_loss(\n pred, flattened_heatmap_targets, weights=per_pixel_weights)\n loss_per_instance = tf.reduce_sum(loss) / (\n float(len(object_center_predictions)) * num_boxes)\n return loss_per_instance\n\n def _compute_object_detection_losses(self, input_height, input_width,\n prediction_dict, per_pixel_weights):\n \"\"\"Computes the weighted object detection losses.\n\n This wrapper function calls the function which computes the losses for\n object detection task and applies corresponding weights to the losses.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: A dictionary holding predicted tensors output by\n \"predict\" function. See \"predict\" function for more detailed\n description.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A dictionary of scalar float tensors representing the weighted losses for\n object detection task:\n BOX_SCALE: the weighted scale (height/width) loss.\n BOX_OFFSET: the weighted object offset loss.\n \"\"\"\n od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss(\n scale_predictions=prediction_dict[BOX_SCALE],\n offset_predictions=prediction_dict[BOX_OFFSET],\n input_height=input_height,\n input_width=input_width)\n loss_dict = {}\n loss_dict[BOX_SCALE] = (\n self._od_params.scale_loss_weight * od_scale_loss)\n loss_dict[BOX_OFFSET] = (\n self._od_params.offset_loss_weight * od_offset_loss)\n return loss_dict\n\n def _compute_box_scale_and_offset_loss(self, input_height, input_width,\n scale_predictions, offset_predictions):\n \"\"\"Computes the scale and offset losses of the object detection task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n scale_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2] representing the prediction heads of the model\n for object scale (i.e. height and width).\n offset_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2] representing the prediction heads of the model\n for object offset.\n\n Returns:\n A tuple of two losses:\n scale_loss: A float scalar tensor representing the object height/width\n loss normalized by total number of boxes.\n offset_loss: A float scalar tensor representing the object offset loss\n normalized by total number of boxes.\n \"\"\"\n # TODO(vighneshb) Explore a size invariant version of scale loss.\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))\n num_predictions = float(len(scale_predictions))\n\n assigner = self._target_assigner_dict[DETECTION_TASK]\n (batch_indices, batch_height_width_targets, batch_offset_targets,\n batch_weights) = assigner.assign_size_and_offset_targets(\n height=input_height,\n width=input_width,\n gt_boxes_list=gt_boxes_list,\n gt_weights_list=gt_weights_list)\n batch_weights = tf.expand_dims(batch_weights, -1)\n\n scale_loss = 0\n offset_loss = 0\n localization_loss_fn = self._od_params.localization_loss\n for scale_pred, offset_pred in zip(scale_predictions, offset_predictions):\n # Compute the 
scale loss.\n scale_pred = cn_assigner.get_batch_predictions_from_indices(\n scale_pred, batch_indices)\n scale_loss += localization_loss_fn(\n scale_pred, batch_height_width_targets, weights=batch_weights)\n # Compute the offset loss.\n offset_pred = cn_assigner.get_batch_predictions_from_indices(\n offset_pred, batch_indices)\n offset_loss += localization_loss_fn(\n offset_pred, batch_offset_targets, weights=batch_weights)\n scale_loss = tf.reduce_sum(scale_loss) / (\n num_predictions * num_boxes)\n offset_loss = tf.reduce_sum(offset_loss) / (\n num_predictions * num_boxes)\n return scale_loss, offset_loss\n\n def _compute_keypoint_estimation_losses(self, task_name, input_height,\n input_width, prediction_dict,\n per_pixel_weights):\n \"\"\"Computes the weighted keypoint losses.\"\"\"\n kp_params = self._kp_params_dict[task_name]\n heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)\n offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)\n regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)\n depth_key = get_keypoint_name(task_name, KEYPOINT_DEPTH)\n heatmap_loss = self._compute_kp_heatmap_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n heatmap_predictions=prediction_dict[heatmap_key],\n classification_loss_fn=kp_params.classification_loss,\n per_pixel_weights=per_pixel_weights)\n offset_loss = self._compute_kp_offset_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n offset_predictions=prediction_dict[offset_key],\n localization_loss_fn=kp_params.localization_loss)\n reg_loss = self._compute_kp_regression_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n regression_predictions=prediction_dict[regression_key],\n localization_loss_fn=kp_params.localization_loss)\n\n loss_dict = {}\n loss_dict[heatmap_key] = (\n kp_params.keypoint_heatmap_loss_weight * heatmap_loss)\n loss_dict[offset_key] = (\n kp_params.keypoint_offset_loss_weight * offset_loss)\n loss_dict[regression_key] = (\n kp_params.keypoint_regression_loss_weight * reg_loss)\n if kp_params.predict_depth:\n depth_loss = self._compute_kp_depth_loss(\n input_height=input_height,\n input_width=input_width,\n task_name=task_name,\n depth_predictions=prediction_dict[depth_key],\n localization_loss_fn=kp_params.localization_loss)\n loss_dict[depth_key] = kp_params.keypoint_depth_loss_weight * depth_loss\n return loss_dict\n\n def _compute_kp_heatmap_loss(self, input_height, input_width, task_name,\n heatmap_predictions, classification_loss_fn,\n per_pixel_weights):\n \"\"\"Computes the heatmap loss of the keypoint estimation task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n heatmap_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, num_keypoints] representing the prediction heads\n of the model for keypoint heatmap.\n classification_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the class predictions in CenterNet.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n loss: A float scalar tensor representing the object keypoint heatmap loss\n normalized by number of instances.\n \"\"\"\n gt_keypoints_list = 
self.groundtruth_lists(fields.BoxListFields.keypoints)\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n\n assigner = self._target_assigner_dict[task_name]\n (keypoint_heatmap, num_instances_per_kp_type,\n valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(\n height=input_height,\n width=input_width,\n gt_keypoints_list=gt_keypoints_list,\n gt_weights_list=gt_weights_list,\n gt_classes_list=gt_classes_list,\n gt_boxes_list=gt_boxes_list)\n flattened_valid_mask = _flatten_spatial_dimensions(valid_mask_batch)\n flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)\n # Sum over the number of instances per keypoint type to get the total\n # number of keypoints. Note that this is used to normalize the loss and we\n # keep the minimum value at 1 to avoid generating a weird loss value when\n # no keypoint is in the image batch.\n num_instances = tf.maximum(\n tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32),\n 1.0)\n loss = 0.0\n # Loop through each feature output head.\n for pred in heatmap_predictions:\n pred = _flatten_spatial_dimensions(pred)\n unweighted_loss = classification_loss_fn(\n pred,\n flattened_heatmap_targets,\n weights=tf.ones_like(per_pixel_weights))\n # Apply the weights after the loss function to have full control over it.\n loss += unweighted_loss * per_pixel_weights * flattened_valid_mask\n loss = tf.reduce_sum(loss) / (\n float(len(heatmap_predictions)) * num_instances)\n return loss\n\n def _compute_kp_offset_loss(self, input_height, input_width, task_name,\n offset_predictions, localization_loss_fn):\n \"\"\"Computes the offset loss of the keypoint estimation task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n offset_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2] representing the prediction heads of the model\n for keypoint offset.\n localization_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the keypoint offset predictions in CenterNet.\n\n Returns:\n loss: A float scalar tensor representing the keypoint offset loss\n normalized by number of total keypoints.\n \"\"\"\n gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n\n assigner = self._target_assigner_dict[task_name]\n (batch_indices, batch_offsets,\n batch_weights) = assigner.assign_keypoints_offset_targets(\n height=input_height,\n width=input_width,\n gt_keypoints_list=gt_keypoints_list,\n gt_weights_list=gt_weights_list,\n gt_classes_list=gt_classes_list)\n\n # Keypoint offset loss.\n loss = 0.0\n for prediction in offset_predictions:\n batch_size, out_height, out_width, channels = _get_shape(prediction, 4)\n if channels > 2:\n prediction = tf.reshape(\n prediction, shape=[batch_size, out_height, out_width, -1, 2])\n prediction = cn_assigner.get_batch_predictions_from_indices(\n prediction, batch_indices)\n # The dimensions passed are not as per the doc string but the loss\n # still computes the correct value.\n unweighted_loss = localization_loss_fn(\n prediction,\n batch_offsets,\n 
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))\n # Apply the weights after the loss function to have full control over it.\n loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)\n\n loss = tf.reduce_sum(loss) / (\n float(len(offset_predictions)) *\n tf.maximum(tf.reduce_sum(batch_weights), 1.0))\n return loss\n\n def _compute_kp_regression_loss(self, input_height, input_width, task_name,\n regression_predictions, localization_loss_fn):\n \"\"\"Computes the keypoint regression loss of the keypoint estimation task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n regression_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2 * num_keypoints] representing the prediction\n heads of the model for keypoint regression offset.\n localization_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the keypoint regression offset predictions in\n CenterNet.\n\n Returns:\n loss: A float scalar tensor representing the keypoint regression offset\n loss normalized by number of total keypoints.\n \"\"\"\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n # keypoint regression offset loss.\n assigner = self._target_assigner_dict[task_name]\n (batch_indices, batch_regression_offsets,\n batch_weights) = assigner.assign_joint_regression_targets(\n height=input_height,\n width=input_width,\n gt_keypoints_list=gt_keypoints_list,\n gt_classes_list=gt_classes_list,\n gt_weights_list=gt_weights_list,\n gt_boxes_list=gt_boxes_list)\n\n loss = 0.0\n for prediction in regression_predictions:\n batch_size, out_height, out_width, _ = _get_shape(prediction, 4)\n reshaped_prediction = tf.reshape(\n prediction, shape=[batch_size, out_height, out_width, -1, 2])\n reg_prediction = cn_assigner.get_batch_predictions_from_indices(\n reshaped_prediction, batch_indices)\n unweighted_loss = localization_loss_fn(\n reg_prediction,\n batch_regression_offsets,\n weights=tf.expand_dims(tf.ones_like(batch_weights), -1))\n # Apply the weights after the loss function to have full control over it.\n loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)\n\n loss = tf.reduce_sum(loss) / (\n float(len(regression_predictions)) *\n tf.maximum(tf.reduce_sum(batch_weights), 1.0))\n return loss\n\n def _compute_kp_depth_loss(self, input_height, input_width, task_name,\n depth_predictions, localization_loss_fn):\n \"\"\"Computes the loss of the keypoint depth estimation.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n task_name: A string representing the name of the keypoint task.\n depth_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 1 (or num_keypoints)] representing the prediction\n heads of the model for keypoint depth.\n localization_loss_fn: An object_detection.core.losses.Loss object to\n compute the loss for the keypoint offset predictions in CenterNet.\n\n Returns:\n loss: A float scalar tensor representing the keypoint depth loss\n normalized by number of total keypoints.\n \"\"\"\n kp_params = 
self._kp_params_dict[task_name]\n gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n gt_keypoint_depths_list = self.groundtruth_lists(\n fields.BoxListFields.keypoint_depths)\n gt_keypoint_depth_weights_list = self.groundtruth_lists(\n fields.BoxListFields.keypoint_depth_weights)\n\n assigner = self._target_assigner_dict[task_name]\n (batch_indices, batch_depths,\n batch_weights) = assigner.assign_keypoints_depth_targets(\n height=input_height,\n width=input_width,\n gt_keypoints_list=gt_keypoints_list,\n gt_weights_list=gt_weights_list,\n gt_classes_list=gt_classes_list,\n gt_keypoint_depths_list=gt_keypoint_depths_list,\n gt_keypoint_depth_weights_list=gt_keypoint_depth_weights_list)\n\n # Keypoint depth loss.\n loss = 0.0\n for prediction in depth_predictions:\n if kp_params.per_keypoint_depth:\n prediction = tf.expand_dims(prediction, axis=-1)\n selected_depths = cn_assigner.get_batch_predictions_from_indices(\n prediction, batch_indices)\n # The dimensions passed are not as per the doc string but the loss\n # still computes the correct value.\n unweighted_loss = localization_loss_fn(\n selected_depths,\n batch_depths,\n weights=tf.expand_dims(tf.ones_like(batch_weights), -1))\n # Apply the weights after the loss function to have full control over it.\n loss += batch_weights * tf.squeeze(unweighted_loss, axis=1)\n\n loss = tf.reduce_sum(loss) / (\n float(len(depth_predictions)) *\n tf.maximum(tf.reduce_sum(batch_weights), 1.0))\n return loss\n\n def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights):\n \"\"\"Computes all the losses associated with segmentation.\n\n Args:\n prediction_dict: The dictionary returned from the predict() method.\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A dictionary with segmentation losses.\n \"\"\"\n segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP]\n mask_loss = self._compute_mask_loss(\n segmentation_heatmap, per_pixel_weights)\n losses = {\n SEGMENTATION_HEATMAP: mask_loss\n }\n return losses\n\n def _compute_mask_loss(self, segmentation_predictions,\n per_pixel_weights):\n \"\"\"Computes the mask loss.\n\n Args:\n segmentation_predictions: A list of float32 tensors of shape [batch_size,\n out_height, out_width, num_classes].\n per_pixel_weights: A float tensor of shape [batch_size,\n out_height * out_width, 1] with 1s in locations where the spatial\n coordinates fall within the height and width in true_image_shapes.\n\n Returns:\n A float scalar tensor representing the mask loss.\n \"\"\"\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks)\n gt_mask_weights_list = None\n if self.groundtruth_has_field(fields.BoxListFields.mask_weights):\n gt_mask_weights_list = self.groundtruth_lists(\n fields.BoxListFields.mask_weights)\n gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)\n\n # Convert the groundtruth to targets.\n assigner = self._target_assigner_dict[SEGMENTATION_TASK]\n heatmap_targets, heatmap_weight = assigner.assign_segmentation_targets(\n gt_masks_list=gt_masks_list,\n gt_classes_list=gt_classes_list,\n gt_boxes_list=gt_boxes_list,\n 
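# Per-instance mask weights (if provided) feed into `heatmap_weight`,\n # which is folded into the per-pixel weights below to mask those\n # instances out of the loss.\n 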
gt_mask_weights_list=gt_mask_weights_list)\n\n flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)\n flattened_heatmap_mask = _flatten_spatial_dimensions(\n heatmap_weight[:, :, :, tf.newaxis])\n per_pixel_weights *= flattened_heatmap_mask\n\n loss = 0.0\n mask_loss_fn = self._mask_params.classification_loss\n\n total_pixels_in_loss = tf.math.maximum(\n tf.reduce_sum(per_pixel_weights), 1)\n\n # Loop through each feature output head.\n for pred in segmentation_predictions:\n pred = _flatten_spatial_dimensions(pred)\n loss += mask_loss_fn(\n pred, flattened_heatmap_targets, weights=per_pixel_weights)\n # TODO(ronnyvotel): Consider other ways to normalize loss.\n total_loss = tf.reduce_sum(loss) / (\n float(len(segmentation_predictions)) * total_pixels_in_loss)\n return total_loss\n\n def _compute_densepose_losses(self, input_height, input_width,\n prediction_dict):\n \"\"\"Computes the weighted DensePose losses.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: A dictionary holding predicted tensors output by the\n \"predict\" function. See the \"predict\" function for more detailed\n description.\n\n Returns:\n A dictionary of scalar float tensors representing the weighted losses for\n the DensePose task:\n DENSEPOSE_HEATMAP: the weighted part segmentation loss.\n DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.\n \"\"\"\n dp_heatmap_loss, dp_regression_loss = (\n self._compute_densepose_part_and_coordinate_losses(\n input_height=input_height,\n input_width=input_width,\n part_predictions=prediction_dict[DENSEPOSE_HEATMAP],\n surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION]))\n loss_dict = {}\n loss_dict[DENSEPOSE_HEATMAP] = (\n self._densepose_params.part_loss_weight * dp_heatmap_loss)\n loss_dict[DENSEPOSE_REGRESSION] = (\n self._densepose_params.coordinate_loss_weight * dp_regression_loss)\n return loss_dict\n\n def _compute_densepose_part_and_coordinate_losses(\n self, input_height, input_width, part_predictions,\n surface_coord_predictions):\n \"\"\"Computes the individual losses for the DensePose task.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n part_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, num_parts].\n surface_coord_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, 2 * num_parts].\n\n Returns:\n A tuple with two scalar loss tensors: part_prediction_loss and\n surface_coord_loss.\n \"\"\"\n gt_dp_num_points_list = self.groundtruth_lists(\n fields.BoxListFields.densepose_num_points)\n gt_dp_part_ids_list = self.groundtruth_lists(\n fields.BoxListFields.densepose_part_ids)\n gt_dp_surface_coords_list = self.groundtruth_lists(\n fields.BoxListFields.densepose_surface_coords)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n\n assigner = self._target_assigner_dict[DENSEPOSE_TASK]\n batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (\n assigner.assign_part_and_coordinate_targets(\n height=input_height,\n width=input_width,\n gt_dp_num_points_list=gt_dp_num_points_list,\n gt_dp_part_ids_list=gt_dp_part_ids_list,\n gt_dp_surface_coords_list=gt_dp_surface_coords_list,\n gt_weights_list=gt_weights_list))\n\n part_prediction_loss = 0\n surface_coord_loss = 0\n classification_loss_fn = 
self._densepose_params.classification_loss\n localization_loss_fn = self._densepose_params.localization_loss\n num_predictions = float(len(part_predictions))\n num_valid_points = tf.math.count_nonzero(batch_weights)\n num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)\n for part_pred, surface_coord_pred in zip(part_predictions,\n surface_coord_predictions):\n # Potentially upsample the feature maps, so that better quality (i.e.\n # higher res) groundtruth can be applied.\n if self._densepose_params.upsample_to_input_res:\n part_pred = tf.keras.layers.UpSampling2D(\n self._stride, interpolation=self._densepose_params.upsample_method)(\n part_pred)\n surface_coord_pred = tf.keras.layers.UpSampling2D(\n self._stride, interpolation=self._densepose_params.upsample_method)(\n surface_coord_pred)\n # Compute the part prediction loss.\n part_pred = cn_assigner.get_batch_predictions_from_indices(\n part_pred, batch_indices[:, 0:3])\n part_prediction_loss += classification_loss_fn(\n part_pred[:, tf.newaxis, :],\n batch_part_ids[:, tf.newaxis, :],\n weights=batch_weights[:, tf.newaxis, tf.newaxis])\n # Compute the surface coordinate loss.\n batch_size, out_height, out_width, _ = _get_shape(\n surface_coord_pred, 4)\n surface_coord_pred = tf.reshape(\n surface_coord_pred, [batch_size, out_height, out_width, -1, 2])\n surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(\n surface_coord_pred, batch_indices)\n surface_coord_loss += localization_loss_fn(\n surface_coord_pred,\n batch_surface_coords,\n weights=batch_weights[:, tf.newaxis])\n part_prediction_loss = tf.reduce_sum(part_prediction_loss) / (\n num_predictions * num_valid_points)\n surface_coord_loss = tf.reduce_sum(surface_coord_loss) / (\n num_predictions * num_valid_points)\n return part_prediction_loss, surface_coord_loss\n\n def _compute_track_losses(self, input_height, input_width, prediction_dict):\n \"\"\"Computes all the losses associated with tracking.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: The dictionary returned from the predict() method.\n\n Returns:\n A dictionary with tracking losses.\n \"\"\"\n object_reid_predictions = prediction_dict[TRACK_REID]\n embedding_loss = self._compute_track_embedding_loss(\n input_height=input_height,\n input_width=input_width,\n object_reid_predictions=object_reid_predictions)\n losses = {\n TRACK_REID: embedding_loss\n }\n return losses\n\n def _compute_track_embedding_loss(self, input_height, input_width,\n object_reid_predictions):\n \"\"\"Computes the object ReID loss.\n\n The embedding is trained as a classification task where the target is the\n ID of each track among all tracks in the whole dataset.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n object_reid_predictions: A list of float tensors of shape [batch_size,\n out_height, out_width, reid_embed_size] representing the object\n embedding feature maps.\n\n Returns:\n A float scalar tensor representing the object ReID loss per instance.\n \"\"\"\n gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids)\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))\n\n # Convert the 
groundtruth to targets.\n assigner = self._target_assigner_dict[TRACK_TASK]\n batch_indices, batch_weights, track_targets = assigner.assign_track_targets(\n height=input_height,\n width=input_width,\n gt_track_ids_list=gt_track_ids_list,\n gt_boxes_list=gt_boxes_list,\n gt_weights_list=gt_weights_list)\n batch_weights = tf.expand_dims(batch_weights, -1)\n\n loss = 0.0\n object_reid_loss = self._track_params.classification_loss\n # Loop through each feature output head.\n for pred in object_reid_predictions:\n embedding_pred = cn_assigner.get_batch_predictions_from_indices(\n pred, batch_indices)\n\n reid_classification = self.track_reid_classification_net(embedding_pred)\n\n loss += object_reid_loss(\n reid_classification, track_targets, weights=batch_weights)\n\n loss_per_instance = tf.reduce_sum(loss) / (\n float(len(object_reid_predictions)) * num_boxes)\n\n return loss_per_instance\n\n def _compute_temporal_offset_loss(self, input_height,\n input_width, prediction_dict):\n \"\"\"Computes the temporal offset loss for tracking.\n\n Args:\n input_height: An integer scalar tensor representing input image height.\n input_width: An integer scalar tensor representing input image width.\n prediction_dict: The dictionary returned from the predict() method.\n\n Returns:\n A dictionary with track/temporal_offset losses.\n \"\"\"\n gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)\n gt_offsets_list = self.groundtruth_lists(\n fields.BoxListFields.temporal_offsets)\n gt_match_list = self.groundtruth_lists(\n fields.BoxListFields.track_match_flags)\n gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)\n num_boxes = tf.cast(\n get_num_instances_from_weights(gt_weights_list), tf.float32)\n\n offset_predictions = prediction_dict[TEMPORAL_OFFSET]\n num_predictions = float(len(offset_predictions))\n\n assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK]\n (batch_indices, batch_offset_targets,\n batch_weights) = assigner.assign_temporal_offset_targets(\n height=input_height,\n width=input_width,\n gt_boxes_list=gt_boxes_list,\n gt_offsets_list=gt_offsets_list,\n gt_match_list=gt_match_list,\n gt_weights_list=gt_weights_list)\n batch_weights = tf.expand_dims(batch_weights, -1)\n\n offset_loss_fn = self._temporal_offset_params.localization_loss\n loss_dict = {}\n offset_loss = 0\n for offset_pred in offset_predictions:\n offset_pred = cn_assigner.get_batch_predictions_from_indices(\n offset_pred, batch_indices)\n offset_loss += offset_loss_fn(offset_pred[:, None],\n batch_offset_targets[:, None],\n weights=batch_weights)\n offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes)\n loss_dict[TEMPORAL_OFFSET] = offset_loss\n return loss_dict\n\n def _should_clip_keypoints(self):\n \"\"\"Returns a boolean indicating whether keypoint clipping should occur.\n\n If there is only one keypoint task, clipping is controlled by the field\n `clip_out_of_frame_keypoints`. If there are multiple keypoint tasks,\n clipping logic is defined based on unanimous agreement of keypoint\n parameters. 
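For example, if every configured keypoint task agrees on\n `clip_out_of_frame_keypoints=True`, clipping is enabled for all of them. 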
If there is any ambiguity, clip_out_of_frame_keypoints is set\n to False (default).\n \"\"\"\n kp_params_iterator = iter(self._kp_params_dict.values())\n if len(self._kp_params_dict) == 1:\n kp_params = next(kp_params_iterator)\n return kp_params.clip_out_of_frame_keypoints\n\n # Multi-task setting.\n kp_params = next(kp_params_iterator)\n should_clip = kp_params.clip_out_of_frame_keypoints\n for kp_params in kp_params_iterator:\n if kp_params.clip_out_of_frame_keypoints != should_clip:\n return False\n return should_clip\n\n def _rescore_instances(self, classes, scores, keypoint_scores):\n \"\"\"Rescores instances based on detection and keypoint scores.\n\n Args:\n classes: A [batch, max_detections] int32 tensor with detection classes.\n scores: A [batch, max_detections] float32 tensor with detection scores.\n keypoint_scores: A [batch, max_detections, total_num_keypoints] float32\n tensor with keypoint scores.\n\n Returns:\n A [batch, max_detections] float32 tensor with possibly altered detection\n scores.\n \"\"\"\n batch, max_detections, total_num_keypoints = (\n shape_utils.combined_static_and_dynamic_shape(keypoint_scores))\n classes_tiled = tf.tile(classes[:, :, tf.newaxis],\n multiples=[1, 1, total_num_keypoints])\n # TODO(yuhuic): Investigate whether this function will create subgraphs in\n # tflite that will cause the model to run slower at inference.\n for kp_params in self._kp_params_dict.values():\n if not kp_params.rescore_instances:\n continue\n class_id = kp_params.class_id\n keypoint_indices = kp_params.keypoint_indices\n kpt_mask = tf.reduce_sum(\n tf.one_hot(keypoint_indices, depth=total_num_keypoints), axis=0)\n kpt_mask_tiled = tf.tile(kpt_mask[tf.newaxis, tf.newaxis, :],\n multiples=[batch, max_detections, 1])\n class_and_keypoint_mask = tf.math.logical_and(\n classes_tiled == class_id,\n kpt_mask_tiled == 1.0)\n class_and_keypoint_mask_float = tf.cast(class_and_keypoint_mask,\n dtype=tf.float32)\n visible_keypoints = tf.math.greater(\n keypoint_scores, kp_params.rescoring_threshold)\n keypoint_scores = tf.where(\n visible_keypoints, keypoint_scores, tf.zeros_like(keypoint_scores))\n num_visible_keypoints = tf.reduce_sum(\n class_and_keypoint_mask_float *\n tf.cast(visible_keypoints, tf.float32), axis=-1)\n num_visible_keypoints = tf.math.maximum(num_visible_keypoints, 1.0)\n scores_for_class = (1./num_visible_keypoints) * (\n tf.reduce_sum(class_and_keypoint_mask_float *\n scores[:, :, tf.newaxis] *\n keypoint_scores, axis=-1))\n scores = tf.where(classes == class_id,\n scores_for_class,\n scores)\n return scores\n\n def preprocess(self, inputs):\n outputs = shape_utils.resize_images_and_return_shapes(\n inputs, self._image_resizer_fn)\n resized_inputs, true_image_shapes = outputs\n\n return (self._feature_extractor.preprocess(resized_inputs),\n true_image_shapes)\n\n def predict(self, preprocessed_inputs, _):\n \"\"\"Predicts CenterNet prediction tensors given an input batch.\n\n Feature extractors are free to produce predictions from multiple feature\n maps and therefore we return a dictionary mapping strings to lists.\n E.g. 
the hourglass backbone produces two feature maps.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float32 tensor\n representing a batch of images.\n\n Returns:\n prediction_dict: a dictionary holding predicted tensors with\n 'preprocessed_inputs' - The input image after being resized and\n preprocessed by the feature extractor.\n 'object_center' - A list of size num_feature_outputs containing\n float tensors of size [batch_size, output_height, output_width,\n num_classes] representing the predicted object center heatmap logits.\n 'box/scale' - [optional] A list of size num_feature_outputs holding\n float tensors of size [batch_size, output_height, output_width, 2]\n representing the predicted box height and width at each output\n location. This field exists only when object detection task is\n specified.\n 'box/offset' - [optional] A list of size num_feature_outputs holding\n float tensors of size [batch_size, output_height, output_width, 2]\n representing the predicted y and x offsets at each output location.\n '$TASK_NAME/keypoint_heatmap' - [optional] A list of size\n num_feature_outputs holding float tensors of size [batch_size,\n output_height, output_width, num_keypoints] representing the predicted\n keypoint heatmap logits.\n '$TASK_NAME/keypoint_offset' - [optional] A list of size\n num_feature_outputs holding float tensors of size [batch_size,\n output_height, output_width, 2] representing the predicted keypoint\n offsets at each output location.\n '$TASK_NAME/keypoint_regression' - [optional] A list of size\n num_feature_outputs holding float tensors of size [batch_size,\n output_height, output_width, 2 * num_keypoints] representing the\n predicted keypoint regression at each output location.\n 'segmentation/heatmap' - [optional] A list of size num_feature_outputs\n holding float tensors of size [batch_size, output_height,\n output_width, num_classes] representing the mask logits.\n 'densepose/heatmap' - [optional] A list of size num_feature_outputs\n holding float tensors of size [batch_size, output_height,\n output_width, num_parts] representing the mask logits for each part.\n 'densepose/regression' - [optional] A list of size num_feature_outputs\n holding float tensors of size [batch_size, output_height,\n output_width, 2 * num_parts] representing the DensePose surface\n coordinate predictions.\n Note the $TASK_NAME is provided by the KeypointEstimation namedtuple\n used to differentiate between different keypoint tasks.\n \"\"\"\n features_list = self._feature_extractor(preprocessed_inputs)\n\n predictions = {}\n for head_name, heads in self._prediction_head_dict.items():\n predictions[head_name] = [\n head(feature) for (feature, head) in zip(features_list, heads)\n ]\n predictions['preprocessed_inputs'] = preprocessed_inputs\n\n self._batched_prediction_tensor_names = predictions.keys()\n return predictions\n\n def loss(\n self, prediction_dict, true_image_shapes, scope=None,\n maximum_normalized_coordinate=1.1):\n \"\"\"Computes scalar loss tensors with respect to provided groundtruth.\n\n This function implements the various CenterNet losses.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors returned by\n \"predict\" function.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is of\n the form [height, width, channels] indicating the shapes of true images\n in the resized images, as resized images can be padded with zeros.\n scope: Optional scope name.\n maximum_normalized_coordinate: Maximum coordinate value to be 
considered\n as normalized, defaults to 1.1. This is used to check bounds when\n converting normalized coordinates to absolute coordinates.\n\n Returns:\n A dictionary mapping the keys [\n 'Loss/object_center',\n 'Loss/box/scale', (optional)\n 'Loss/box/offset', (optional)\n 'Loss/$TASK_NAME/keypoint/heatmap', (optional)\n 'Loss/$TASK_NAME/keypoint/offset', (optional)\n 'Loss/$TASK_NAME/keypoint/regression', (optional)\n 'Loss/segmentation/heatmap', (optional)\n 'Loss/densepose/heatmap', (optional)\n 'Loss/densepose/regression', (optional)\n 'Loss/track/reid', (optional)\n 'Loss/track/offset'] (optional)\n scalar tensors corresponding to the losses for different tasks. Note the\n $TASK_NAME is provided by the KeypointEstimation namedtuple used to\n differentiate between different keypoint tasks.\n \"\"\"\n\n _, input_height, input_width, _ = _get_shape(\n prediction_dict['preprocessed_inputs'], 4)\n\n output_height, output_width = (tf.maximum(input_height // self._stride, 1),\n tf.maximum(input_width // self._stride, 1))\n\n # TODO(vighneshb) Explore whether using floor here is safe.\n output_true_image_shapes = tf.ceil(\n tf.cast(true_image_shapes, tf.float32) / self._stride)\n valid_anchor_weights = get_valid_anchor_weights_in_flattened_image(\n output_true_image_shapes, output_height, output_width)\n valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2)\n\n object_center_loss = self._compute_object_center_loss(\n object_center_predictions=prediction_dict[OBJECT_CENTER],\n input_height=input_height,\n input_width=input_width,\n per_pixel_weights=valid_anchor_weights,\n maximum_normalized_coordinate=maximum_normalized_coordinate)\n losses = {\n OBJECT_CENTER:\n self._center_params.object_center_loss_weight * object_center_loss\n }\n if self._od_params is not None:\n od_losses = self._compute_object_detection_losses(\n input_height=input_height,\n input_width=input_width,\n prediction_dict=prediction_dict,\n per_pixel_weights=valid_anchor_weights)\n for key in od_losses:\n od_losses[key] = od_losses[key] * self._od_params.task_loss_weight\n losses.update(od_losses)\n\n if self._kp_params_dict is not None:\n for task_name, params in self._kp_params_dict.items():\n kp_losses = self._compute_keypoint_estimation_losses(\n task_name=task_name,\n input_height=input_height,\n input_width=input_width,\n prediction_dict=prediction_dict,\n per_pixel_weights=valid_anchor_weights)\n for key in kp_losses:\n kp_losses[key] = kp_losses[key] * params.task_loss_weight\n losses.update(kp_losses)\n\n if self._mask_params is not None:\n seg_losses = self._compute_segmentation_losses(\n prediction_dict=prediction_dict,\n per_pixel_weights=valid_anchor_weights)\n for key in seg_losses:\n seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight\n losses.update(seg_losses)\n\n if self._densepose_params is not None:\n densepose_losses = self._compute_densepose_losses(\n input_height=input_height,\n input_width=input_width,\n prediction_dict=prediction_dict)\n for key in densepose_losses:\n densepose_losses[key] = (\n densepose_losses[key] * self._densepose_params.task_loss_weight)\n losses.update(densepose_losses)\n\n if self._track_params is not None:\n track_losses = self._compute_track_losses(\n input_height=input_height,\n input_width=input_width,\n prediction_dict=prediction_dict)\n for key in track_losses:\n track_losses[key] = (\n track_losses[key] * self._track_params.task_loss_weight)\n losses.update(track_losses)\n\n if self._temporal_offset_params is not None:\n offset_losses = 
self._compute_temporal_offset_loss(\n input_height=input_height,\n input_width=input_width,\n prediction_dict=prediction_dict)\n for key in offset_losses:\n offset_losses[key] = (\n offset_losses[key] * self._temporal_offset_params.task_loss_weight)\n losses.update(offset_losses)\n\n # Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the\n # losses will be grouped together in Tensorboard.\n return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val)\n for key, val in losses.items()])\n\n def postprocess(self, prediction_dict, true_image_shapes, **params):\n \"\"\"Produces boxes given a prediction dict returned by predict().\n\n Although predict returns a list of tensors, only the last tensor in\n each list is used for making box predictions.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors from \"predict\"\n function.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is of\n the form [height, width, channels] indicating the shapes of true images\n in the resized images, as resized images can be padded with zeros.\n **params: Currently ignored.\n\n Returns:\n detections: a dictionary containing the following fields\n detection_boxes - A tensor of shape [batch, max_detections, 4]\n holding the predicted boxes.\n detection_boxes_strided: A tensor of shape [batch_size, num_detections,\n 4] holding the predicted boxes in absolute coordinates of the\n feature extractor's final layer output.\n detection_scores: A tensor of shape [batch, max_detections] holding\n the predicted score for each box.\n detection_multiclass_scores: A tensor of shape [batch, max_detection,\n num_classes] holding multiclass score for each box.\n detection_classes: An integer tensor of shape [batch, max_detections]\n containing the detected class for each box.\n num_detections: An integer tensor of shape [batch] containing the\n number of detected boxes for each sample in the batch.\n detection_keypoints: (Optional) A float tensor of shape [batch,\n max_detections, num_keypoints, 2] with normalized keypoints. Any\n invalid keypoints have their coordinates and scores set to 0.0.\n detection_keypoint_scores: (Optional) A float tensor of shape [batch,\n max_detection, num_keypoints] with scores for each keypoint.\n detection_masks: (Optional) A uint8 tensor of shape [batch,\n max_detections, mask_height, mask_width] with masks for each\n detection. Background is specified with 0, and foreground is specified\n with positive integers (1 for standard instance segmentation mask, and\n 1-indexed parts for DensePose task).\n detection_surface_coords: (Optional) A float32 tensor of shape [batch,\n max_detection, mask_height, mask_width, 2] with DensePose surface\n coordinates, in (v, u) format.\n detection_embeddings: (Optional) A float tensor of shape [batch,\n max_detections, reid_embed_size] containing object embeddings.\n \"\"\"\n object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])\n\n if true_image_shapes is None:\n # If true_image_shapes is not provided, we assume the whole image is valid\n # and infer the true_image_shapes from the object_center_prob shape.\n batch_size, strided_height, strided_width, _ = _get_shape(\n object_center_prob, 4)\n true_image_shapes = tf.stack(\n [strided_height * self._stride, strided_width * self._stride,\n tf.constant(len(self._feature_extractor._channel_means))]) # pylint: disable=protected-access\n true_image_shapes = tf.stack([true_image_shapes] * batch_size, axis=0)\n else:\n # Mask object centers by true_image_shape. 
[batch, h, w, 1]\n object_center_mask = mask_from_true_image_shape(\n _get_shape(object_center_prob, 4), true_image_shapes)\n object_center_prob *= object_center_mask\n\n # Get x, y and channel indices corresponding to the top indices in the class\n # center predictions.\n detection_scores, y_indices, x_indices, channel_indices = (\n top_k_feature_map_locations(\n object_center_prob,\n max_pool_kernel_size=self._center_params.peak_max_pool_kernel_size,\n k=self._center_params.max_box_predictions))\n multiclass_scores = tf.gather_nd(\n object_center_prob, tf.stack([y_indices, x_indices], -1), batch_dims=1)\n num_detections = tf.reduce_sum(\n tf.cast(detection_scores > 0, tf.int32), axis=1)\n postprocess_dict = {\n fields.DetectionResultFields.detection_scores: detection_scores,\n fields.DetectionResultFields.detection_multiclass_scores:\n multiclass_scores,\n fields.DetectionResultFields.detection_classes: channel_indices,\n fields.DetectionResultFields.num_detections: num_detections,\n }\n\n boxes_strided = None\n if self._od_params:\n boxes_strided = (\n prediction_tensors_to_boxes(y_indices, x_indices,\n prediction_dict[BOX_SCALE][-1],\n prediction_dict[BOX_OFFSET][-1]))\n\n boxes = convert_strided_predictions_to_normalized_boxes(\n boxes_strided, self._stride, true_image_shapes)\n\n postprocess_dict.update({\n fields.DetectionResultFields.detection_boxes: boxes,\n 'detection_boxes_strided': boxes_strided\n })\n\n if self._kp_params_dict:\n # If the model is trained to predict only one class of object and its\n # keypoint, we fall back to a simpler postprocessing function which uses\n # the ops that are supported by tf.lite on GPU.\n clip_keypoints = self._should_clip_keypoints()\n if len(self._kp_params_dict) == 1 and self._num_classes == 1:\n task_name, kp_params = next(iter(self._kp_params_dict.items()))\n keypoint_depths = None\n if kp_params.argmax_postprocessing:\n keypoints, keypoint_scores = (\n prediction_to_keypoints_argmax(\n prediction_dict,\n object_y_indices=y_indices,\n object_x_indices=x_indices,\n boxes=boxes_strided,\n task_name=task_name,\n kp_params=kp_params))\n else:\n (keypoints, keypoint_scores,\n keypoint_depths) = self._postprocess_keypoints_single_class(\n prediction_dict, channel_indices, y_indices, x_indices,\n boxes_strided, num_detections)\n keypoints, keypoint_scores = (\n convert_strided_predictions_to_normalized_keypoints(\n keypoints, keypoint_scores, self._stride, true_image_shapes,\n clip_out_of_frame_keypoints=clip_keypoints))\n if keypoint_depths is not None:\n postprocess_dict.update({\n fields.DetectionResultFields.detection_keypoint_depths:\n keypoint_depths\n })\n else:\n # Multi-class keypoint estimation task does not support depth\n # estimation.\n assert all([\n not kp_dict.predict_depth\n for kp_dict in self._kp_params_dict.values()\n ])\n keypoints, keypoint_scores = self._postprocess_keypoints_multi_class(\n prediction_dict, channel_indices, y_indices, x_indices,\n boxes_strided, num_detections)\n keypoints, keypoint_scores = (\n convert_strided_predictions_to_normalized_keypoints(\n keypoints, keypoint_scores, self._stride, true_image_shapes,\n clip_out_of_frame_keypoints=clip_keypoints))\n\n # Update instance scores based on keypoints.\n scores = self._rescore_instances(\n channel_indices, detection_scores, keypoint_scores)\n postprocess_dict.update({\n fields.DetectionResultFields.detection_scores: scores,\n fields.DetectionResultFields.detection_keypoints: keypoints,\n fields.DetectionResultFields.detection_keypoint_scores:\n 
keypoint_scores\n })\n if self._od_params is None:\n # Still output the box prediction by enclosing the keypoints for\n # evaluation purpose.\n boxes = keypoint_ops.keypoints_to_enclosing_bounding_boxes(\n keypoints, keypoints_axis=2)\n postprocess_dict.update({\n fields.DetectionResultFields.detection_boxes: boxes,\n })\n\n if self._mask_params:\n masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1])\n densepose_part_heatmap, densepose_surface_coords = None, None\n densepose_class_index = 0\n if self._densepose_params:\n densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1]\n densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1]\n densepose_class_index = self._densepose_params.class_id\n instance_masks, surface_coords = (\n convert_strided_predictions_to_instance_masks(\n boxes, channel_indices, masks, true_image_shapes,\n densepose_part_heatmap, densepose_surface_coords,\n stride=self._stride, mask_height=self._mask_params.mask_height,\n mask_width=self._mask_params.mask_width,\n score_threshold=self._mask_params.score_threshold,\n densepose_class_index=densepose_class_index))\n postprocess_dict[\n fields.DetectionResultFields.detection_masks] = instance_masks\n if self._densepose_params:\n postprocess_dict[\n fields.DetectionResultFields.detection_surface_coords] = (\n surface_coords)\n\n if self._track_params:\n embeddings = self._postprocess_embeddings(prediction_dict,\n y_indices, x_indices)\n postprocess_dict.update({\n fields.DetectionResultFields.detection_embeddings: embeddings\n })\n\n if self._temporal_offset_params:\n offsets = prediction_tensors_to_temporal_offsets(\n y_indices, x_indices,\n prediction_dict[TEMPORAL_OFFSET][-1])\n postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets\n\n if self._non_max_suppression_fn:\n boxes = tf.expand_dims(\n postprocess_dict.pop(fields.DetectionResultFields.detection_boxes),\n axis=-2)\n multiclass_scores = postprocess_dict[\n fields.DetectionResultFields.detection_multiclass_scores]\n num_valid_boxes = postprocess_dict.pop(\n fields.DetectionResultFields.num_detections)\n # Remove scores and classes as NMS will compute these form multiclass\n # scores.\n postprocess_dict.pop(fields.DetectionResultFields.detection_scores)\n postprocess_dict.pop(fields.DetectionResultFields.detection_classes)\n (nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,\n num_detections) = self._non_max_suppression_fn(\n boxes,\n multiclass_scores,\n additional_fields=postprocess_dict,\n num_valid_boxes=num_valid_boxes)\n postprocess_dict = nmsed_additional_fields\n postprocess_dict[\n fields.DetectionResultFields.detection_boxes] = nmsed_boxes\n postprocess_dict[\n fields.DetectionResultFields.detection_scores] = nmsed_scores\n postprocess_dict[\n fields.DetectionResultFields.detection_classes] = nmsed_classes\n postprocess_dict[\n fields.DetectionResultFields.num_detections] = num_detections\n postprocess_dict.update(nmsed_additional_fields)\n return postprocess_dict\n\n def postprocess_single_instance_keypoints(\n self,\n prediction_dict,\n true_image_shapes):\n \"\"\"Postprocess for predicting single instance keypoints.\n\n This postprocess function is a special case of predicting the keypoint of\n a single instance in the image (original CenterNet postprocess supports\n multi-instance prediction). 
Due to the simplification assumption, this\n postprocessing function achieves much faster inference time.\n Here is a short list of the modifications made in this function:\n\n 1) Assume the model predicts only single class keypoint.\n 2) Assume there is only one instance in the image. If multiple instances\n appear in the image, the model tends to predict the one that is closer\n to the image center (the other ones are considered as background and\n are rejected by the model).\n 3) Avoid using top_k ops in the postprocessing logics since it is slower\n than using argmax.\n 4) The predictions other than the keypoints are ignored, e.g. boxes.\n 5) The input batch size is assumed to be 1.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors from \"predict\"\n function.\n true_image_shapes: int32 tensor of shape [batch, 3] where each row is of\n the form [height, width, channels] indicating the shapes of true images\n in the resized images, as resized images can be padded with zeros.\n\n Returns:\n detections: a dictionary containing the following fields\n detection_keypoints: A float tensor of shape\n [1, 1, num_keypoints, 2] with normalized keypoints. Any invalid\n keypoints have their coordinates and scores set to 0.0.\n detection_keypoint_scores: A float tensor of shape\n [1, 1, num_keypoints] with scores for each keypoint.\n \"\"\"\n # The number of keypoint task is expected to be 1.\n assert len(self._kp_params_dict) == 1\n task_name, kp_params = next(iter(self._kp_params_dict.items()))\n keypoint_heatmap = tf.nn.sigmoid(prediction_dict[get_keypoint_name(\n task_name, KEYPOINT_HEATMAP)][-1])\n keypoint_offset = prediction_dict[get_keypoint_name(task_name,\n KEYPOINT_OFFSET)][-1]\n keypoint_regression = prediction_dict[get_keypoint_name(\n task_name, KEYPOINT_REGRESSION)][-1]\n object_heatmap = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])\n\n keypoint_depths = None\n if kp_params.predict_depth:\n keypoint_depths = prediction_dict[get_keypoint_name(\n task_name, KEYPOINT_DEPTH)][-1]\n keypoints, keypoint_scores, keypoint_depths = (\n prediction_to_single_instance_keypoints(\n object_heatmap=object_heatmap,\n keypoint_heatmap=keypoint_heatmap,\n keypoint_offset=keypoint_offset,\n keypoint_regression=keypoint_regression,\n kp_params=kp_params,\n keypoint_depths=keypoint_depths))\n\n keypoints, keypoint_scores = (\n convert_strided_predictions_to_normalized_keypoints(\n keypoints,\n keypoint_scores,\n self._stride,\n true_image_shapes,\n clip_out_of_frame_keypoints=False))\n postprocess_dict = {\n fields.DetectionResultFields.detection_keypoints: keypoints,\n fields.DetectionResultFields.detection_keypoint_scores: keypoint_scores\n }\n\n if kp_params.predict_depth:\n postprocess_dict.update({\n fields.DetectionResultFields.detection_keypoint_depths:\n keypoint_depths\n })\n return postprocess_dict\n\n def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):\n \"\"\"Performs postprocessing on embedding predictions.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors, returned from the\n predict() method. 
This dictionary should contain embedding prediction\n feature maps for tracking task.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n\n Returns:\n embeddings: A [batch_size, max_detection, reid_embed_size] float32\n tensor with L2 normalized embeddings extracted from detection box\n centers.\n \"\"\"\n embedding_predictions = prediction_dict[TRACK_REID][-1]\n embeddings = predicted_embeddings_at_object_centers(\n embedding_predictions, y_indices, x_indices)\n embeddings, _ = tf.linalg.normalize(embeddings, axis=-1)\n\n return embeddings\n\n def _scatter_keypoints_to_batch(self, num_ind, kpt_coords_for_example,\n kpt_scores_for_example,\n instance_inds_for_example, max_detections,\n total_num_keypoints):\n \"\"\"Helper function to convert scattered keypoints into batch.\"\"\"\n def left_fn(kpt_coords_for_example, kpt_scores_for_example,\n instance_inds_for_example):\n # Scatter into tensor where instances align with original detection\n # instances. New shape of keypoint coordinates and scores are\n # [1, max_detections, num_total_keypoints, 2] and\n # [1, max_detections, num_total_keypoints], respectively.\n return _pad_to_full_instance_dim(\n kpt_coords_for_example, kpt_scores_for_example,\n instance_inds_for_example,\n self._center_params.max_box_predictions)\n\n def right_fn():\n kpt_coords_for_example_all_det = tf.zeros(\n [1, max_detections, total_num_keypoints, 2], dtype=tf.float32)\n kpt_scores_for_example_all_det = tf.zeros(\n [1, max_detections, total_num_keypoints], dtype=tf.float32)\n return (kpt_coords_for_example_all_det,\n kpt_scores_for_example_all_det)\n\n left_fn = functools.partial(left_fn, kpt_coords_for_example,\n kpt_scores_for_example,\n instance_inds_for_example)\n\n # Use dimension values instead of tf.size for tf.lite compatibility.\n return tf.cond(num_ind[0] > 0, left_fn, right_fn)\n\n def _postprocess_keypoints_multi_class(self, prediction_dict, classes,\n y_indices, x_indices, boxes,\n num_detections):\n \"\"\"Performs postprocessing on keypoint predictions.\n\n This is the most general keypoint postprocessing function which supports\n multiple keypoint tasks (e.g. human and dog keypoints) and multiple object\n detection classes. Note that it is the most expensive postprocessing logics\n and is currently not tf.lite/tf.js compatible. See\n _postprocess_keypoints_single_class if you plan to export the model in more\n portable format.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors, returned from the\n predict() method. 
This dictionary should contain keypoint prediction\n feature maps for each keypoint task.\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n boxes: A [batch_size, max_detections, 4] float32 tensor with bounding\n boxes in (un-normalized) output space.\n num_detections: A [batch_size] int tensor with the number of valid\n detections for each image.\n\n Returns:\n A tuple of\n keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32\n tensor with keypoints in the output (strided) coordinate frame.\n keypoint_scores: a [batch_size, max_detections, num_total_keypoints]\n float32 tensor with keypoint scores.\n \"\"\"\n total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict\n in self._kp_params_dict.values())\n batch_size, max_detections = _get_shape(classes, 2)\n kpt_coords_for_example_list = []\n kpt_scores_for_example_list = []\n for ex_ind in range(batch_size):\n # The tensors that host the keypoint coordinates and scores for all\n # instances and all keypoints. They will be updated by scatter_nd_add for\n # each keypoint tasks.\n kpt_coords_for_example_all_det = tf.zeros(\n [max_detections, total_num_keypoints, 2])\n kpt_scores_for_example_all_det = tf.zeros(\n [max_detections, total_num_keypoints])\n for task_name, kp_params in self._kp_params_dict.items():\n keypoint_heatmap = prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]\n keypoint_offsets = prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]\n keypoint_regression = prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]\n instance_inds = self._get_instance_indices(\n classes, num_detections, ex_ind, kp_params.class_id)\n\n # Gather the feature map locations corresponding to the object class.\n y_indices_for_kpt_class = tf.gather(y_indices, instance_inds, axis=1)\n x_indices_for_kpt_class = tf.gather(x_indices, instance_inds, axis=1)\n if boxes is None:\n boxes_for_kpt_class = None\n else:\n boxes_for_kpt_class = tf.gather(boxes, instance_inds, axis=1)\n\n # Postprocess keypoints and scores for class and single image. Shapes\n # are [1, num_instances_i, num_keypoints_i, 2] and\n # [1, num_instances_i, num_keypoints_i], respectively. Note that\n # num_instances_i and num_keypoints_i refers to the number of\n # instances and keypoints for class i, respectively.\n (kpt_coords_for_class, kpt_scores_for_class, _) = (\n self._postprocess_keypoints_for_class_and_image(\n keypoint_heatmap,\n keypoint_offsets,\n keypoint_regression,\n classes,\n y_indices_for_kpt_class,\n x_indices_for_kpt_class,\n boxes_for_kpt_class,\n ex_ind,\n kp_params,\n ))\n\n # Prepare the indices for scatter_nd. 
The resulting combined_inds has\n # the shape of [num_instances_i * num_keypoints_i, 2], where the first\n # column corresponds to the instance IDs and the second column\n # corresponds to the keypoint IDs.\n kpt_inds = tf.constant(kp_params.keypoint_indices, dtype=tf.int32)\n kpt_inds = tf.expand_dims(kpt_inds, axis=0)\n instance_inds_expand = tf.expand_dims(instance_inds, axis=-1)\n kpt_inds_expand = kpt_inds * tf.ones_like(instance_inds_expand)\n instance_inds_expand = instance_inds_expand * tf.ones_like(kpt_inds)\n combined_inds = tf.stack(\n [instance_inds_expand, kpt_inds_expand], axis=2)\n combined_inds = tf.reshape(combined_inds, [-1, 2])\n\n # Reshape the keypoint coordinates/scores to [num_instances_i *\n # num_keypoints_i, 2]/[num_instances_i * num_keypoints_i] to be used\n # by scatter_nd_add.\n kpt_coords_for_class = tf.reshape(kpt_coords_for_class, [-1, 2])\n kpt_scores_for_class = tf.reshape(kpt_scores_for_class, [-1])\n kpt_coords_for_example_all_det = tf.tensor_scatter_nd_add(\n kpt_coords_for_example_all_det,\n combined_inds, kpt_coords_for_class)\n kpt_scores_for_example_all_det = tf.tensor_scatter_nd_add(\n kpt_scores_for_example_all_det,\n combined_inds, kpt_scores_for_class)\n\n kpt_coords_for_example_list.append(\n tf.expand_dims(kpt_coords_for_example_all_det, axis=0))\n kpt_scores_for_example_list.append(\n tf.expand_dims(kpt_scores_for_example_all_det, axis=0))\n\n # Concatenate all keypoints and scores from all examples in the batch.\n # Shapes are [batch_size, max_detections, num_total_keypoints, 2] and\n # [batch_size, max_detections, num_total_keypoints], respectively.\n keypoints = tf.concat(kpt_coords_for_example_list, axis=0)\n keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)\n\n return keypoints, keypoint_scores\n\n def _postprocess_keypoints_single_class(self, prediction_dict, classes,\n y_indices, x_indices, boxes,\n num_detections):\n \"\"\"Performs postprocessing on keypoint predictions (single class only).\n\n This function handles the special case of keypoint task that the model\n predicts only one class of the bounding box/keypoint (e.g. person). By the\n assumption, the function uses only tf.lite supported ops and should run\n faster.\n\n Args:\n prediction_dict: a dictionary holding predicted tensors, returned from the\n predict() method. This dictionary should contain keypoint prediction\n feature maps for each keypoint task.\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n boxes: A [batch_size, max_detections, 4] float32 tensor with bounding\n boxes in (un-normalized) output space.\n num_detections: A [batch_size] int tensor with the number of valid\n detections for each image.\n\n Returns:\n A tuple of\n keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32\n tensor with keypoints in the output (strided) coordinate frame.\n keypoint_scores: a [batch_size, max_detections, num_total_keypoints]\n float32 tensor with keypoint scores.\n \"\"\"\n # This function only works when there is only one keypoint task and the\n # number of classes equal to one. 
For more general use cases, please use\n # _postprocess_keypoints instead.\n assert len(self._kp_params_dict) == 1 and self._num_classes == 1\n task_name, kp_params = next(iter(self._kp_params_dict.items()))\n keypoint_heatmap = prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]\n keypoint_offsets = prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]\n keypoint_regression = prediction_dict[\n get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]\n keypoint_depth_predictions = None\n if kp_params.predict_depth:\n keypoint_depth_predictions = prediction_dict[get_keypoint_name(\n task_name, KEYPOINT_DEPTH)][-1]\n\n batch_size, _ = _get_shape(classes, 2)\n kpt_coords_for_example_list = []\n kpt_scores_for_example_list = []\n kpt_depths_for_example_list = []\n for ex_ind in range(batch_size):\n # Postprocess keypoints and scores for class and single image. Shapes\n # are [1, max_detections, num_keypoints, 2] and\n # [1, max_detections, num_keypoints], respectively.\n (kpt_coords_for_class, kpt_scores_for_class, kpt_depths_for_class) = (\n self._postprocess_keypoints_for_class_and_image(\n keypoint_heatmap,\n keypoint_offsets,\n keypoint_regression,\n classes,\n y_indices,\n x_indices,\n boxes,\n ex_ind,\n kp_params,\n keypoint_depth_predictions=keypoint_depth_predictions))\n\n kpt_coords_for_example_list.append(kpt_coords_for_class)\n kpt_scores_for_example_list.append(kpt_scores_for_class)\n kpt_depths_for_example_list.append(kpt_depths_for_class)\n\n # Concatenate all keypoints and scores from all examples in the batch.\n # Shapes are [batch_size, max_detections, num_keypoints, 2] and\n # [batch_size, max_detections, num_keypoints], respectively.\n keypoints = tf.concat(kpt_coords_for_example_list, axis=0)\n keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)\n\n keypoint_depths = None\n if kp_params.predict_depth:\n keypoint_depths = tf.concat(kpt_depths_for_example_list, axis=0)\n\n return keypoints, keypoint_scores, keypoint_depths\n\n def _get_instance_indices(self, classes, num_detections, batch_index,\n class_id):\n \"\"\"Gets the instance indices that match the target class ID.\n\n Args:\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n num_detections: A [batch_size] int tensor with the number of valid\n detections for each image.\n batch_index: An integer specifying the index for an example in the batch.\n class_id: Class id\n\n Returns:\n instance_inds: A [num_instances] int32 tensor where each element indicates\n the instance location within the `classes` tensor. 
This is useful to\n associate the refined keypoints with the original detections (i.e.\n boxes)\n \"\"\"\n classes = classes[batch_index:batch_index+1, ...]\n _, max_detections = shape_utils.combined_static_and_dynamic_shape(\n classes)\n # Get the detection indices corresponding to the target class.\n # Call tf.math.equal with matched tensor shape to make it tf.lite\n # compatible.\n valid_detections_with_kpt_class = tf.math.logical_and(\n tf.range(max_detections) < num_detections[batch_index],\n tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id)))\n instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]\n # Cast the indices tensor to int32 for tf.lite compatibility.\n return tf.cast(instance_inds, tf.int32)\n\n def _postprocess_keypoints_for_class_and_image(\n self,\n keypoint_heatmap,\n keypoint_offsets,\n keypoint_regression,\n classes,\n y_indices,\n x_indices,\n boxes,\n batch_index,\n kp_params,\n keypoint_depth_predictions=None):\n \"\"\"Postprocess keypoints for a single image and class.\n\n Args:\n keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32\n tensor with keypoint heatmaps.\n keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with\n local offsets to keypoint centers.\n keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]\n float32 tensor with regressed offsets to all keypoints.\n classes: A [batch_size, max_detections] int tensor with class indices for\n all detected objects.\n y_indices: A [batch_size, max_detections] int tensor with y indices for\n all object centers.\n x_indices: A [batch_size, max_detections] int tensor with x indices for\n all object centers.\n boxes: A [batch_size, max_detections, 4] float32 tensor with detected\n boxes in the output (strided) frame.\n batch_index: An integer specifying the index for an example in the batch.\n kp_params: A `KeypointEstimationParams` object with parameters for a\n single keypoint class.\n keypoint_depth_predictions: (optional) A [batch_size, height, width, 1]\n float32 tensor representing the keypoint depth prediction.\n\n Returns:\n A tuple of\n refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor\n with refined keypoints for a single class in a single image, expressed\n in the output (strided) coordinate frame. Note that `num_instances` is a\n dynamic dimension, and corresponds to the number of valid detections\n for the specific class.\n refined_scores: A [1, num_instances, num_keypoints] float32 tensor with\n keypoint scores.\n refined_depths: A [1, num_instances, num_keypoints] float32 tensor with\n keypoint depths. Return None if the input keypoint_depth_predictions is\n None.\n \"\"\"\n num_keypoints = len(kp_params.keypoint_indices)\n\n keypoint_heatmap = tf.nn.sigmoid(\n keypoint_heatmap[batch_index:batch_index+1, ...])\n keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...]\n keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...]\n keypoint_depths = None\n if keypoint_depth_predictions is not None:\n keypoint_depths = keypoint_depth_predictions[batch_index:batch_index + 1,\n ...]\n y_indices = y_indices[batch_index:batch_index+1, ...]\n x_indices = x_indices[batch_index:batch_index+1, ...]\n if boxes is None:\n boxes_slice = None\n else:\n boxes_slice = boxes[batch_index:batch_index+1, ...]\n\n # Gather the regressed keypoints. 
Final tensor has shape\n # [1, num_instances, num_keypoints, 2].\n regressed_keypoints_for_objects = regressed_keypoints_at_object_centers(\n keypoint_regression, y_indices, x_indices)\n regressed_keypoints_for_objects = tf.reshape(\n regressed_keypoints_for_objects, [1, -1, num_keypoints, 2])\n\n # Get the candidate keypoints and scores.\n # The shape of keypoint_candidates and keypoint_scores is:\n # [1, num_candidates_per_keypoint, num_keypoints, 2] and\n # [1, num_candidates_per_keypoint, num_keypoints], respectively.\n (keypoint_candidates, keypoint_scores, num_keypoint_candidates,\n keypoint_depth_candidates) = (\n prediction_tensors_to_keypoint_candidates(\n keypoint_heatmap,\n keypoint_offsets,\n keypoint_score_threshold=(\n kp_params.keypoint_candidate_score_threshold),\n max_pool_kernel_size=kp_params.peak_max_pool_kernel_size,\n max_candidates=kp_params.num_candidates_per_keypoint,\n keypoint_depths=keypoint_depths))\n\n kpts_std_dev_postprocess = [\n s * kp_params.std_dev_multiplier for s in kp_params.keypoint_std_dev\n ]\n # Get the refined keypoints and scores, of shape\n # [1, num_instances, num_keypoints, 2] and\n # [1, num_instances, num_keypoints], respectively.\n (refined_keypoints, refined_scores, refined_depths) = refine_keypoints(\n regressed_keypoints_for_objects,\n keypoint_candidates,\n keypoint_scores,\n num_keypoint_candidates,\n bboxes=boxes_slice,\n unmatched_keypoint_score=kp_params.unmatched_keypoint_score,\n box_scale=kp_params.box_scale,\n candidate_search_scale=kp_params.candidate_search_scale,\n candidate_ranking_mode=kp_params.candidate_ranking_mode,\n score_distance_offset=kp_params.score_distance_offset,\n keypoint_depth_candidates=keypoint_depth_candidates,\n keypoint_score_threshold=(kp_params.keypoint_candidate_score_threshold),\n score_distance_multiplier=kp_params.score_distance_multiplier,\n keypoint_std_dev=kpts_std_dev_postprocess)\n\n return refined_keypoints, refined_scores, refined_depths\n\n def regularization_losses(self):\n return []\n\n def restore_map(self,\n fine_tune_checkpoint_type='detection',\n load_all_detection_checkpoint_vars=False):\n raise RuntimeError('CenterNetMetaArch not supported under TF1.x.')\n\n def restore_from_objects(self, fine_tune_checkpoint_type='detection'):\n \"\"\"Returns a map of Trackable objects to load from a foreign checkpoint.\n\n Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module\n or Checkpoint). This enables the model to initialize based on weights from\n another task. For example, the feature extractor variables from a\n classification model can be used to bootstrap training of an object\n detector. When loading from an object detection model, the checkpoint model\n should have the same parameters as this detection model with exception of\n the num_classes parameter.\n\n Note that this function is intended to be used to restore Keras-based\n models when running Tensorflow 2, whereas restore_map (not implemented\n in CenterNet) is intended to be used to restore Slim-based models when\n running Tensorflow 1.x.\n\n TODO(jonathanhuang): Make this function consistent with other\n meta-architectures.\n\n Args:\n fine_tune_checkpoint_type: whether to restore from a full detection\n checkpoint (with compatible variable names) or to restore from a\n classification checkpoint for initialization prior to training.\n Valid values: `detection`, `classification`, `fine_tune`.\n Default 'detection'.\n 'detection': used when loading models pre-trained on other detection\n tasks. 
With this checkpoint type the weights of the feature extractor\n are expected under the attribute 'feature_extractor'.\n 'classification': used when loading models pre-trained on an image\n classification task. Note that only the encoder section of the network\n is loaded and not the upsampling layers. With this checkpoint type,\n the weights of only the encoder section are expected under the\n attribute 'feature_extractor'.\n 'fine_tune': used when loading the entire CenterNet feature extractor\n pre-trained on other tasks. The checkpoints saved during CenterNet\n model training can be directly loaded using this type. With this\n checkpoint type, the weights of the feature extractor are expected\n under the attribute 'model._feature_extractor'.\n For more details, see the tensorflow section on Loading mechanics.\n https://www.tensorflow.org/guide/checkpoint#loading_mechanics\n\n Returns:\n A dict mapping keys to Trackable objects (tf.Module or Checkpoint).\n \"\"\"\n\n if fine_tune_checkpoint_type == 'detection':\n feature_extractor_model = tf.train.Checkpoint(\n _feature_extractor=self._feature_extractor)\n return {'model': feature_extractor_model}\n\n elif fine_tune_checkpoint_type == 'classification':\n return {\n 'feature_extractor':\n self._feature_extractor.classification_backbone\n }\n elif fine_tune_checkpoint_type == 'full':\n return {'model': self}\n elif fine_tune_checkpoint_type == 'fine_tune':\n raise ValueError(('\"fine_tune\" is no longer supported for CenterNet. '\n 'Please set fine_tune_checkpoint_type to \"detection\"'\n ' which has the same functionality. If you are using'\n ' the ExtremeNet checkpoint, download the new version'\n ' from the model zoo.'))\n\n else:\n raise ValueError('Unknown fine tune checkpoint type {}'.format(\n fine_tune_checkpoint_type))\n\n def updates(self):\n if tf_version.is_tf2():\n raise RuntimeError('This model is intended to be used with model_lib_v2 '\n 'which does not support updates()')\n else:\n update_ops = []\n slim_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n # Copy the slim ops to avoid modifying the collection\n if slim_update_ops:\n update_ops.extend(slim_update_ops)\n return update_ops\n"
] |
[
[
"tensorflow.compat.v1.logical_and",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.train.Checkpoint",
"tensorflow.compat.v1.math.top_k",
"tensorflow.compat.v1.math.logical_and",
"tensorflow.compat.v1.keras.Sequential",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.math.greater_equal",
"tensorflow.compat.v1.math.greater",
"tensorflow.compat.v1.keras.layers.Dense",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.math.argmax",
"tensorflow.compat.v1.linalg.normalize",
"tensorflow.compat.v1.to_int32",
"tensorflow.compat.v1.ones",
"tensorflow.compat.v1.nn.sigmoid",
"tensorflow.compat.v1.math.multiply",
"tensorflow.compat.v1.tensor_scatter_nd_add",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.multiply",
"tensorflow.compat.v1.keras.layers.ReLU",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.math.count_nonzero",
"tensorflow.compat.v1.one_hot",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.math.reduce_min",
"tensorflow.compat.v1.zeros_like",
"tensorflow.compat.v1.math.argmin",
"tensorflow.compat.v1.unstack",
"tensorflow.compat.v1.gather_nd",
"tensorflow.compat.v1.transpose",
"tensorflow.compat.v1.keras.layers.BatchNormalization",
"tensorflow.compat.v1.where",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.math.divide",
"tensorflow.compat.v1.exp",
"tensorflow.compat.v1.keras.layers.UpSampling2D",
"tensorflow.compat.v1.split",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.fill",
"tensorflow.compat.v1.tile",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.minimum",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.nn.max_pool",
"tensorflow.compat.v1.keras.backend.flatten",
"tensorflow.compat.v1.math.reduce_max",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.cond",
"tensorflow.compat.v1.math.abs",
"tensorflow.compat.v1.reduce_max",
"tensorflow.compat.v1.stack",
"tensorflow.compat.v1.math.maximum",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.convert_to_tensor",
"tensorflow.compat.v1.scatter_nd",
"tensorflow.compat.v1.keras.initializers.constant",
"tensorflow.compat.v1.gather",
"tensorflow.compat.v1.range",
"tensorflow.compat.v2.image.crop_and_resize",
"tensorflow.compat.v1.squeeze",
"tensorflow.compat.v1.math.sqrt"
]
] |
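(Editor's aside, not part of either repository row.) The CenterNet `postprocess` code in the row above relies on `top_k_feature_map_locations` to turn the sigmoid-activated center heatmap into per-detection scores and (y, x, class) indices. The sketch below shows one plausible way such a peak decoder can be written with max-pool peak suppression and tf.math.top_k; it is an illustration under stated assumptions, not taken from the repository, and the name `decode_center_peaks` and its defaults are hypothetical stand-ins for `peak_max_pool_kernel_size` and `max_box_predictions`.

import tensorflow as tf

def decode_center_peaks(center_heatmap, max_pool_kernel_size=3, k=100):
    # Hypothetical helper (not from the repo above).
    # center_heatmap: [batch, height, width, num_classes] float32 tensor of
    # per-class center probabilities (i.e. after the sigmoid).
    # Keep a location only if it equals the maximum of its pooling window;
    # this suppresses non-peak neighbours before the top-k selection.
    pooled = tf.nn.max_pool2d(
        center_heatmap, ksize=max_pool_kernel_size, strides=1, padding='SAME')
    peaks = tf.where(tf.equal(center_heatmap, pooled),
                     center_heatmap, tf.zeros_like(center_heatmap))

    # Flatten (height, width, class) into one axis and take the k best peaks.
    shape = tf.shape(peaks)
    width, num_classes = shape[2], shape[3]
    flat = tf.reshape(peaks, [shape[0], -1])
    scores, flat_indices = tf.math.top_k(flat, k=k)

    # Undo the flattening: flat index = (y * width + x) * num_classes + class.
    channel_indices = flat_indices % num_classes
    spatial_indices = flat_indices // num_classes
    x_indices = spatial_indices % width
    y_indices = spatial_indices // width
    return scores, y_indices, x_indices, channel_indices

A decoder of this shape would pair naturally with the line in the original postprocess code that counts detections by summing how many of the k returned scores are positive.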
kads29/Project
|
[
"63c116ef450b2d0e0d51931fff10e1dbc0489864"
] |
[
"master/hopper_training/src/monoped_state.py"
] |
[
"#!/usr/bin/env python\n\nimport rospy\nimport copy\nfrom gazebo_msgs.msg import ContactsState\nfrom sensor_msgs.msg import Imu\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Point, Quaternion, Vector3\nfrom sensor_msgs.msg import JointState\nimport tf\nimport numpy\nimport math\n\n\"\"\"\n wrenches:\n -\n force:\n x: -0.134995398774\n y: -0.252811705608\n z: -0.0861598399337\n torque:\n x: -0.00194729925705\n y: 0.028723398244\n z: -0.081229664152\n total_wrench:\n force:\n x: -0.134995398774\n y: -0.252811705608\n z: -0.0861598399337\n torque:\n x: -0.00194729925705\n y: 0.028723398244\n z: -0.081229664152\n contact_positions:\n -\n x: -0.0214808318267\n y: 0.00291348151391\n z: -0.000138379966267\n contact_normals:\n -\n x: 0.0\n y: 0.0\n z: 1.0\n depths: [0.000138379966266991]\n -\n info: \"Debug: i:(2/4) my geom:monoped::lowerleg::lowerleg_contactsensor_link_collision_1\\\n \\ other geom:ground_plane::link::collision time:50.405000000\\n\"\n collision1_name: \"monoped::lowerleg::lowerleg_contactsensor_link_collision_1\"\n collision2_name: \"ground_plane::link::collision\"\n\n\"\"\"\n\"\"\"\nstd_msgs/Header header\n uint32 seq\n time stamp\n string frame_id\ngazebo_msgs/ContactState[] states\n string info\n string collision1_name\n string collision2_name\n geometry_msgs/Wrench[] wrenches\n geometry_msgs/Vector3 force\n float64 x\n float64 y\n float64 z\n geometry_msgs/Vector3 torque\n float64 x\n float64 y\n float64 z\n geometry_msgs/Wrench total_wrench\n geometry_msgs/Vector3 force\n float64 x\n float64 y\n float64 z\n geometry_msgs/Vector3 torque\n float64 x\n float64 y\n float64 z\n geometry_msgs/Vector3[] contact_positions\n float64 x\n float64 y\n float64 z\n geometry_msgs/Vector3[] contact_normals\n float64 x\n float64 y\n float64 z\n float64[] depths\n\"\"\"\n\nclass MonopedState(object):\n\n def __init__(self, max_height, min_height, abs_max_roll, abs_max_pitch, list_of_observations, joint_limits, episode_done_criteria, joint_increment_value = 0.05, done_reward = -1000.0, alive_reward=10.0, desired_force=7.08, desired_yaw=0.0, weight_r1=1.0, weight_r2=1.0, weight_r3=1.0, weight_r4=1.0, weight_r5=1.0, discrete_division=10, maximum_base_linear_acceleration=3000.0, maximum_base_angular_velocity=20.0, maximum_joint_effort=10.0, jump_increment=0.7):\n rospy.logdebug(\"Starting MonopedState Class object...\")\n self.desired_world_point = Vector3(0.0, 0.0, 0.0)\n self._min_height = min_height\n self._max_height = max_height\n self._abs_max_roll = abs_max_roll\n self._abs_max_pitch = abs_max_pitch\n self._joint_increment_value = joint_increment_value\n self._done_reward = done_reward\n self._alive_reward = alive_reward\n self._desired_force = desired_force\n self._desired_yaw = desired_yaw\n\n self._weight_r1 = weight_r1\n self._weight_r2 = weight_r2\n self._weight_r3 = weight_r3\n self._weight_r4 = weight_r4\n self._weight_r5 = weight_r5\n\n self._list_of_observations = list_of_observations\n\n # Dictionary with the max and min of each of the joints\n self._joint_limits = joint_limits\n\n # Maximum base linear acceleration values\n self.maximum_base_linear_acceleration = maximum_base_linear_acceleration\n\n # Maximum Angular Velocity value\n # By maximum means the value that we consider relevant maximum, the sensor might pick up higher\n # But its an equilibrium between precission and number of divisions of the sensors data.\n self.maximum_base_angular_velocity = maximum_base_angular_velocity\n\n self.maximum_joint_effort = maximum_joint_effort\n\n # List of all the 
Done Episode Criteria\n self._episode_done_criteria = episode_done_criteria\n assert len(self._episode_done_criteria) != 0, \"Episode_done_criteria list is empty. Minimum one value\"\n\n self._discrete_division = discrete_division\n\n self.jump_increment = jump_increment\n # We init the observation ranges and create the bins for all the observations\n self.init_bins()\n\n self.base_position = Point()\n self.base_orientation = Quaternion()\n self.base_angular_velocity = Vector3()\n self.base_linear_acceleration = Vector3()\n self.contact_force = Vector3()\n self.joints_state = JointState()\n\n # We only use Odom for the height detection and planar position,\n # because in real robots this data is not trivial.\n rospy.Subscriber(\"/odom\", Odometry, self.odom_callback)\n # We use the IMU for orientation and linear acceleration detection\n rospy.Subscriber(\"/monoped/imu/data\", Imu, self.imu_callback)\n # We use it to get the contact force, to know if it's in the air or stomping too hard.\n rospy.Subscriber(\"/lowerleg_contactsensor_state\", ContactsState, self.contact_callback)\n # We use it to get the joint positions and calculate the reward associated with them\n rospy.Subscriber(\"/monoped/joint_states\", JointState, self.joints_state_callback)\n\n def check_all_systems_ready(self):\n \"\"\"\n We check that all systems are ready\n :return:\n \"\"\"\n data_pose = None\n while data_pose is None and not rospy.is_shutdown():\n try:\n data_pose = rospy.wait_for_message(\"/odom\", Odometry, timeout=0.1)\n self.base_position = data_pose.pose.pose.position\n rospy.logdebug(\"Current odom READY\")\n except:\n rospy.logdebug(\"Current odom pose not ready yet, retrying to get the robot base_position\")\n\n imu_data = None\n while imu_data is None and not rospy.is_shutdown():\n try:\n imu_data = rospy.wait_for_message(\"/monoped/imu/data\", Imu, timeout=0.1)\n self.base_orientation = imu_data.orientation\n self.base_angular_velocity = imu_data.angular_velocity\n self.base_linear_acceleration = imu_data.linear_acceleration\n rospy.logdebug(\"Current imu_data READY\")\n except:\n rospy.logdebug(\"Current imu_data not ready yet, retrying to get the robot base_orientation and base_linear_acceleration\")\n\n contacts_data = None\n while contacts_data is None and not rospy.is_shutdown():\n try:\n contacts_data = rospy.wait_for_message(\"/lowerleg_contactsensor_state\", ContactsState, timeout=0.1)\n for state in contacts_data.states:\n self.contact_force = state.total_wrench.force\n rospy.logdebug(\"Current contacts_data READY\")\n except:\n rospy.logdebug(\"Current contacts_data not ready yet, retrying\")\n\n joint_states_msg = None\n while joint_states_msg is None and not rospy.is_shutdown():\n try:\n joint_states_msg = rospy.wait_for_message(\"/monoped/joint_states\", JointState, timeout=0.1)\n self.joints_state = joint_states_msg\n rospy.logdebug(\"Current joint_states READY\")\n except Exception as e:\n rospy.logdebug(\"Current joint_states not ready yet, retrying==>\"+str(e))\n\n rospy.logdebug(\"ALL SYSTEMS READY\")\n\n def set_desired_world_point(self, x, y, z):\n \"\"\"\n Point where you want the Monoped to be\n :return:\n \"\"\"\n self.desired_world_point.x = x\n self.desired_world_point.y = y\n self.desired_world_point.z = z\n\n\n def get_base_height(self):\n height = self.base_position.z\n rospy.logdebug(\"BASE-HEIGHT=\"+str(height))\n return height\n\n def get_base_rpy(self):\n euler_rpy = Vector3()\n euler = tf.transformations.euler_from_quaternion(\n [self.base_orientation.x, 
self.base_orientation.y, self.base_orientation.z, self.base_orientation.w])\n\n euler_rpy.x = euler[0]\n euler_rpy.y = euler[1]\n euler_rpy.z = euler[2]\n return euler_rpy\n\n def get_base_angular_velocity(self):\n return self.base_angular_velocity\n\n def get_base_linear_acceleration(self):\n return self.base_linear_acceleration\n\n def get_distance_from_point(self, p_end):\n \"\"\"\n Given a Vector3 Object, get the distance from the current position\n :param p_end:\n :return:\n \"\"\"\n a = numpy.array((self.base_position.x, self.base_position.y, self.base_position.z))\n b = numpy.array((p_end.x, p_end.y, p_end.z))\n\n distance = numpy.linalg.norm(a - b)\n\n return distance\n\n def get_contact_force_magnitude(self):\n \"\"\"\n You will see that because the X axis is the one pointing downwards, it will be the one with\n the higher value when touching the floor.\n For a Robot of total mass of 0.55 kg, a gravity of 9.81 m/sec**2, Weight = 0.55*9.81=5.39 N\n Falling from around 5 centimetres (a negligible height), we register peaks around\n Fx = 7.08 N\n :return:\n \"\"\"\n contact_force = self.contact_force\n contact_force_np = numpy.array((contact_force.x, contact_force.y, contact_force.z))\n force_magnitude = numpy.linalg.norm(contact_force_np)\n\n return force_magnitude\n\n def get_joint_states(self):\n return self.joints_state\n\n def odom_callback(self,msg):\n self.base_position = msg.pose.pose.position\n\n def imu_callback(self,msg):\n self.base_orientation = msg.orientation\n self.base_angular_velocity = msg.angular_velocity\n self.base_linear_acceleration = msg.linear_acceleration\n\n def contact_callback(self,msg):\n \"\"\"\n /lowerleg_contactsensor_state/states[0]/contact_positions ==> PointContact in World\n /lowerleg_contactsensor_state/states[0]/contact_normals ==> NormalContact in World\n\n ==> One is an array of all the forces, the other the total,\n and both are relative to the contact link referred to in the sensor.\n /lowerleg_contactsensor_state/states[0]/wrenches[]\n /lowerleg_contactsensor_state/states[0]/total_wrench\n :param msg:\n :return:\n \"\"\"\n for state in msg.states:\n self.contact_force = state.total_wrench.force\n\n def joints_state_callback(self,msg):\n self.joints_state = msg\n\n def monoped_height_ok(self):\n\n height_ok = self._min_height <= self.get_base_height() < self._max_height\n return height_ok\n\n def monoped_orientation_ok(self):\n\n orientation_rpy = self.get_base_rpy()\n roll_ok = self._abs_max_roll > abs(orientation_rpy.x)\n pitch_ok = self._abs_max_pitch > abs(orientation_rpy.y)\n orientation_ok = roll_ok and pitch_ok\n return orientation_ok\n\n def calculate_reward_joint_position(self, weight=1.0):\n \"\"\"\n We calculate the reward based on the joint configuration. The nearer to 0, the better.\n :return:\n \"\"\"\n accumulated_joint_pos = 0.0\n for joint_pos in self.joints_state.position:\n # Abs to remove sign influence; the direction of turn does not matter.\n accumulated_joint_pos += abs(joint_pos)\n rospy.logdebug(\"calculate_reward_joint_position>>accumulated_joint_pos=\" + str(accumulated_joint_pos))\n reward = weight * accumulated_joint_pos\n rospy.logdebug(\"calculate_reward_joint_position>>reward=\" + str(reward))\n return reward\n\n def calculate_reward_joint_effort(self, weight=1.0):\n \"\"\"\n We calculate the reward based on the joint effort readings. 
The nearer to 0, the better.\n :return:\n \"\"\"\n accumulated_joint_effort = 0.0\n for joint_effort in self.joints_state.effort:\n # Abs to remove sign influence; the direction of the effort does not matter.\n accumulated_joint_effort += abs(joint_effort)\n rospy.logdebug(\"calculate_reward_joint_effort>>joint_effort=\" + str(joint_effort))\n rospy.logdebug(\"calculate_reward_joint_effort>>accumulated_joint_effort=\" + str(accumulated_joint_effort))\n reward = weight * accumulated_joint_effort\n rospy.logdebug(\"calculate_reward_joint_effort>>reward=\" + str(reward))\n return reward\n\n def calculate_reward_contact_force(self, weight=1.0):\n \"\"\"\n We calculate the reward based on the contact force.\n The nearer to the desired contact force, the better.\n We use the absolute deviation to penalise departures from the desired force.\n The default desired force (7.08 N) was taken from a reading of the robot touching\n the ground from a negligible height of 5 cm.\n :return:\n \"\"\"\n force_magnitude = self.get_contact_force_magnitude()\n force_displacement = force_magnitude - self._desired_force\n\n rospy.logdebug(\"calculate_reward_contact_force>>force_magnitude=\" + str(force_magnitude))\n rospy.logdebug(\"calculate_reward_contact_force>>force_displacement=\" + str(force_displacement))\n # Abs to remove sign\n reward = weight * abs(force_displacement)\n rospy.logdebug(\"calculate_reward_contact_force>>reward=\" + str(reward))\n return reward\n\n def calculate_reward_orientation(self, weight=1.0):\n \"\"\"\n We calculate the reward based on the orientation.\n The closer it is to 0, the better, because it means it is upright.\n desired_yaw is the yaw that we want it to have;\n to reward a certain orientation, this is where to set it.\n :return:\n \"\"\"\n current_orientation = self.get_base_rpy()\n yaw_displacement = current_orientation.z - self._desired_yaw\n rospy.logdebug(\"calculate_reward_orientation>>[R,P,Y]=\" + str(current_orientation))\n accumulated_orientation_displacement = abs(current_orientation.x) + abs(current_orientation.y) + abs(yaw_displacement)\n reward = weight * accumulated_orientation_displacement\n rospy.logdebug(\"calculate_reward_orientation>>reward=\" + str(reward))\n return reward\n\n def calculate_reward_distance_from_des_point(self, weight=1.0):\n \"\"\"\n We calculate the distance from the desired point.\n The closer, the better.\n :param weight:\n :return: reward\n \"\"\"\n distance = self.get_distance_from_point(self.desired_world_point)\n reward = weight * distance\n rospy.logdebug(\"calculate_reward_distance_from_des_point>>reward=\" + str(reward))\n return reward\n\n def calculate_total_reward(self):\n \"\"\"\n We consider a VERY BAD REWARD to be -7 or less.\n The perfect reward is 0.0, and the total reward 1.0.\n The default values are chosen so that when the robot has fallen or has a very extreme joint config:\n r1 = -8.04\n r2 = -8.84\n r3 = -7.08\n r4 = -10.0 ==> We give priority to this, giving it a higher value.\n :return:\n \"\"\"\n\n r1 = self.calculate_reward_joint_position(self._weight_r1)\n r2 = self.calculate_reward_joint_effort(self._weight_r2)\n # Desired Force in Newtons, taken from idle contact with 9.81 gravity.\n r3 = self.calculate_reward_contact_force(self._weight_r3)\n r4 = self.calculate_reward_orientation(self._weight_r4)\n r5 = self.calculate_reward_distance_from_des_point(self._weight_r5)\n\n # The sign depends on each term's function.\n total_reward = self._alive_reward - r1 - r2 - r3 - r4 - r5\n\n rospy.logdebug(\"###############\")\n rospy.logdebug(\"alive_bonus=\" + str(self._alive_reward))\n 
rospy.logdebug(\"r1 joint_position=\" + str(r1))\n rospy.logdebug(\"r2 joint_effort=\" + str(r2))\n rospy.logdebug(\"r3 contact_force=\" + str(r3))\n rospy.logdebug(\"r4 orientation=\" + str(r4))\n rospy.logdebug(\"r5 distance=\" + str(r5))\n rospy.logdebug(\"total_reward=\" + str(total_reward))\n rospy.logdebug(\"###############\")\n\n return total_reward\n\n\n\n\n def get_observations(self):\n \"\"\"\n Returns the state of the robot needed for OpenAI QLearn Algorithm\n The state will be defined by an array of the:\n 1) distance from desired point in meters\n 2) The pitch orientation in radians\n 3) the Roll orientation in radians\n 4) the Yaw orientation in radians\n 5) Force in contact sensor in Newtons\n 6-7-8) State of the 3 joints in radians\n\n observation = [distance_from_desired_point,\n base_roll,\n base_pitch,\n base_yaw,\n base_angular_vel_x,\n base_angular_vel_y,\n base_angular_vel_z,\n base_linear_acceleration_x,\n base_linear_acceleration_y,\n base_linear_acceleration_z,\n contact_force,\n joint_states_haa,\n joint_states_hfe,\n joint_states_kfe]\n\n :return: observation\n \"\"\"\n\n distance_from_desired_point = self.get_distance_from_point(self.desired_world_point)\n\n base_orientation = self.get_base_rpy()\n base_roll = base_orientation.x\n base_pitch = base_orientation.y\n base_yaw = base_orientation.z\n\n base_angular_velocity = self.get_base_angular_velocity()\n base_angular_vel_x = base_angular_velocity.x\n base_angular_vel_y = base_angular_velocity.y\n base_angular_vel_z = base_angular_velocity.z\n\n base_linear_acceleration = self.get_base_linear_acceleration()\n base_linear_acceleration_x = base_linear_acceleration.x\n base_linear_acceleration_y = base_linear_acceleration.y\n base_linear_acceleration_z = base_linear_acceleration.z\n\n contact_force = self.get_contact_force_magnitude()\n\n joint_states = self.get_joint_states()\n joint_states_haa = joint_states.position[0]\n joint_states_hfe = joint_states.position[1]\n joint_states_kfe = joint_states.position[2]\n\n joint_effort_haa = joint_states.effort[0]\n joint_effort_hfe = joint_states.effort[1]\n joint_effort_kfe = joint_states.effort[2]\n\n observation = []\n rospy.logdebug(\"List of Observations==>\"+str(self._list_of_observations))\n for obs_name in self._list_of_observations:\n if obs_name == \"distance_from_desired_point\":\n observation.append(distance_from_desired_point)\n elif obs_name == \"base_roll\":\n observation.append(base_roll)\n elif obs_name == \"base_pitch\":\n observation.append(base_pitch)\n elif obs_name == \"base_yaw\":\n observation.append(base_yaw)\n elif obs_name == \"contact_force\":\n observation.append(contact_force)\n elif obs_name == \"joint_states_haa\":\n observation.append(joint_states_haa)\n elif obs_name == \"joint_states_hfe\":\n observation.append(joint_states_hfe)\n elif obs_name == \"joint_states_kfe\":\n observation.append(joint_states_kfe)\n elif obs_name == \"joint_effort_haa\":\n observation.append(joint_effort_haa)\n elif obs_name == \"joint_effort_hfe\":\n observation.append(joint_effort_hfe)\n elif obs_name == \"joint_effort_kfe\":\n observation.append(joint_effort_kfe)\n elif obs_name == \"base_angular_vel_x\":\n observation.append(base_angular_vel_x)\n elif obs_name == \"base_angular_vel_y\":\n observation.append(base_angular_vel_y)\n elif obs_name == \"base_angular_vel_z\":\n observation.append(base_angular_vel_z)\n elif obs_name == \"base_linear_acceleration_x\":\n observation.append(base_linear_acceleration_x)\n elif obs_name == 
\"base_linear_acceleration_y\":\n observation.append(base_linear_acceleration_y)\n elif obs_name == \"base_linear_acceleration_z\":\n observation.append(base_linear_acceleration_z)\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n return observation\n\n def get_state_as_string(self, observation):\n \"\"\"\n This function will do two things:\n 1) It will make discrete the observations\n 2) Will convert the discrete observations in to state tags strings\n :param observation:\n :return: state\n \"\"\"\n observations_discrete = self.assign_bins(observation)\n string_state = ''.join(map(str, observations_discrete))\n rospy.logdebug(\"STATE==>\"+str(string_state))\n return string_state\n\n def assign_bins(self, observation):\n \"\"\"\n Will make observations discrete by placing each value into its corresponding bin\n :param observation:\n :return:\n \"\"\"\n rospy.logdebug(\"Observations>>\"+str(observation))\n\n state_discrete = numpy.zeros(len(self._list_of_observations), dtype=numpy.int32)\n for i in range(len(self._list_of_observations)):\n # We convert to int because anyway it will be round floats. We add Right True to include limits\n # Ex: [-20, 0, 20], value=-20 ==> index=0, In right = False, would be index=1\n state_discrete[i] = int(numpy.digitize(observation[i], self._bins[i], right=True))\n rospy.logdebug(\"bin=\"+str(self._bins[i])+\"obs=\"+str(observation[i])+\",end_val=\"+str(state_discrete[i]))\n\n rospy.logdebug(str(state_discrete))\n return state_discrete\n\n def init_bins(self):\n \"\"\"\n We initalise all related to the bins\n :return:\n \"\"\"\n self.fill_observations_ranges()\n self.create_bins()\n\n def fill_observations_ranges(self):\n \"\"\"\n We create the dictionary for the ranges of the data related to each observation\n :return:\n \"\"\"\n self._obs_range_dict = {}\n for obs_name in self._list_of_observations:\n\n if obs_name == \"distance_from_desired_point\":\n # We consider the range as based on the range of distance allowed in height\n delta = self._max_height - self._min_height\n max_value = delta\n min_value = -delta\n elif obs_name == \"base_roll\":\n max_value = self._abs_max_roll\n min_value = -self._abs_max_roll\n elif obs_name == \"base_pitch\":\n max_value = self._abs_max_pitch\n min_value = -self._abs_max_pitch\n elif obs_name == \"base_yaw\":\n # We consider that 360 degrees is max range\n max_value = 2*math.pi\n min_value = -2*math.pi\n elif obs_name == \"contact_force\":\n # We consider that no force is the minimum, and the maximum is 2 times the desired\n # We dont want to make a very big range because we might loose the desired force\n # in the middle.\n max_value = 2*self._desired_force\n min_value = 0.0\n\n elif obs_name == \"joint_states_haa\":\n # We consider the URDF maximum values\n max_value = self._joint_limits[\"haa_max\"]\n min_value = self._joint_limits[\"haa_min\"]\n elif obs_name == \"joint_states_hfe\":\n max_value = self._joint_limits[\"hfe_max\"]\n min_value = self._joint_limits[\"hfe_min\"]\n elif obs_name == \"joint_states_kfe\":\n max_value = self._joint_limits[\"kfe_max\"]\n min_value = self._joint_limits[\"kfe_min\"]\n\n elif obs_name == \"joint_effort_haa\":\n # We consider the URDF maximum values\n max_value = self.maximum_joint_effort\n min_value = -self.maximum_joint_effort\n elif obs_name == \"joint_effort_hfe\":\n max_value = self.maximum_joint_effort\n min_value = -self.maximum_joint_effort\n elif obs_name == \"joint_effort_kfe\":\n max_value = self.maximum_joint_effort\n min_value = 
-self.maximum_joint_effort\n\n\n elif obs_name == \"base_angular_vel_x\":\n max_value = self.maximum_base_angular_velocity\n min_value = -self.maximum_base_angular_velocity\n elif obs_name == \"base_angular_vel_y\":\n max_value = self.maximum_base_angular_velocity\n min_value = -self.maximum_base_angular_velocity\n elif obs_name == \"base_angular_vel_z\":\n max_value = self.maximum_base_angular_velocity\n min_value = -self.maximum_base_angular_velocity\n\n elif obs_name == \"base_linear_acceleration_x\":\n max_value = self.maximum_base_linear_acceleration\n min_value = -self.maximum_base_linear_acceleration\n elif obs_name == \"base_linear_acceleration_y\":\n max_value = self.maximum_base_linear_acceleration\n min_value = -self.maximum_base_linear_acceleration\n elif obs_name == \"base_linear_acceleration_z\":\n max_value = self.maximum_base_linear_acceleration\n min_value = -self.maximum_base_linear_acceleration\n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n self._obs_range_dict[obs_name] = [min_value,max_value]\n\n def create_bins(self):\n \"\"\"\n We create the Bins for the discretization of the observations\n self.desired_world_point = Vector3(0.0, 0.0, 0.0)\n self._min_height = min_height\n self._max_height = max_height\n self._abs_max_roll = abs_max_roll\n self._abs_max_pitch = abs_max_pitch\n self._joint_increment_value = joint_increment_value\n self._done_reward = done_reward\n self._alive_reward = alive_reward\n self._desired_force = desired_force\n self._desired_yaw = desired_yaw\n\n\n :return:bins\n \"\"\"\n\n number_of_observations = len(self._list_of_observations)\n parts_we_disrcetize = self._discrete_division\n rospy.logdebug(\"Parts to discretise==>\"+str(parts_we_disrcetize))\n self._bins = numpy.zeros((number_of_observations, parts_we_disrcetize))\n for counter in range(number_of_observations):\n obs_name = self._list_of_observations[counter]\n min_value = self._obs_range_dict[obs_name][0]\n max_value = self._obs_range_dict[obs_name][1]\n self._bins[counter] = numpy.linspace(min_value, max_value, parts_we_disrcetize)\n\n rospy.logdebug(\"bins==>\" + str(self._bins[counter]))\n\n def init_joints_pose(self, des_init_pos):\n \"\"\"\n We initialise the Position variable that saves the desired position where we want our\n joints to be\n :param init_pos:\n :return:\n \"\"\"\n self.current_joint_pose =[]\n self.current_joint_pose = copy.deepcopy(des_init_pos)\n self.init_knee_value = copy.deepcopy(self.current_joint_pose[2])\n return self.current_joint_pose\n\n def get_action_to_position(self, action):\n \"\"\"\n Here we have the ACtions number to real joint movement correspondance.\n :param action: Integer that goes from 0 to 6, because we have 7 actions.\n :return:\n \"\"\"\n\n rospy.logdebug(\"current joint pose>>>\"+str(self.current_joint_pose))\n rospy.logdebug(\"Action Number>>>\"+str(action))\n # We dont want to jump unless the action jump is selected\n do_jump = False\n\n if action == 0: #Increment haa_joint\n rospy.logdebug(\"Action Decided:Increment haa_joint>>>\")\n self.current_joint_pose[0] += self._joint_increment_value\n elif action == 1: #Decrement haa_joint\n rospy.logdebug(\"Action Decided:Decrement haa_joint>>>\")\n self.current_joint_pose[0] -= self._joint_increment_value\n elif action == 2: #Increment hfe_joint\n rospy.logdebug(\"Action Decided:Increment hfe_joint>>>\")\n self.current_joint_pose[1] += self._joint_increment_value\n elif action == 3: #Decrement hfe_joint\n rospy.logdebug(\"Action Decided:Decrement 
hfe_joint>>>\")\n self.current_joint_pose[1] -= self._joint_increment_value\n elif action == 4: # Dont Move\n rospy.logdebug(\"Action Decided:Dont Move>>>\")\n elif action == 5: # Perform One Jump\n rospy.logdebug(\"Action Decided:Perform One Jump>>>\")\n # We get the Value Used for the Knee charged position\n do_jump = True\n\n # We set the Knee to be ready for jump, based on the init knee pose\n self.current_joint_pose[2] = self.init_knee_value\n\n rospy.logdebug(\"action to move joint states>>>\" + str(self.current_joint_pose))\n\n self.clamp_to_joint_limits()\n\n return self.current_joint_pose, do_jump\n\n def clamp_to_joint_limits(self):\n \"\"\"\n clamps self.current_joint_pose based on the joint limits\n self._joint_limits\n {\"haa_max\": haa_max,\n \"haa_min\": haa_min,\n \"hfe_max\": hfe_max,\n \"hfe_min\": hfe_min,\n \"kfe_max\": kfe_max,\n \"kfe_min\": kfe_min\n }\n :return:\n \"\"\"\n\n rospy.logdebug(\"Clamping current_joint_pose>>>\" + str(self.current_joint_pose))\n haa_joint_value = self.current_joint_pose[0]\n hfe_joint_value = self.current_joint_pose[1]\n kfe_joint_value = self.current_joint_pose[2]\n\n self.current_joint_pose[0] = max(min(haa_joint_value, self._joint_limits[\"haa_max\"]),\n self._joint_limits[\"haa_min\"])\n self.current_joint_pose[1] = max(min(hfe_joint_value, self._joint_limits[\"hfe_max\"]),\n self._joint_limits[\"hfe_min\"])\n\n\n rospy.logdebug(\"kfe_min>>>\" + str(self._joint_limits[\"kfe_min\"]))\n rospy.logdebug(\"kfe_max>>>\" + str(self._joint_limits[\"kfe_max\"]))\n self.current_joint_pose[2] = max(min(kfe_joint_value, self._joint_limits[\"kfe_max\"]),\n self._joint_limits[\"kfe_min\"])\n\n rospy.logdebug(\"DONE Clamping current_joint_pose>>>\" + str(self.current_joint_pose))\n\n\n def process_data(self):\n \"\"\"\n We return the total reward based on the state in which we are in and if its done or not\n ( it fell basically )\n :return: reward, done\n \"\"\"\n\n if \"monoped_minimum_height\" in self._episode_done_criteria:\n monoped_height_ok = self.monoped_height_ok()\n else:\n rospy.logdebug(\"monoped_height_ok NOT TAKEN INTO ACCOUNT\")\n monoped_height_ok = True\n\n if \"monoped_vertical_orientation\" in self._episode_done_criteria:\n monoped_orientation_ok = self.monoped_orientation_ok()\n else:\n rospy.logdebug(\"monoped_orientation_ok NOT TAKEN INTO ACCOUNT\")\n monoped_orientation_ok = True\n\n rospy.logdebug(\"monoped_height_ok=\"+str(monoped_height_ok))\n rospy.logdebug(\"monoped_orientation_ok=\" + str(monoped_orientation_ok))\n\n done = not(monoped_height_ok and monoped_orientation_ok)\n if done:\n rospy.logerr(\"It fell, so the reward has to be very low\")\n total_reward = self._done_reward\n else:\n rospy.logdebug(\"Calculate normal reward because it didn't fall.\")\n total_reward = self.calculate_total_reward()\n\n return total_reward, done\n\n def testing_loop(self):\n\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n self.calculate_total_reward()\n rate.sleep()\n\n\nif __name__ == \"__main__\":\n rospy.init_node('monoped_state_node', anonymous=True, log_level=rospy.DEBUG)\n max_height = 3.0\n min_height = 0.5\n max_incl = 1.57\n joint_increment_value = 0.32\n list_of_observations = [\"base_roll\",\n \"base_pitch\",\n \"base_angular_vel_x\",\n \"base_angular_vel_y\",\n \"base_angular_vel_z\",\n \"base_linear_acceleration_x\",\n \"base_linear_acceleration_y\",\n \"base_linear_acceleration_z\"]\n joint_limits = {\"haa_max\": 1.6,\n \"haa_min\": -1.6,\n \"hfe_max\": 1.6,\n \"hfe_min\": -1.6,\n \"kfe_max\": 0.0,\n 
\"kfe_min\": -1.6\n }\n episode_done_criteria = [ \"monoped_minimum_height\",\n \"monoped_vertical_orientation\"]\n done_reward = -1000.0\n alive_reward = 100.0\n desired_force = 7.08\n desired_yaw = 0.0\n weight_r1 = 0.0 # Weight for joint positions ( joints in the zero is perfect )\n weight_r2 = 0.0 # Weight for joint efforts ( no efforts is perfect )\n weight_r3 = 0.0 # Weight for contact force similar to desired ( weight of monoped )\n weight_r4 = 10.0 # Weight for orientation ( vertical is perfect )\n weight_r5 = 10.0 # Weight for distance from desired point ( on the point is perfect )\n discrete_division = 10\n maximum_base_linear_acceleration = 3000.0\n monoped_state = MonopedState( max_height=max_height,\n min_height=min_height,\n abs_max_roll=max_incl,\n abs_max_pitch=max_incl,\n joint_increment_value=joint_increment_value,\n list_of_observations=list_of_observations,\n joint_limits=joint_limits,\n episode_done_criteria=episode_done_criteria,\n done_reward=done_reward,\n alive_reward=alive_reward,\n desired_force=desired_force,\n desired_yaw=desired_yaw,\n weight_r1=weight_r1,\n weight_r2=weight_r2,\n weight_r3=weight_r3,\n weight_r4=weight_r4,\n weight_r5=weight_r5,\n discrete_division=discrete_division,\n maximum_base_linear_acceleration=maximum_base_linear_acceleration\n )\n monoped_state.testing_loop()\n"
] |
[
[
"numpy.linspace",
"numpy.linalg.norm",
"numpy.digitize",
"numpy.array",
"numpy.zeros"
]
] |
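
A minimal sketch of the observation-discretization pattern the file above records (numpy.linspace to build bin edges, numpy.digitize with right=True to assign them); the two ranges and the 10-part division are illustrative stand-ins for the robot's configured limits:

import numpy as np

# Hypothetical ranges; the real ones come from URDF joint limits and 2*desired_force.
obs_ranges = {"base_roll": (-1.57, 1.57), "contact_force": (0.0, 14.16)}
bins = {name: np.linspace(lo, hi, 10) for name, (lo, hi) in obs_ranges.items()}

observation = {"base_roll": 0.3, "contact_force": 7.1}
# right=True includes the upper edge, matching the recorded assign_bins logic.
state = "".join(str(int(np.digitize(observation[k], bins[k], right=True)))
                for k in obs_ranges)
print(state)  # "65": one digit per observation, as in get_state_as_string
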
liudengfeng/trading_calendars
|
[
"c56675a9de11830bed7c20e067987a9962d8d1ce"
] |
[
"tests/test_xfra_calendar.py"
] |
[
"from unittest import TestCase\nimport pandas as pd\nfrom pytz import UTC\n\nfrom test_trading_calendar import ExchangeCalendarTestBase\nfrom trading_calendars.exchange_calendar_xfra import XFRAExchangeCalendar\n\n\nclass XFRACalendarTestCase(ExchangeCalendarTestBase, TestCase):\n\n answer_key_filename = 'xfra'\n calendar_class = XFRAExchangeCalendar\n\n # The FWB is open from 9:00 am to 5:30 pm.\n MAX_SESSION_HOURS = 8.5\n\n def test_whit_monday(self):\n # Whit Monday was not observed prior to 2007.\n self.assertIn(\n pd.Timestamp('2006-06-05', tz=UTC),\n self.calendar.all_sessions,\n )\n\n # It was observed as a one-off in 2007...\n self.assertNotIn(\n pd.Timestamp('2007-05-28', tz=UTC),\n self.calendar.all_sessions,\n )\n\n # ...then not again...\n self.assertIn(\n pd.Timestamp('2008-05-12', tz=UTC),\n self.calendar.all_sessions,\n )\n\n # ...until 2015...\n self.assertNotIn(\n pd.Timestamp('2015-05-25', tz=UTC),\n self.calendar.all_sessions,\n )\n\n # ...when it became regularly observed.\n self.assertNotIn(\n pd.Timestamp('2016-05-16', tz=UTC),\n self.calendar.all_sessions,\n )\n\n def test_2012(self):\n expected_holidays_2012 = [\n # New Year's Day fell on a Sunday, so it is not a holiday this year\n pd.Timestamp(\"2012-04-06\", tz=UTC), # Good Friday\n pd.Timestamp(\"2012-04-09\", tz=UTC), # Easter Monday\n pd.Timestamp(\"2012-05-01\", tz=UTC), # Labour Day\n # Whit Monday was observed in 2007, then 2015 and after.\n # German Unity Day started being celebrated in 2014\n pd.Timestamp(\"2012-12-24\", tz=UTC), # Christmas Eve\n pd.Timestamp(\"2012-12-25\", tz=UTC), # Christmas\n pd.Timestamp(\"2012-12-26\", tz=UTC), # Boxing Day\n pd.Timestamp(\"2012-12-31\", tz=UTC), # New Year's Eve\n ]\n\n for session_label in expected_holidays_2012:\n self.assertNotIn(session_label, self.calendar.all_sessions)\n\n early_closes_2012 = [\n pd.Timestamp(\"2012-12-28\", tz=UTC), # Last working day of 2012\n ]\n\n for early_close_session_label in early_closes_2012:\n self.assertIn(early_close_session_label,\n self.calendar.early_closes)\n\n def test_half_days(self):\n half_days = [\n # In 2011, NYE was on a Sat, so Fri is a half day\n pd.Timestamp('2011-12-30', tz='CET'),\n # In 2012, NYE was on a Mon, so the preceding Fri is a half day\n pd.Timestamp('2012-12-28', tz='CET'),\n ]\n\n for half_day in half_days:\n half_day_close_time = self.calendar.next_close(half_day)\n self.assertEqual(\n half_day_close_time,\n half_day + pd.Timedelta(hours=12, minutes=30)\n )\n\n def test_reformation_day(self):\n # Reformation Day was a German national holiday in 2017 only.\n self.assertNotIn(\n pd.Timestamp('2017-10-31', tz=UTC),\n self.calendar.all_sessions,\n )\n\n # Ensure it is a trading day in the surrounding years.\n self.assertIn(\n pd.Timestamp('2016-10-31', tz=UTC),\n self.calendar.all_sessions,\n )\n self.assertIn(\n pd.Timestamp('2018-10-31', tz=UTC),\n self.calendar.all_sessions,\n )\n"
] |
[
[
"pandas.Timestamp",
"pandas.Timedelta"
]
] |
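
The tests above lean on two pandas idioms worth isolating: session labels are UTC-midnight Timestamps checked for membership in a DatetimeIndex, and early closes are offsets from a local-time label. A self-contained sketch (the two sessions are made up; the real index comes from the calendar object):

import pandas as pd
from pytz import UTC

sessions = pd.DatetimeIndex([pd.Timestamp("2012-12-27", tz=UTC),
                             pd.Timestamp("2012-12-28", tz=UTC)])
assert pd.Timestamp("2012-12-25", tz=UTC) not in sessions   # Christmas holiday
assert pd.Timestamp("2012-12-28", tz=UTC) in sessions       # last working day
close = pd.Timestamp("2012-12-28", tz="CET") + pd.Timedelta(hours=12, minutes=30)
print(close)  # 2012-12-28 12:30:00+01:00, the asserted half-day close
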
GAIL-4-BARK/tf2rl
|
[
"5468115c375bc7eea4d256033c460da001bdd0e3"
] |
[
"tf2rl/algos/td3.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense\n\nfrom tf2rl.algos.ddpg import DDPG, Actor\nfrom tf2rl.misc.target_update_ops import update_target_variables\nfrom tf2rl.misc.huber_loss import huber_loss\n\n\nclass Critic(tf.keras.Model):\n def __init__(self, state_shape, action_dim, units=[400, 300], name=\"Critic\"):\n super().__init__(name=name)\n\n self.l1 = Dense(units[0], name=\"L1\")\n self.l2 = Dense(units[1], name=\"L2\")\n self.l3 = Dense(1, name=\"L3\")\n\n self.l4 = Dense(units[0], name=\"L4\")\n self.l5 = Dense(units[1], name=\"L5\")\n self.l6 = Dense(1, name=\"L6\")\n\n dummy_state = tf.constant(\n np.zeros(shape=(1,)+state_shape, dtype=np.float32))\n dummy_action = tf.constant(\n np.zeros(shape=[1, action_dim], dtype=np.float32))\n with tf.device(\"/cpu:0\"):\n self([dummy_state, dummy_action])\n\n def call(self, inputs):\n states, actions = inputs\n xu = tf.concat([states, actions], axis=1)\n\n x1 = tf.nn.relu(self.l1(xu))\n x1 = tf.nn.relu(self.l2(x1))\n x1 = self.l3(x1)\n\n x2 = tf.nn.relu(self.l4(xu))\n x2 = tf.nn.relu(self.l5(x2))\n x2 = self.l6(x2)\n\n return x1, x2\n\n\nclass TD3(DDPG):\n def __init__(\n self,\n state_shape,\n action_dim,\n name=\"TD3\",\n actor_update_freq=2,\n policy_noise=0.2,\n noise_clip=0.5,\n actor_units=[400, 300],\n critic_units=[400, 300],\n lr_critic=0.001,\n **kwargs):\n super().__init__(name=name, state_shape=state_shape, action_dim=action_dim,\n actor_units=actor_units, critic_units=critic_units,\n lr_critic=lr_critic, **kwargs)\n\n self.critic = Critic(state_shape, action_dim, critic_units)\n self.critic_target = Critic(state_shape, action_dim, critic_units)\n update_target_variables(\n self.critic_target.weights, self.critic.weights, tau=1.)\n self.critic_optimizer = tf.keras.optimizers.Adam(\n learning_rate=lr_critic)\n\n self._policy_noise = policy_noise\n self._noise_clip = noise_clip\n\n self._actor_update_freq = actor_update_freq\n self._it = tf.Variable(0, dtype=tf.int32)\n\n @tf.function\n def _train_body(self, states, actions, next_states, rewards, done, weights):\n with tf.device(self.device):\n with tf.GradientTape() as tape:\n td_error1, td_error2 = self._compute_td_error_body(\n states, actions, next_states, rewards, done)\n critic_loss = tf.reduce_mean(huber_loss(td_error1, delta=self.max_grad) * weights) + \\\n tf.reduce_mean(huber_loss(td_error2, delta=self.max_grad) * weights)\n\n critic_grad = tape.gradient(\n critic_loss, self.critic.trainable_variables)\n self.critic_optimizer.apply_gradients(\n zip(critic_grad, self.critic.trainable_variables))\n\n self._it.assign_add(1)\n with tf.GradientTape() as tape:\n next_actions = self.actor(states)\n actor_loss = - \\\n tf.reduce_mean(self.critic([states, next_actions]))\n\n remainder = tf.math.mod(self._it, self._actor_update_freq)\n def optimize_actor():\n actor_grad = tape.gradient(\n actor_loss, self.actor.trainable_variables)\n return self.actor_optimizer.apply_gradients(\n zip(actor_grad, self.actor.trainable_variables))\n\n tf.cond(pred=tf.equal(remainder, 0), true_fn=optimize_actor, false_fn=tf.no_op)\n # Update target networks\n update_target_variables(\n self.critic_target.weights, self.critic.weights, self.tau)\n update_target_variables(\n self.actor_target.weights, self.actor.weights, self.tau)\n\n return actor_loss, critic_loss, tf.abs(td_error1) + tf.abs(td_error2)\n\n def compute_td_error(self, states, actions, next_states, rewards, dones):\n td_errors1, td_errors2 = self._compute_td_error_body(\n states, actions, 
next_states, rewards, dones)\n return np.squeeze(np.abs(td_errors1.numpy()) + np.abs(td_errors2.numpy()))\n\n @tf.function\n def _compute_td_error_body(self, states, actions, next_states, rewards, dones):\n with tf.device(self.device):\n not_dones = 1. - dones\n\n # Get noisy action\n next_action = self.actor_target(next_states)\n noise = tf.cast(tf.clip_by_value(\n tf.random.normal(shape=tf.shape(next_action),\n stddev=self._policy_noise),\n -self._noise_clip, self._noise_clip), tf.float32)\n next_action = tf.clip_by_value(\n next_action + noise, -self.actor_target.max_action, self.actor_target.max_action)\n\n target_Q1, target_Q2 = self.critic_target(\n [next_states, next_action])\n target_Q = tf.minimum(target_Q1, target_Q2)\n target_Q = rewards + (not_dones * self.discount * target_Q)\n target_Q = tf.stop_gradient(target_Q)\n current_Q1, current_Q2 = self.critic([states, actions])\n\n return target_Q - current_Q1, target_Q - current_Q2\n"
] |
[
[
"tensorflow.clip_by_value",
"tensorflow.device",
"tensorflow.concat",
"tensorflow.Variable",
"tensorflow.shape",
"tensorflow.keras.layers.Dense",
"tensorflow.minimum",
"tensorflow.equal",
"tensorflow.stop_gradient",
"tensorflow.keras.optimizers.Adam",
"tensorflow.math.mod",
"numpy.zeros",
"tensorflow.abs",
"tensorflow.GradientTape"
]
] |
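
The core of the TD3 file above is the twin-critic target: take the elementwise minimum of the two target critics, discount it, and cut the gradient. A minimal functional sketch (batch values are made up; the clipped target-policy noise is omitted for brevity):

import tensorflow as tf

def td3_target(rewards, dones, q1, q2, discount=0.99):
    not_dones = 1.0 - dones
    target_q = tf.minimum(q1, q2)       # twin minimum curbs overestimation bias
    target_q = rewards + not_dones * discount * target_q
    return tf.stop_gradient(target_q)   # targets must not carry gradients

print(td3_target(tf.constant([1.0]), tf.constant([0.0]),
                 tf.constant([2.0]), tf.constant([1.5])))  # [2.485]
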
zsarnoczay/uqFEM
|
[
"425163f09fbe021a0b274b47e9a6c0207b587ce1"
] |
[
"backend/modules/performUQ/UCSD_UQ/runTMCMC.py"
] |
[
"\"\"\"\nauthors: Mukesh Kumar Ramancha, Maitreya Manoj Kurumbhati, and Prof. J.P. Conte \naffiliation: University of California, San Diego\n\n\"\"\"\n\nimport numpy as np\nimport tmcmcFunctions\nimport multiprocessing as mp\nfrom multiprocessing import Pool\nfrom runFEM import runFEM\nfrom numpy.random import SeedSequence, default_rng\n\n\ndef RunTMCMC(N, AllPars, Nm_steps_max, Nm_steps_maxmax, log_likelihood, variables, resultsLocation, seed):\n \"\"\" Runs TMCMC Algorithm \"\"\"\n\n # Initialize (beta, effective sample size)\n beta = 0\n ESS = N\n mytrace = []\n\n # Initialize other TMCMC variables\n Nm_steps = Nm_steps_max\n parallelize_MCMC = 'yes' # yes or no\n Adap_calc_Nsteps = 'yes' # yes or no\n Adap_scale_cov = 'yes' # yes or no\n scalem = 1 # cov scale factor\n evidence = 1 # model evidence\n\n # initial samples\n Sm = tmcmcFunctions.initial_population(N, AllPars)\n\n # Evaluate posterior at Sm\n Priorm = np.array([tmcmcFunctions.log_prior(s, AllPars) for s in Sm]).squeeze()\n Postm = Priorm # prior = post for beta = 0\n\n # Evaluate log-likelihood at current samples Sm\n if parallelize_MCMC == 'yes':\n pool = Pool(processes=mp.cpu_count())\n Lmt = pool.starmap(runFEM, [(ind, Sm[ind], variables, resultsLocation, log_likelihood) for ind in range(N)], )\n pool.close()\n Lm = np.array(Lmt).squeeze()\n else:\n Lm = np.array([runFEM(ind, Sm[ind], variables, resultsLocation, log_likelihood) for ind in range(N)]).squeeze()\n\n while beta < 1:\n # adaptivly compute beta s.t. ESS = N/2 or ESS = 0.95*prev_ESS\n # plausible weights of Sm corresponding to new beta\n beta, Wm, ESS = tmcmcFunctions.compute_beta(beta, Lm, ESS, threshold=0.95)\n\n # update model evidence\n evidence = evidence * (sum(Wm) / N)\n\n # Calculate covaraince matrix using Wm_n\n Cm = np.cov(Sm, aweights=Wm / sum(Wm), rowvar=0)\n\n # Resample ###################################################\n # Resampling using plausible weights\n SmcapIDs = np.random.choice(range(N), N, p=Wm / sum(Wm))\n # SmcapIDs = resampling.stratified_resample(Wm_n)\n Smcap = Sm[SmcapIDs]\n Lmcap = Lm[SmcapIDs]\n Postmcap = Postm[SmcapIDs]\n\n # save to trace\n # stage m: samples, likelihood, weights, next stage ESS, next stage beta, resampled samples\n mytrace.append([Sm, Lm, Wm, ESS, beta, Smcap])\n\n # print\n print(\"beta = %.5f\" % beta)\n print(\"ESS = %d\" % ESS)\n print(\"scalem = %.2f\" % scalem)\n\n # Perturb ###################################################\n # perform MCMC starting at each Smcap (total: N) for Nm_steps\n Em = (scalem ** 2) * Cm # Proposal dist covariance matrix\n\n numProposals = N * Nm_steps\n numAccepts = 0\n\n # seed to reproduce results\n ss = SeedSequence(seed)\n child_seeds = ss.spawn(N)\n\n if parallelize_MCMC == 'yes':\n pool = Pool(processes=mp.cpu_count())\n results = pool.starmap(tmcmcFunctions.MCMC_MH, [(j1, Em, Nm_steps, Smcap[j1], Lmcap[j1], Postmcap[j1], beta,\n numAccepts, AllPars, log_likelihood, variables,\n resultsLocation, default_rng(child_seeds[j1])) for j1 in\n range(N)], )\n pool.close()\n else:\n results = [\n tmcmcFunctions.MCMC_MH(j1, Em, Nm_steps, Smcap[j1], Lmcap[j1], Postmcap[j1], beta, numAccepts, AllPars,\n log_likelihood, variables, resultsLocation, default_rng(child_seeds[j1])) for j1\n in range(N)]\n\n Sm1, Lm1, Postm1, numAcceptsS, all_proposals, all_PLP = zip(*results)\n Sm1 = np.asarray(Sm1)\n Lm1 = np.asarray(Lm1)\n Postm1 = np.asarray(Postm1)\n numAcceptsS = np.asarray(numAcceptsS)\n numAccepts = sum(numAcceptsS)\n all_proposals = np.asarray(all_proposals)\n all_PLP = 
np.asarray(all_PLP)\n\n # total observed acceptance rate\n R = numAccepts / numProposals\n print(\"acceptance rate = %.2f\" % R)\n\n # Calculate Nm_steps based on observed acceptance rate\n if Adap_calc_Nsteps == 'yes':\n # increase max Nmcmc with stage number\n Nm_steps_max = min(Nm_steps_max + 1, Nm_steps_maxmax)\n print(\"adapted max MCMC steps = %d\" % Nm_steps_max)\n\n acc_rate = max(1. / numProposals, R)\n Nm_steps = min(Nm_steps_max, 1 + int(np.log(1 - 0.99) / np.log(1 - acc_rate)))\n print(\"next MCMC Nsteps = %d\" % Nm_steps)\n\n print('========================')\n\n # scale factor based on observed acceptance ratio\n if Adap_scale_cov == 'yes':\n scalem = (1 / 9) + ((8 / 9) * R)\n\n # for next beta\n Sm, Postm, Lm = Sm1, Postm1, Lm1\n\n # save to trace\n mytrace.append([Sm, Lm, np.ones(len(Wm)), 'notValid', 1, 'notValid'])\n\n print(\"evidence = %.10f\" % evidence)\n\n return mytrace\n"
] |
[
[
"numpy.log",
"numpy.asarray",
"numpy.random.SeedSequence",
"numpy.array",
"numpy.random.default_rng"
]
] |
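
One detail in RunTMCMC above deserves a note: reproducibility across parallel MCMC chains is obtained by spawning one child SeedSequence per chain instead of sharing a single generator. A minimal sketch with an arbitrary seed:

import numpy as np
from numpy.random import SeedSequence, default_rng

N = 4  # number of chains; the recorded code spawns one per resampled particle
child_seeds = SeedSequence(12345).spawn(N)
rngs = [default_rng(s) for s in child_seeds]
print([float(r.standard_normal()) for r in rngs])  # independent, repeatable streams
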
lightnerdevtech/pyADVISE
|
[
"7496729b57a777c5ac73d1ac08589b9794a77955"
] |
[
"pyADVISE/data_step.py"
] |
[
"import pandas as pd\npath='C:/Users/alightner/Documents/Source_Updates/ESDB/Database/'\n\n\nimport country_converter as coco\ncc = coco.CountryConverter()\n\n\ndef transform_to_codes(data, col, new_col_name, name_type='ISO3'): \n data.replace('Congo, Dem. Rep.', 'DR Congo', inplace=True)\n data[col_name] = cc.convert(names =list(data[col]), to='ISO3', not_found=None)\n\n return data\n\ndef merge_country_name(data, left_on='country_id', right_on='country_id',country_name=['country_name'], file=path): \n\n # read excel file, select vars of interest [[ ]]\n df_countries = pd.read_sas(file+'countries.sas7bdat')\n \n \n \n # decode string vars from sas\n for i in country_name: \n df_countries[i] = df_countries[i].str.decode('UTF-8') \n \n \n \n # merge data on column of choice \n df = pd.merge(data, df_countries[['country_id']+ country_name], left_on=left_on, right_on=right_on, how='left')\n \n # print the names which do not merge \n print(df[~df[right_on].notnull()][left_on].unique())\n\n \n return df \n\n\n\ndef merge_country_class(data, class_type='World Bank Income'):\n '''Provide data, the variable you would like to merge on, and the type of income \n category the user would like to examine. Need to expand to other types of income groups.'''\n \n \n # set to shared file location in the future file = \n file1 = 'C:/Users/alightner/Documents/Source Updates/Gen Data'\n \n \n # bring in relevant data sources\n codes = pd.read_sas(file1 + '/country_classification_values.sas7bdat')\n values = pd.read_sas(file1 + '/classification_values.sas7bdat')\n \n \n # change classification value name in values to UTU-08 \n values['classification_value_name'] = values['classification_value_name'].str.decode('UTF-8')\n \n \n # if class == 'World Bank Income' then just merge these codes \n if class_type =='World Bank Income': \n # keep only the WB codes (first 4 observations)\n values = values.iloc[0:4, :]\n # keep only the country code values where classif.. is bewteen 0 and 4. \n codes = codes[codes['classification_value_id'].between(0,4)]\n \n \n # merge codes to dataset provided. 
\n classif = pd.merge(codes, values, on='classification_value_id', how='left')\n # rename class_year to year to limit repetitiveness \n classif.rename(index=str, columns={\"classification_year\": \"year\"}, inplace=True)\n \n # select only the max year\n max_year = max(list(classif['year'].unique()))\n \n # select the most recent year \n classif = classif[classif['year']==max_year]\n \n # drop year\n classif.drop('year', axis=1, inplace=True)\n \n # merge datasets \n df = pd.merge(data, classif, on=['country_id'], how='left')\n \n \n \n return df\n\n\ndef merge_series_names(data, include_vars=['series_name'], file='C:/Users/alightner/Documents/Source Updates/029 ILO/'): \n\n # read excel file, select vars of interest [[ ]]\n df_series = pd.read_excel(file+'Mappings/mapping series.xlsx')\n \n # merge data series and data provided \n df = pd.merge(data, df_series[['series_id']+include_vars], on='series_id', how='left')\n \n return df\n\n###########################################################\n###########################################################\n############ ACCESS FINISHED DATA \n###########################################################\n###########################################################\n\ndef get_esdb(table_num, path='C:/Users/alightner/Documents/Source_Updates/ESDB/Database/', \n country_sel=None, year_sel=(1960, 2019), series_sel=None, silence=False,\n return_columns = ['series_id', 'country_name', 'country_id', 'year', 'value_start'], \n series_df_short = True):\n '''This function will return a final dataset within a source update file. The user\n specifies only the table number (without the _) if the data is in the same folder in \n typical ASVISE format.'''\n \n \n # define paths to data\n data_path = path+'_'+ table_num+'data.sas7bdat'\n country_path = path+'countries.sas7bdat'\n series_path = path+'series.sas7bdat'\n\n \n ###########################\n # read dataset\n ############################\n \n ### read data \n data = pd.read_sas(data_path) \n countries = pd.read_sas(country_path)[['country_id', 'country_name']]\n series = pd.read_sas(series_path)\n \n\n ### python is case sensitive while sas is not, thus we need to make ssure all column names are lowercase \n datasets = [data, countries, series]\n for df in datasets: \n df.columns = [i.lower() for i in df.columns]\n \n \n \n # select year observations \n data = data[(data['year'].between(year_sel[0], year_sel[1]))]\n \n # select series if asked\n if series_sel != None: \n data = data[(data['series_id'].isin(series_sel))]\n # select countries if prompted\n if country_sel != None: \n data = data[(data['country_id'].isin(country_sel))]\n \n \n \n \n # change country_id, year, periodicity_id source_id, and start_value to int. 
\n for i in ['series_id', 'country_id', 'year', 'periodicity_id', 'source_id']: \n data[i] = data[i].astype('int')\n \n # set list of variables which need to change from bytes to string\n country_change = ['country_name']\n series_change = ['series_name', 'series_definition']\n\n\n # change series vars from bytes to string\n for i in series_change: \n series[i] = series[i].str.decode('UTF-8')\n\n \n \n # place definitions of variables in dictionary, then drop multiple observations \n if series_sel !=None: \n series = series[series['series_id'].isin(series_sel)]\n \n if series_df_short ==True: \n series = series[['series_id', 'series_name', 'series_definition']]\n \n series_df = series.drop_duplicates(['series_id', 'series_name', 'series_definition'])\n \n series_df['series_id'] = series_df['series_id'].astype('int')\n \n \n \n \n # change countries from bytes \n for i in country_change: \n countries[i] = countries[i].str.decode('UTF-8')\n\n\n # change country_id to int \n countries['country_id'] = countries['country_id'].astype('int')\n \n #################################\n # merge in country and series on respect datasets\n ##################################\n \n if silence==False:\n print('The length of the dataset from '+table_num+ ' is ' +str(len(data)))\n \n\n data1 = pd.merge(data, countries, on='country_id', how='inner')\n\n \n data1= data1[return_columns]\n #data2 = pd.merge(data1, series, on='series_id', how='inner')\n #print('After merging series: ' +str(len(data)))\n \n series_df = series_df.reset_index()\n \n return data1, series_df\n\n\n#####################################################\n##### FIND SOURCE \n######################################################\n\n\ndef find_source(series_sel, path='C:/Users/alightner/Documents/Source_Updates/ESDB/Database/'): \n \n '''this function takes a list of series ids and returns a dataframe of the source_ids along with \n their definitions and names'''\n \n # acces series file \n series_path = path+'series.sas7bdat'\n series = pd.read_sas(series_path)\n \n # make sure column names are lower case \n series.columns = [i.lower() for i in series.columns]\n\n # change from coded to UTf-8\n series_change = ['series_name', 'series_definition']\n # change series vars from bytes to string\n for i in series_change: \n series[i] = series[i].str.decode('UTF-8')\n \n # change series_id to int \n series['series_id'] = series['series_id'].astype('int')\n series['source_id'] = series['source_id'].astype('int')\n \n # return series \n series = series[series['series_id'].isin(series_sel)]\n \n \n \n series = series[['source_id', 'series_id', 'series_name', 'series_definition']]\n \n return series\n\n\ndef get_esdb_by_dict(dictionary, country_list, year_sel=(1990, 2019), silent=False):\n '''this function takes a dictionary where the keys are the the source_ids and the values \n are lists of series_ids associated with the source_ids. 
\n '''\n # # empty dataframes to be filled \n data = pd.DataFrame()\n series_info = pd.DataFrame()\n \n # loop through and access \n for i in dictionary: \n \n print('Source: '+ str(i))\n\n # turn i into str, add 0 if len(2) \n i = str(i)\n length_i = len(i)\n if length_i==1: \n i = '00'+i\n elif length_i ==2: \n i = '0'+i\n \n # for each source, get data\n data_temp, series_info_temp = get_esdb(i, series_sel=dictionary[int(i)], \n country_sel=country_list, year_sel=(year_sel[0], year_sel[1]))\n\n # append data to main dataset \n data = data.append(data_temp)\n series_info = series_info.append(series_info_temp)\n \n return data, series_info\n \n \n \ndef get_data_from_serieslist(file, country_list, year_sel=(2000, 2017), sheet_name='Data', silence=False): \n \n '''preps a list of series_ids from an excel and a list of countries of interest to use the get_esdb by dict. '''\n \n # list of series\n series_df = pd.read_excel(file, sheet_name=sheet_name)\n series_list = list(series_df.series_id.dropna().astype('int').unique())\n \n # find source_ids for the source \n sources = find_source(series_list)\n \n # generate a dictionary of lists of serires_ids by located by source_id keys\n ind_bysource = { i : sources[sources['source_id']==i]['series_id'].tolist() for i in sources['source_id'].unique()}\n \n # get data using this ind_bysource \n data, series_info = get_esdb_by_dict(dictionary=ind_bysource, country_list=country_list, year_sel=year_sel)\n \n return data, series_info \n\n\n\n\ndef get_ids(data, key_col, value_col): \n \n '''this function takes a dataset and two columns and returns \n a dictionary with the key_col as the keys (unique) and their \n associated unique value_col'''\n \n # unqiue and consise dataset\n data = data[[key_col, value_col]].drop_duplicates()\n \n data = {i[0]: i[1] for i in zip(data[key_col], data[value_col])}\n \n return data\n\n\n\ndef most_recent_data(data, on=['country_id', 'series_id'], date_var='year', ascending=[True, True, False]):\n \n '''takes a dataset and returns most recent, based on the on selection'''\n \n sort_vars = on+[date_var]\n # sort by country_id, descending series_id\n data = data.sort_values(sort_vars, ascending=ascending)\n \n\n # drop duplicates based on the 'on' option \n data = data.drop_duplicates(on)\n \n return data"
] |
[
[
"pandas.merge",
"pandas.read_excel",
"pandas.read_sas",
"pandas.DataFrame"
]
] |
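
The merge helpers above all follow one pattern: left-merge a lookup table onto the data, then report the keys that failed to match. A small self-contained variant with toy frames in place of the SAS files (note the recorded merge_country_name filters on the merge key itself, which is never null after a left merge; filtering on the merged name column is the version that actually flags non-matches):

import pandas as pd

data = pd.DataFrame({"country_id": [4, 8, 999], "value_start": [1.2, 3.4, 5.6]})
countries = pd.DataFrame({"country_id": [4, 8],
                          "country_name": ["Afghanistan", "Albania"]})
df = pd.merge(data, countries, on="country_id", how="left")
print(df[~df["country_name"].notnull()]["country_id"].unique())  # [999]
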
davidmkwon/ray
|
[
"9053be0e639cf35a1b113ca8a4fc378d209ecb75"
] |
[
"rllib/evaluation/tests/test_rollout_worker.py"
] |
[
"from collections import Counter\nimport gym\nfrom gym.spaces import Box, Discrete\nimport numpy as np\nimport os\nimport random\nimport time\nimport unittest\n\nimport ray\nfrom ray.rllib.agents.pg import PGTrainer\nfrom ray.rllib.agents.a3c import A2CTrainer\nfrom ray.rllib.env.vector_env import VectorEnv\nfrom ray.rllib.evaluation.rollout_worker import RolloutWorker\nfrom ray.rllib.evaluation.metrics import collect_metrics\nfrom ray.rllib.evaluation.postprocessing import compute_advantages\nfrom ray.rllib.examples.env.mock_env import MockEnv, MockEnv2\nfrom ray.rllib.examples.env.multi_agent import MultiAgentCartPole\nfrom ray.rllib.examples.policy.random_policy import RandomPolicy\nfrom ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, \\\n STEPS_TRAINED_COUNTER\nfrom ray.rllib.policy.policy import Policy\nfrom ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID, MultiAgentBatch, \\\n SampleBatch\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.test_utils import check, framework_iterator\nfrom ray.tune.registry import register_env\n\n\nclass MockPolicy(RandomPolicy):\n @override(RandomPolicy)\n def compute_actions(self,\n obs_batch,\n state_batches=None,\n prev_action_batch=None,\n prev_reward_batch=None,\n episodes=None,\n explore=None,\n timestep=None,\n **kwargs):\n return np.array([random.choice([0, 1])] * len(obs_batch)), [], {}\n\n @override(Policy)\n def postprocess_trajectory(self,\n batch,\n other_agent_batches=None,\n episode=None):\n assert episode is not None\n super().postprocess_trajectory(batch, other_agent_batches, episode)\n return compute_advantages(\n batch, 100.0, 0.9, use_gae=False, use_critic=False)\n\n\nclass BadPolicy(RandomPolicy):\n @override(RandomPolicy)\n def compute_actions(self,\n obs_batch,\n state_batches=None,\n prev_action_batch=None,\n prev_reward_batch=None,\n episodes=None,\n explore=None,\n timestep=None,\n **kwargs):\n raise Exception(\"intentional error\")\n\n\nclass FailOnStepEnv(gym.Env):\n def __init__(self):\n self.observation_space = gym.spaces.Discrete(1)\n self.action_space = gym.spaces.Discrete(2)\n\n def reset(self):\n raise ValueError(\"kaboom\")\n\n def step(self, action):\n raise ValueError(\"kaboom\")\n\n\nclass MockVectorEnv(VectorEnv):\n def __init__(self, episode_length, num_envs):\n super().__init__(\n observation_space=gym.spaces.Discrete(1),\n action_space=gym.spaces.Discrete(2),\n num_envs=num_envs)\n self.envs = [MockEnv(episode_length) for _ in range(num_envs)]\n\n @override(VectorEnv)\n def vector_reset(self):\n return [e.reset() for e in self.envs]\n\n @override(VectorEnv)\n def reset_at(self, index):\n return self.envs[index].reset()\n\n @override(VectorEnv)\n def vector_step(self, actions):\n obs_batch, rew_batch, done_batch, info_batch = [], [], [], []\n for i in range(len(self.envs)):\n obs, rew, done, info = self.envs[i].step(actions[i])\n obs_batch.append(obs)\n rew_batch.append(rew)\n done_batch.append(done)\n info_batch.append(info)\n return obs_batch, rew_batch, done_batch, info_batch\n\n @override(VectorEnv)\n def get_unwrapped(self):\n return self.envs\n\n\nclass TestRolloutWorker(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n ray.init(num_cpus=5)\n\n @classmethod\n def tearDownClass(cls):\n ray.shutdown()\n\n def test_basic(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy_spec=MockPolicy)\n batch = ev.sample()\n for key in [\n \"obs\", \"actions\", \"rewards\", \"dones\", \"advantages\",\n \"prev_rewards\", \"prev_actions\"\n 
]:\n self.assertIn(key, batch)\n self.assertGreater(np.abs(np.mean(batch[key])), 0)\n\n def to_prev(vec):\n out = np.zeros_like(vec)\n for i, v in enumerate(vec):\n if i + 1 < len(out) and not batch[\"dones\"][i]:\n out[i + 1] = v\n return out.tolist()\n\n self.assertEqual(batch[\"prev_rewards\"].tolist(),\n to_prev(batch[\"rewards\"]))\n self.assertEqual(batch[\"prev_actions\"].tolist(),\n to_prev(batch[\"actions\"]))\n self.assertGreater(batch[\"advantages\"][0], 1)\n ev.stop()\n\n def test_batch_ids(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy_spec=MockPolicy,\n rollout_fragment_length=1)\n batch1 = ev.sample()\n batch2 = ev.sample()\n self.assertEqual(len(set(batch1[\"unroll_id\"])), 1)\n self.assertEqual(len(set(batch2[\"unroll_id\"])), 1)\n self.assertEqual(\n len(set(SampleBatch.concat(batch1, batch2)[\"unroll_id\"])), 2)\n ev.stop()\n\n def test_global_vars_update(self):\n # Allow for Unittest run.\n ray.init(num_cpus=5, ignore_reinit_error=True)\n for fw in framework_iterator(frameworks=(\"tf2\", \"tf\")):\n agent = A2CTrainer(\n env=\"CartPole-v0\",\n config={\n \"num_workers\": 1,\n # lr = 0.1 - [(0.1 - 0.000001) / 100000] * ts\n \"lr_schedule\": [[0, 0.1], [100000, 0.000001]],\n \"framework\": fw,\n })\n policy = agent.get_policy()\n for i in range(3):\n result = agent.train()\n print(\"{}={}\".format(STEPS_TRAINED_COUNTER,\n result[\"info\"][STEPS_TRAINED_COUNTER]))\n print(\"{}={}\".format(STEPS_SAMPLED_COUNTER,\n result[\"info\"][STEPS_SAMPLED_COUNTER]))\n global_timesteps = policy.global_timestep\n print(\"global_timesteps={}\".format(global_timesteps))\n expected_lr = \\\n 0.1 - ((0.1 - 0.000001) / 100000) * global_timesteps\n lr = policy.cur_lr\n if fw == \"tf\":\n lr = policy._sess.run(lr)\n check(lr, expected_lr, rtol=0.05)\n agent.stop()\n\n def test_no_step_on_init(self):\n register_env(\"fail\", lambda _: FailOnStepEnv())\n for fw in framework_iterator():\n pg = PGTrainer(\n env=\"fail\", config={\n \"num_workers\": 1,\n \"framework\": fw,\n })\n self.assertRaises(Exception, lambda: pg.train())\n pg.stop()\n\n def test_callbacks(self):\n for fw in framework_iterator(frameworks=(\"torch\", \"tf\")):\n counts = Counter()\n pg = PGTrainer(\n env=\"CartPole-v0\", config={\n \"num_workers\": 0,\n \"rollout_fragment_length\": 50,\n \"train_batch_size\": 50,\n \"callbacks\": {\n \"on_episode_start\":\n lambda x: counts.update({\"start\": 1}),\n \"on_episode_step\":\n lambda x: counts.update({\"step\": 1}),\n \"on_episode_end\": lambda x: counts.update({\"end\": 1}),\n \"on_sample_end\":\n lambda x: counts.update({\"sample\": 1}),\n },\n \"framework\": fw,\n })\n pg.train()\n pg.train()\n self.assertGreater(counts[\"sample\"], 0)\n self.assertGreater(counts[\"start\"], 0)\n self.assertGreater(counts[\"end\"], 0)\n self.assertGreater(counts[\"step\"], 0)\n pg.stop()\n\n def test_query_evaluators(self):\n register_env(\"test\", lambda _: gym.make(\"CartPole-v0\"))\n for fw in framework_iterator(frameworks=(\"torch\", \"tf\")):\n pg = PGTrainer(\n env=\"test\",\n config={\n \"num_workers\": 2,\n \"rollout_fragment_length\": 5,\n \"num_envs_per_worker\": 2,\n \"framework\": fw,\n \"create_env_on_driver\": True,\n })\n results = pg.workers.foreach_worker(\n lambda ev: ev.rollout_fragment_length)\n results2 = pg.workers.foreach_worker_with_index(\n lambda ev, i: (i, ev.rollout_fragment_length))\n results3 = pg.workers.foreach_worker(\n lambda ev: ev.foreach_env(lambda env: 1))\n self.assertEqual(results, [10, 10, 10])\n 
self.assertEqual(results2, [(0, 10), (1, 10), (2, 10)])\n self.assertEqual(results3, [[1, 1], [1, 1], [1, 1]])\n pg.stop()\n\n def test_action_clipping(self):\n from ray.rllib.examples.env.random_env import RandomEnv\n action_space = gym.spaces.Box(-2.0, 1.0, (3, ))\n\n # Clipping: True (clip between Policy's action_space.low/high),\n ev = RolloutWorker(\n env_creator=lambda _: RandomEnv(config=dict(\n action_space=action_space,\n max_episode_len=10,\n p_done=0.0,\n check_action_bounds=True,\n )),\n policy_spec=RandomPolicy,\n policy_config=dict(\n action_space=action_space,\n ignore_action_bounds=True,\n ),\n clip_actions=True,\n batch_mode=\"complete_episodes\")\n sample = ev.sample()\n # Check, whether the action bounds have been breached (expected).\n # We still arrived here b/c we clipped according to the Env's action\n # space.\n self.assertGreater(np.max(sample[\"actions\"]), action_space.high[0])\n self.assertLess(np.min(sample[\"actions\"]), action_space.low[0])\n ev.stop()\n\n # Clipping: False and RandomPolicy produces invalid actions.\n # Expect Env to complain.\n ev2 = RolloutWorker(\n env_creator=lambda _: RandomEnv(config=dict(\n action_space=action_space,\n max_episode_len=10,\n p_done=0.0,\n check_action_bounds=True,\n )),\n policy_spec=RandomPolicy,\n policy_config=dict(\n action_space=action_space,\n ignore_action_bounds=True,\n ),\n clip_actions=False, # <- should lead to Env complaining\n batch_mode=\"complete_episodes\")\n self.assertRaisesRegex(ValueError, r\"Illegal action\", ev2.sample)\n ev2.stop()\n\n # Clipping: False and RandomPolicy produces valid (bounded) actions.\n # Expect \"actions\" in SampleBatch to be unclipped.\n ev3 = RolloutWorker(\n env_creator=lambda _: RandomEnv(config=dict(\n action_space=action_space,\n max_episode_len=10,\n p_done=0.0,\n check_action_bounds=True,\n )),\n policy_spec=RandomPolicy,\n policy_config=dict(action_space=action_space),\n # Should not be a problem as RandomPolicy abides to bounds.\n clip_actions=False,\n batch_mode=\"complete_episodes\")\n sample = ev3.sample()\n self.assertGreater(np.min(sample[\"actions\"]), action_space.low[0])\n self.assertLess(np.max(sample[\"actions\"]), action_space.high[0])\n ev3.stop()\n\n def test_reward_clipping(self):\n # Clipping: True (clip between -1.0 and 1.0).\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv2(episode_length=10),\n policy_spec=MockPolicy,\n clip_rewards=True,\n batch_mode=\"complete_episodes\")\n self.assertEqual(max(ev.sample()[\"rewards\"]), 1)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episode_reward_mean\"], 1000)\n ev.stop()\n\n from ray.rllib.examples.env.random_env import RandomEnv\n\n # Clipping in certain range (-2.0, 2.0).\n ev2 = RolloutWorker(\n env_creator=lambda _: RandomEnv(\n dict(\n reward_space=gym.spaces.Box(low=-10, high=10, shape=()),\n p_done=0.0,\n max_episode_len=10,\n )),\n policy_spec=MockPolicy,\n clip_rewards=2.0,\n batch_mode=\"complete_episodes\")\n sample = ev2.sample()\n self.assertEqual(max(sample[\"rewards\"]), 2.0)\n self.assertEqual(min(sample[\"rewards\"]), -2.0)\n self.assertLess(np.mean(sample[\"rewards\"]), 0.5)\n self.assertGreater(np.mean(sample[\"rewards\"]), -0.5)\n ev2.stop()\n\n # Clipping: Off.\n ev2 = RolloutWorker(\n env_creator=lambda _: MockEnv2(episode_length=10),\n policy_spec=MockPolicy,\n clip_rewards=False,\n batch_mode=\"complete_episodes\")\n self.assertEqual(max(ev2.sample()[\"rewards\"]), 100)\n result2 = collect_metrics(ev2, [])\n self.assertEqual(result2[\"episode_reward_mean\"], 
1000)\n ev2.stop()\n\n def test_hard_horizon(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv2(episode_length=10),\n policy_spec=MockPolicy,\n batch_mode=\"complete_episodes\",\n rollout_fragment_length=10,\n episode_horizon=4,\n soft_horizon=False)\n samples = ev.sample()\n # Three logical episodes and correct episode resets (always after 4\n # steps).\n self.assertEqual(len(set(samples[\"eps_id\"])), 3)\n for i in range(4):\n self.assertEqual(np.argmax(samples[\"obs\"][i]), i)\n self.assertEqual(np.argmax(samples[\"obs\"][4]), 0)\n # 3 done values.\n self.assertEqual(sum(samples[\"dones\"]), 3)\n ev.stop()\n\n # A gym env's max_episode_steps is smaller than Trainer's horizon.\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy_spec=MockPolicy,\n batch_mode=\"complete_episodes\",\n rollout_fragment_length=10,\n episode_horizon=6,\n soft_horizon=False)\n samples = ev.sample()\n # 12 steps due to `complete_episodes` batch_mode.\n self.assertEqual(len(samples[\"eps_id\"]), 12)\n # Two logical episodes and correct episode resets (always after 6(!)\n # steps).\n self.assertEqual(len(set(samples[\"eps_id\"])), 2)\n # 2 done values after 6 and 12 steps.\n check(samples[\"dones\"], [\n False, False, False, False, False, True, False, False, False,\n False, False, True\n ])\n ev.stop()\n\n def test_soft_horizon(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(episode_length=10),\n policy_spec=MockPolicy,\n batch_mode=\"complete_episodes\",\n rollout_fragment_length=10,\n episode_horizon=4,\n soft_horizon=True)\n samples = ev.sample()\n # three logical episodes\n self.assertEqual(len(set(samples[\"eps_id\"])), 3)\n # only 1 hard done value\n self.assertEqual(sum(samples[\"dones\"]), 1)\n ev.stop()\n\n def test_metrics(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(episode_length=10),\n policy_spec=MockPolicy,\n batch_mode=\"complete_episodes\")\n remote_ev = RolloutWorker.as_remote().remote(\n env_creator=lambda _: MockEnv(episode_length=10),\n policy_spec=MockPolicy,\n batch_mode=\"complete_episodes\")\n ev.sample()\n ray.get(remote_ev.sample.remote())\n result = collect_metrics(ev, [remote_ev])\n self.assertEqual(result[\"episodes_this_iter\"], 20)\n self.assertEqual(result[\"episode_reward_mean\"], 10)\n ev.stop()\n\n def test_async(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n sample_async=True,\n policy_spec=MockPolicy)\n batch = ev.sample()\n for key in [\"obs\", \"actions\", \"rewards\", \"dones\", \"advantages\"]:\n self.assertIn(key, batch)\n self.assertGreater(batch[\"advantages\"][0], 1)\n ev.stop()\n\n def test_auto_vectorization(self):\n ev = RolloutWorker(\n env_creator=lambda cfg: MockEnv(episode_length=20, config=cfg),\n policy_spec=MockPolicy,\n batch_mode=\"truncate_episodes\",\n rollout_fragment_length=2,\n num_envs=8)\n for _ in range(8):\n batch = ev.sample()\n self.assertEqual(batch.count, 16)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 0)\n for _ in range(8):\n batch = ev.sample()\n self.assertEqual(batch.count, 16)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 8)\n indices = []\n for env in ev.async_env.vector_env.envs:\n self.assertEqual(env.unwrapped.config.worker_index, 0)\n indices.append(env.unwrapped.config.vector_index)\n self.assertEqual(indices, [0, 1, 2, 3, 4, 5, 6, 7])\n ev.stop()\n\n def test_batches_larger_when_vectorized(self):\n ev = RolloutWorker(\n env_creator=lambda _: 
MockEnv(episode_length=8),\n policy_spec=MockPolicy,\n batch_mode=\"truncate_episodes\",\n rollout_fragment_length=4,\n num_envs=4)\n batch = ev.sample()\n self.assertEqual(batch.count, 16)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 0)\n batch = ev.sample()\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 4)\n ev.stop()\n\n def test_vector_env_support(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockVectorEnv(episode_length=20, num_envs=8),\n policy_spec=MockPolicy,\n batch_mode=\"truncate_episodes\",\n rollout_fragment_length=10)\n for _ in range(8):\n batch = ev.sample()\n self.assertEqual(batch.count, 10)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 0)\n for _ in range(8):\n batch = ev.sample()\n self.assertEqual(batch.count, 10)\n result = collect_metrics(ev, [])\n self.assertEqual(result[\"episodes_this_iter\"], 8)\n ev.stop()\n\n def test_truncate_episodes(self):\n ev_env_steps = RolloutWorker(\n env_creator=lambda _: MockEnv(10),\n policy_spec=MockPolicy,\n policy_config={\"_use_trajectory_view_api\": True},\n rollout_fragment_length=15,\n batch_mode=\"truncate_episodes\")\n batch = ev_env_steps.sample()\n self.assertEqual(batch.count, 15)\n self.assertTrue(isinstance(batch, SampleBatch))\n ev_env_steps.stop()\n\n action_space = Discrete(2)\n obs_space = Box(float(\"-inf\"), float(\"inf\"), (4, ), dtype=np.float32)\n ev_agent_steps = RolloutWorker(\n env_creator=lambda _: MultiAgentCartPole({\"num_agents\": 4}),\n policy_spec={\n \"pol0\": (MockPolicy, obs_space, action_space, {}),\n \"pol1\": (MockPolicy, obs_space, action_space, {}),\n },\n policy_config={\"_use_trajectory_view_api\": True},\n policy_mapping_fn=lambda ag: \"pol0\" if ag == 0 else \"pol1\",\n rollout_fragment_length=301,\n count_steps_by=\"env_steps\",\n batch_mode=\"truncate_episodes\",\n )\n batch = ev_agent_steps.sample()\n self.assertTrue(isinstance(batch, MultiAgentBatch))\n self.assertGreater(batch.agent_steps(), 301)\n self.assertEqual(batch.env_steps(), 301)\n ev_agent_steps.stop()\n\n ev_agent_steps = RolloutWorker(\n env_creator=lambda _: MultiAgentCartPole({\"num_agents\": 4}),\n policy_spec={\n \"pol0\": (MockPolicy, obs_space, action_space, {}),\n \"pol1\": (MockPolicy, obs_space, action_space, {}),\n },\n policy_config={\"_use_trajectory_view_api\": True},\n policy_mapping_fn=lambda ag: \"pol0\" if ag == 0 else \"pol1\",\n rollout_fragment_length=301,\n count_steps_by=\"agent_steps\",\n batch_mode=\"truncate_episodes\")\n batch = ev_agent_steps.sample()\n self.assertTrue(isinstance(batch, MultiAgentBatch))\n self.assertLess(batch.env_steps(), 301)\n # When counting agent steps, the count may be slightly larger than\n # rollout_fragment_length, b/c we have up to N agents stepping in each\n # env step and we only check, whether we should build after each env\n # step.\n self.assertGreaterEqual(batch.agent_steps(), 301)\n ev_agent_steps.stop()\n\n def test_complete_episodes(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(10),\n policy_spec=MockPolicy,\n rollout_fragment_length=5,\n batch_mode=\"complete_episodes\")\n batch = ev.sample()\n self.assertEqual(batch.count, 10)\n ev.stop()\n\n def test_complete_episodes_packing(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(10),\n policy_spec=MockPolicy,\n rollout_fragment_length=15,\n batch_mode=\"complete_episodes\")\n batch = ev.sample()\n self.assertEqual(batch.count, 20)\n self.assertEqual(\n 
batch[\"t\"].tolist(),\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n ev.stop()\n\n def test_filter_sync(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy_spec=MockPolicy,\n sample_async=True,\n observation_filter=\"ConcurrentMeanStdFilter\")\n time.sleep(2)\n ev.sample()\n filters = ev.get_filters(flush_after=True)\n obs_f = filters[DEFAULT_POLICY_ID]\n self.assertNotEqual(obs_f.rs.n, 0)\n self.assertNotEqual(obs_f.buffer.n, 0)\n ev.stop()\n\n def test_get_filters(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy_spec=MockPolicy,\n sample_async=True,\n observation_filter=\"ConcurrentMeanStdFilter\")\n self.sample_and_flush(ev)\n filters = ev.get_filters(flush_after=False)\n time.sleep(2)\n filters2 = ev.get_filters(flush_after=False)\n obs_f = filters[DEFAULT_POLICY_ID]\n obs_f2 = filters2[DEFAULT_POLICY_ID]\n self.assertGreaterEqual(obs_f2.rs.n, obs_f.rs.n)\n self.assertGreaterEqual(obs_f2.buffer.n, obs_f.buffer.n)\n ev.stop()\n\n def test_sync_filter(self):\n ev = RolloutWorker(\n env_creator=lambda _: gym.make(\"CartPole-v0\"),\n policy_spec=MockPolicy,\n sample_async=True,\n observation_filter=\"ConcurrentMeanStdFilter\")\n obs_f = self.sample_and_flush(ev)\n\n # Current State\n filters = ev.get_filters(flush_after=False)\n obs_f = filters[DEFAULT_POLICY_ID]\n\n self.assertLessEqual(obs_f.buffer.n, 20)\n\n new_obsf = obs_f.copy()\n new_obsf.rs._n = 100\n ev.sync_filters({DEFAULT_POLICY_ID: new_obsf})\n filters = ev.get_filters(flush_after=False)\n obs_f = filters[DEFAULT_POLICY_ID]\n self.assertGreaterEqual(obs_f.rs.n, 100)\n self.assertLessEqual(obs_f.buffer.n, 20)\n ev.stop()\n\n def test_extra_python_envs(self):\n extra_envs = {\"env_key_1\": \"env_value_1\", \"env_key_2\": \"env_value_2\"}\n self.assertFalse(\"env_key_1\" in os.environ)\n self.assertFalse(\"env_key_2\" in os.environ)\n ev = RolloutWorker(\n env_creator=lambda _: MockEnv(10),\n policy_spec=MockPolicy,\n extra_python_environs=extra_envs)\n self.assertTrue(\"env_key_1\" in os.environ)\n self.assertTrue(\"env_key_2\" in os.environ)\n ev.stop()\n\n # reset to original\n del os.environ[\"env_key_1\"]\n del os.environ[\"env_key_2\"]\n\n def test_no_env_seed(self):\n ev = RolloutWorker(\n env_creator=lambda _: MockVectorEnv(episode_length=20, num_envs=8),\n policy_spec=MockPolicy,\n seed=1)\n assert not hasattr(ev.env, \"seed\")\n ev.stop()\n\n def sample_and_flush(self, ev):\n time.sleep(2)\n ev.sample()\n filters = ev.get_filters(flush_after=True)\n obs_f = filters[DEFAULT_POLICY_ID]\n self.assertNotEqual(obs_f.rs.n, 0)\n self.assertNotEqual(obs_f.buffer.n, 0)\n return obs_f\n\n\nif __name__ == \"__main__\":\n import pytest\n import sys\n sys.exit(pytest.main([\"-v\", __file__]))\n"
] |
[
[
"numpy.min",
"numpy.max",
"numpy.argmax",
"numpy.zeros_like",
"numpy.mean"
]
] |
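
test_basic above checks that prev_rewards/prev_actions are the one-step-shifted sequences, zeroed at episode boundaries. The helper it defines inline, extracted as a runnable sketch:

import numpy as np

def to_prev(vec, dones):
    out = np.zeros_like(vec)
    for i, v in enumerate(vec):
        if i + 1 < len(out) and not dones[i]:
            out[i + 1] = v
    return out

print(to_prev(np.array([1.0, 2.0, 3.0]), np.array([False, True, False])))
# [0. 1. 0.]: the reward before step 2 is dropped because step 1 ended an episode
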
qiaone/GIF
|
[
"2c551e844748c72395fc91fb080c7a2f9c8d5285"
] |
[
"tests/compute_light_stastics.py"
] |
[
"import numpy as np\n\n\nlight_code = []\nfl_param_dict = np.load('/is/cluster/work/pghosh/gif1.0/DECA_inferred/deca_flame_params_camera_corrected.npy',\n allow_pickle=True).item()\nfor i, key in enumerate(fl_param_dict):\n flame_param = fl_param_dict[key]\n light_code.append(flame_param['lit'].flatten())\n\nlight_code = np.array(light_code)\nmean = np.mean(light_code, axis=0)\nstd = np.std(light_code, axis=0)\n\nprint(f'mean : {mean} \\n std: {std}')\nprint(f'most variation in cmp {np.argmax(std)}')\n"
] |
[
[
"numpy.std",
"numpy.argmax",
"numpy.mean",
"numpy.load",
"numpy.array"
]
] |
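
The script above reduces a dictionary of per-frame FLAME parameters to per-component lighting statistics. The same computation on stand-in data (the 27-dimensional 'lit' shape, 9 spherical-harmonic coefficients times 3 channels, is an assumption):

import numpy as np

light_code = np.random.rand(100, 27)  # stand-in for the flattened 'lit' params
mean = np.mean(light_code, axis=0)
std = np.std(light_code, axis=0)
print(f"most variation in cmp {np.argmax(std)}")
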
Kubasinska/Roots
|
[
"fc8915d6d043b527c622e3c78d3c658ffdd23cab"
] |
[
"tests/generate_dynamic_source_arbor.py"
] |
[
"from roots.core import Roots,Roots_math\nfrom roots.kmd import KMDD\nfrom roots.graphmethods import GraphMethods\nfrom roots.swcToolkit import swcToolkit\nfrom roots.root2neuron import Root2Hoc,Root2Py\nfrom roots.microstructures import Microstructures\nimport os\nfrom roots.visualization import swcVisualizer\nimport numpy as np\nimport random\nimport os\n\n#create dictionary to scale diameters of sections corresponding to microstructures\nlabel_scalars={}\n\n#scale\nlabel_scalars['node'] = 1\nlabel_scalars['internode'] = 1/0.65\nlabel_scalars['paranode1'] = 1/0.65\nlabel_scalars['paranode2'] = 1/0.65\nlabel_scalars['interbouton'] = 1\nlabel_scalars['bouton'] = 7\n\ndef return_random_params():\n\t#instantiate Roots core parameters\n\ta = random.choice(np.arange(1,3.0,0.25))\n\tb = random.choice(np.arange(300,475,25))\n\tc = random.choice(np.arange(1.5,3.0,0.25))\n\td = random.choice(np.arange(300,425,25))\n\treturn(a,b,c,d)\n\ndef make_axon(outputdir):\n\t#use Roots to grow axon\n\ta,b,c,d = return_random_params()\n\tsource_point,targets = np.random.rand(300,3)*100.0\n\troot = Roots(source_point, targets, np.pi/a, b, np.pi/c, d, 100, KMDDproperties=dict(zip(['cluster_reduce','tri_edge_length_max','source','open_points'],[0.25,300,source_inner,new_points[:source_index]+new_points[source_index+1:]])))\n\tgraph_nodes = root.grow()\n\tswcname = outputdir+axe_type+'_'+str(a)+'_'+str(b)+'_'+str(c)+'_'+str(d)+'.swc'\n\troot.to_swc(swcname) #save axon to swc\n\treturn(root)\n\ndef add_myelin_boutons(arbor,bouton_branch_list,myelin_branch_list,diam_scalars):\n\t#add myelin and boutons to existing arbor (by interbifurcated region)\n\tmstruct = Microstructures()\n\tarbor,labels = mstruct.add_microstructures_to_arbor(morph,[key for key in morph.keys() if key in mlist],[key for key in morph.keys() if key not in mlist])\n\tarbor = mstruct.apply_microstructures_diameter_scalars(arbor,labels,diam_scalars,replace=False)\n\treturn(arbor,labels)\n\ndef mplot_arbor(arbor):\n\t#plot sectioned arbor in mayavi\n\tvisualizer = swcVisualizer()\n\tvisualizer.mplot_sectioned_arbor(arbor)\n\ndef shift_rotate_arbor(arbor2,shiftxyz,elevation,azimuth):\n\t#shift and rotate sectioned arbor in space\n\tarbor2 = swctool.move_morphology(arbor2,[762.2999880000001, 2337.4, -63.971000000000004])\n\tarbor2 = swctool.rotate_morphology(arbor2,[0,0,0],elevation=90.0)\n\treturn(arbor2)\n\nif __name__ == \"__main__\":\n\troot = make_axon(os.getcwd()+'/')\n\troot,labels = add_myelin_boutons(arbor,list(arbor.keys()),[],label_scalars)\n\troot = shift_rotate_arbor(arbor,[0,0,0],0,0)\n\tmplot_arbor(arbor)\n\tnrn_writer = Root2Py()\n\tnrn_writer.arbor_to_hoc(arbor,labels)\n"
] |
[
[
"numpy.arange",
"numpy.random.rand"
]
] |
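
return_random_params in the script above samples growth parameters from coarse grids via random.choice over numpy.arange (the rest of the script is not runnable as recorded: the source_point, targets unpacking of a (300, 3) array raises ValueError, and names like axe_type and swctool are unresolved). The sampling pattern in isolation:

import random
import numpy as np

a = random.choice(np.arange(1, 3.0, 0.25))    # divisor for the branch angle pi/a
b = random.choice(np.arange(300, 475, 25))    # segment-length parameter
c = random.choice(np.arange(1.5, 3.0, 0.25))
d = random.choice(np.arange(300, 425, 25))
print(np.pi / a, b, np.pi / c, d)
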
h3dema/deepwifi
|
[
"2df0f5b4de7c9cb4c1d26e5629fa689e2a6036e4"
] |
[
"DQL/ddql.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n This module implements Double Deep QL\n\n\"\"\"\nimport numpy as np\nimport logging\n\nfrom DQL.dql import DQL\n\n\n\"\"\"LOG variable for the DDQL module\"\"\"\nLOG = logging.getLogger('DDQL')\nlogging.basicConfig(level=logging.INFO)\n\n\nclass DDQL(DQL):\n\n \"\"\"\n ref.\n \"\"\"\n\n def get_q_max(self, sprime):\n \"\"\" the Q_max is calculated using the target network\n @param sprime: the sequence of next states (s')\n\n @return: the Qmax value used in the TD-error. to avoid overestimation used Q-function to predict the action a' , and\n uses this value to obtain value of the Q(s', a') using the target network\n\n Q_max = Q_target(s', arg max Q(s', a'))\n a'\n \"\"\"\n actions = np.argmax(self.model.predict(sprime), axis=1) # uses Q-network to find the action\n\n # obtain the Q-max for each device\n q_target = self.target.predict(sprime)\n q_max = []\n for i, action in zip(range(len(actions)), actions):\n q_max.append(q_target[i][action])\n\n self.log.debug(\"action (from Q-network):{}\".format(actions))\n self.log.debug(\"s': {} Q max (from Q-target): {}\".format(sprime, q_max))\n return np.array(q_max)\n"
] |
[
[
"numpy.array"
]
] |
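The per-device loop in get_q_max above can be collapsed into a single NumPy gather; a minimal sketch with made-up arrays standing in for self.model.predict(sprime) and self.target.predict(sprime):

import numpy as np

q_online = np.array([[1.0, 3.0], [2.0, 0.5]])  # stand-in for self.model.predict(sprime)
q_target = np.array([[0.9, 2.5], [1.8, 0.7]])  # stand-in for self.target.predict(sprime)

actions = np.argmax(q_online, axis=1)                # online network selects a'
q_max = q_target[np.arange(len(actions)), actions]   # target network evaluates Q_target(s', a')
print(q_max)  # [2.5 1.8]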
Akababa/torch2trt
|
[
"03063b74a7eb40f5aac88d49be6b8b5e4e4e92d7"
] |
[
"torch2trt/conversion_utils.py"
] |
[
"import numpy as np\n\nimport tensorrt as trt\nimport torch\nimport functools\n\nget_output_old = trt.ILayer.get_output\n\n\n# Wrap tensorrt.ILayer.get_output for better error checking and debugging\n@functools.wraps(get_output_old)\ndef __get_output_v2(*args, **kwargs):\n # allargs = tuple(args) + tuple(kwargs.values())\n # print(allargs)\n output = get_output_old(*args, **kwargs)\n print(f\"+ \\\"{output.name}\\\" with shape {output.shape}, dtype {output.dtype}\")\n assert output.shape.__len__() >= 0, \"Invalid ILayer inputs\"\n return output\n\n\ndef wrap_get_output():\n trt.ILayer.get_output = __get_output_v2\n # print(f\"Wrapped {get_output_old} - This should only be called once\")\n\n\ndef unwrap_get_output():\n trt.ILayer.get_output = get_output_old\n\n\ndef torch_dtype_to_trt(dtype):\n assert isinstance(dtype, torch.dtype)\n if dtype == torch.bool:\n return trt.bool\n elif dtype == torch.int8:\n return trt.int8\n elif dtype == torch.int32:\n return trt.int32\n elif dtype == torch.float16:\n return trt.float16\n elif dtype == torch.float32:\n return trt.float32\n else:\n raise TypeError('%s is not supported by tensorrt' % dtype)\n\n\ndef torch_dtype_from_trt(dtype):\n assert isinstance(dtype, trt.DataType)\n if dtype == trt.bool:\n return torch.bool\n elif dtype == trt.int8:\n return torch.int8\n elif dtype == trt.int32:\n return torch.int32\n elif dtype == trt.float16:\n return torch.float16\n elif dtype == trt.float32:\n return torch.float32\n else:\n raise TypeError('%s is not supported by torch' % dtype)\n\n\ndef torch_device_to_trt(device):\n assert isinstance(device, torch.device)\n if device.type == torch.device('cuda').type:\n return trt.TensorLocation.DEVICE\n elif device.type == torch.device('cpu').type:\n print(\"WARNING: on cpu, use model.to(device='cuda') before calling torch2trt\")\n return trt.TensorLocation.HOST\n else:\n return TypeError('%s is not supported by tensorrt' % device)\n\n\ndef torch_device_from_trt(device):\n assert isinstance(device, trt.TensorLocation)\n if device == trt.TensorLocation.DEVICE:\n return torch.device('cuda')\n elif device == trt.TensorLocation.HOST:\n return torch.device('cpu')\n else:\n return TypeError('%s is not supported by torch' % device)\n\n\ndef validate_shape(shape, minoptmax):\n mins, opts, maxs = np.array(minoptmax)\n shape = np.array(shape)\n assert all(mins <= opts), f\"{list(mins)} <= {list(opts)} not satisfied\"\n assert all(opts <= maxs), f\"{list(opts)} <= {list(maxs)} not satisfied\"\n assert all(mins <= shape), f\"{list(mins)} <= {list(shape)} not satisfied\"\n assert all(shape <= maxs), f\"{list(shape)} <= {list(maxs)} not satisfied\"\n"
] |
[
[
"torch.device",
"numpy.array"
]
] |
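The if/elif dtype chains above can equally be written as a pair of lookup tables; a sketch of that alternative design (runnable only where tensorrt is installed, same dtype set as in the record):

import torch
import tensorrt as trt

_TORCH_TO_TRT = {torch.bool: trt.bool, torch.int8: trt.int8, torch.int32: trt.int32,
                 torch.float16: trt.float16, torch.float32: trt.float32}
_TRT_TO_TORCH = {v: k for k, v in _TORCH_TO_TRT.items()}

def torch_dtype_to_trt(dtype):
    # table lookup replaces the if/elif chain; unknown dtypes still raise
    try:
        return _TORCH_TO_TRT[dtype]
    except KeyError:
        raise TypeError('%s is not supported by tensorrt' % dtype)

def torch_dtype_from_trt(dtype):
    try:
        return _TRT_TO_TORCH[dtype]
    except KeyError:
        raise TypeError('%s is not supported by torch' % dtype)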
CuiRuikai/Bidirectional-Residual-Declarative-Network
|
[
"af2601c35777ab967c620b9353dcd230445efebe"
] |
[
"ddn/pytorch/sample_nodes.py"
] |
[
"# SAMPLE DEEP DECLARATIVE NODES\n#\n# Dylan Campbell <dylan.campbell@anu.edu.au>\n# Stephen Gould <stephen.gould@anu.edu.au>\n\nimport torch\nimport math\nfrom ddn.pytorch.node import *\n\nclass SquaredErrorNode(AbstractNode):\n \"\"\"Computes the squared difference between the input and a given target vector.\"\"\"\n def __init__(self, x_target):\n super().__init__()\n self.x_target = x_target\n\n def solve(self, x):\n return 0.5 * ((x - self.x_target) ** 2).sum(dim=-1), None\n\n def gradient(self, x, y=None, v=None, ctx=None):\n if v is None:\n v = x.new_ones(x.size(0)) # b\n return torch.einsum('b,bm->bm', (v, x - self.x_target)),\n\n\nclass UnconstPolynomial(AbstractDeclarativeNode):\n \"\"\"Solves min. f(x, y) = xy^4 + 2x^2y^3 - 12y^2 from Gould et al., 2016. Takes smallest x over the three\n stationary points.\"\"\"\n def __init__(self):\n super().__init__()\n \n def objective(self, x, y):\n return (x * y ** 2.0 + 2 * x ** 2.0 * y - 12) * y ** 2.0\n\n def solve(self, x):\n delta = (9.0 * x ** 4.0 + 96.0 * x).sqrt()\n y_stationary = torch.cat((torch.zeros_like(x), (-3.0 * x ** 2.0 - delta) / (4.0 * x), (-3.0 * x ** 2.0 + delta) / (4.0 * x)), dim=-1)\n y_min_indx = self.objective(x, y_stationary).argmin(dim=-1)\n y = torch.cat([torch.index_select(a, dim=0, index=i).unsqueeze(0) for a, i in zip(y_stationary, y_min_indx)])\n return y, None\n\n def gradient(self, x, y=None, v=None, ctx=None):\n \"\"\"Override base class to compute the analytic gradient of the optimal solution.\"\"\"\n x = x.detach()\n if y is None:\n y, ctx = self.solve(x)\n y = y.detach()\n if v is None:\n v = torch.ones_like(y)\n return torch.einsum('bm,bm->bm', (v, -1.0 * (y ** 3 + 3.0 * x * y ** 2) / (3.0 * x * y ** 2 + 3.0 * x ** 2 * y - 6.0))),\n\nclass GlobalPseudoHuberPool2d(AbstractDeclarativeNode):\n \"\"\"\"\"\"\n def __init__(self):\n super().__init__()\n \n def objective(self, x, alpha, y):\n alpha2 = (alpha * alpha).unsqueeze(-1).expand_as(x)\n z = y.unsqueeze(-1).unsqueeze(-1) - x\n phi = alpha2 * (torch.sqrt(1.0 + torch.pow(z, 2) / alpha2) - 1.0)\n return phi.sum(dim=(-2,-1)) # b\n\n def runOptimisation(self, x, alpha, y):\n with torch.enable_grad():\n opt = torch.optim.LBFGS([y],\n lr=1, # Default: 1\n max_iter=100, # Default: 20\n max_eval=None, # Default: None\n tolerance_grad=1e-05, # Default: 1e-05\n tolerance_change=1e-09, # Default: 1e-09\n history_size=100, # Default: 100\n line_search_fn=None # Default: None, Alternative: \"strong_wolfe\"\n )\n def reevaluate():\n opt.zero_grad()\n f = self.objective(x, alpha, y).sum() # sum over batch elements\n f.backward()\n return f\n opt.step(reevaluate)\n return y\n\n def solve(self, x, alpha):\n x = x.detach()\n y = x.mean([-2, -1]).clone().requires_grad_()\n y = self.runOptimisation(x, alpha, y)\n y = y.detach()\n z = (y.unsqueeze(-1).unsqueeze(-1) - x).clone()\n ctx = {'z': z}\n return y, ctx\n\n def gradient(self, x, alpha, y=None, v=None, ctx=None):\n \"\"\"Override base class to compute the analytic gradient of the optimal solution.\"\"\"\n if y is None:\n y, ctx = self.solve(x, alpha)\n if v is None:\n v = torch.ones_like(y)\n z = ctx['z'] # b x n1 x n2\n alpha2 = (alpha * alpha).unsqueeze(-1).expand_as(z)\n w = torch.pow(1.0 + torch.pow(z, 2) / alpha2, -1.5)\n w_sum = w.sum(dim=-1, keepdim=True).sum(dim=-2, keepdim=True).expand_as(w)\n Dy_at_x = torch.where(w_sum.abs() <= 1e-9, torch.zeros_like(w), w.div(w_sum)) # b x n1 x n2\n return torch.einsum('b,bmn->bmn', (v, Dy_at_x)), None\n\nclass LinFcnOnUnitCircle(EqConstDeclarativeNode):\n \"\"\"\n 
Solves the problem\n minimize f(x, y) = (1, x)^Ty\n subject to h(y) = ||y||^2 = 1\n for 1d input (x) and 2d output (y).\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def objective(self, x, y):\n return y[:, 0] + y[:, 1] * x[:, 0]\n\n def equality_constraints(self, x, y):\n return torch.einsum('bm,bm->b', (y, y)) - 1.0\n\n def solve(self, x):\n x_aug = torch.cat((torch.ones_like(x), x), dim=-1) # bx2\n t = torch.sqrt(1.0 + torch.pow(x, 2.0))\n y = -1.0 * x_aug / t # bx2\n ctx = {'nu': -0.5 * t} # b\n return y, ctx\n\n def gradient(self, x, y=None, v=None, ctx=None):\n \"\"\"Override base class to compute the analytic gradient of the optimal solution.\"\"\"\n x = x.detach()\n if v is None:\n v = x.new_ones(x.size(0), 2) # bx2\n x_aug = torch.cat((x, -1.0 * torch.ones_like(x)), dim=-1) # bx2\n t = torch.pow(1.0 + torch.pow(x, 2.0), 1.5)\n return torch.einsum('bm,bmn->bn', (v, (x_aug / t).unsqueeze(-1))),\n\nclass ConstLinFcnOnParameterizedCircle(EqConstDeclarativeNode):\n \"\"\"\n Solves the problem\n minimize f(x, y) = (1, 1)^Ty\n subject to h(y) = ||y||^2 = x^2\n for 1d input (x) and 2d output (y).\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def objective(self, x, y):\n return y[:, 0] + y[:, 1]\n\n def equality_constraints(self, x, y):\n return torch.einsum('bm,bm->b', (y, y)) - torch.einsum('b,b->b', (x[:, 0], x[:, 0]))\n\n def solve(self, x):\n y = -1.0 * torch.abs(x[:, 0]).unsqueeze(-1) * x.new_ones(x.size(0), 2) / math.sqrt(2.0) # bx2\n nu = torch.where(x[:, 0] == 0.0, torch.zeros_like(x[:, 0]), 0.5 / y[:, 0])\n ctx = {'nu': nu}\n return y, ctx\n\n def gradient(self, x, y=None, v=None, ctx=None):\n \"\"\"Override base class to compute the analytic gradient of the optimal solution.\"\"\"\n x = x.detach()\n if v is None:\n v = x.new_ones(x.size(0), 2) # bx2\n Dy_at_x = -1.0 * torch.sign(x[:, 0]).unsqueeze(-1) * x.new_ones(x.size(0), 2) / math.sqrt(2.0) # bx2\n return torch.einsum('bm,bmn->bn', (v, Dy_at_x.unsqueeze(-1))), #\n\nclass LinFcnOnParameterizedCircle(EqConstDeclarativeNode):\n \"\"\"\n Solves the problem\n minimize f(x, y) = (1, x_1)^Ty\n subject to h(y) = \\|y\\|^2 = x_2^2\n for 2d input (x) and 2d output (y).\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def objective(self, x, y):\n return y[:, 0] + torch.einsum('b,b->b', (x[:, 0], y[:, 1]))\n\n def equality_constraints(self, x, y):\n return torch.einsum('bm,bm->b', (y, y)) - torch.einsum('b,b->b', (x[:, 1], x[:, 1]))\n\n def solve(self, x):\n y = -1.0 * torch.abs(x[:, 1]).unsqueeze(-1) * torch.cat((torch.ones_like(x[:, 0:1]), x[:, 0:1]), dim=-1) / torch.sqrt(1.0 + torch.pow(x[:, 0], 2.0)).unsqueeze(-1) # bx2\n nu = torch.where(x[:, 1] == 0.0, torch.zeros_like(x[:, 0]), 0.5 / y[:, 0])\n ctx = {'nu': nu}\n return y, ctx\n\n def gradient(self, x, y=None, v=None, ctx=None):\n \"\"\"Override base class to compute the analytic gradient of the optimal solution.\"\"\"\n x = x.detach()\n if v is None:\n v = x.new_ones(x.size(0), 2) # bx2\n a = torch.abs(x[:, 1]).unsqueeze(-1) * torch.cat((x[:, 0:1], -1.0 * torch.ones_like(x[:, 0:1])), dim=-1) / torch.pow(1.0 + torch.pow(x[:, 0], 2.0), 1.5).unsqueeze(-1) # bx2\n b = -1.0 * torch.sign(x[:, 1]).unsqueeze(-1) * torch.cat((torch.ones_like(x[:, 0:1]), x[:, 0:1]), dim=-1) / torch.sqrt(1.0 + torch.pow(x[:, 0], 2.0)).unsqueeze(-1) # bx2\n Dy_at_x = torch.stack((a, b), dim=-1)\n return torch.einsum('bm,bmn->bn', (v, Dy_at_x)),\n\nclass QuadFcnOnSphere(EqConstDeclarativeNode):\n \"\"\"\n Solves the problem\n minimize f(x, y) = 0.5 * y^Ty - x^T y\n subject to h(y) 
= \\|y\\|^2 = 1\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def objective(self, x, y):\n return 0.5 * torch.einsum('bm,bm->b', (y, y)) - torch.einsum('bm,bm->b', (y, x))\n\n def equality_constraints(self, x, y):\n return torch.einsum('bm,bm->b', (y, y)) - 1.0\n\n def solve(self, x):\n y = x / torch.sqrt(torch.einsum('bm,bm->b', (x, x))).unsqueeze(-1)\n return y, None\n\n def gradient(self, x, y=None, v=None, ctx=None):\n \"\"\"Override base class to compute the analytic gradient of the optimal solution.\"\"\"\n x = x.detach()\n if v is None:\n v = torch.ones_like(x) # bxm\n x_inner = torch.einsum('bm,bm->b', (x, x))\n x_outer = torch.einsum('bm,bn->bmn', (x, x))\n eye_batch = torch.eye(x.size(1), dtype=x.dtype, device=x.device).expand_as(x_outer)\n Dy_at_x = (torch.einsum('b,bmn->bmn', (x_inner, eye_batch)) - x_outer) / torch.pow(torch.einsum('bm,bm->b', (x, x)), 1.5).unsqueeze(-1).unsqueeze(-1)\n return torch.einsum('bm,bmn->bn', (v, Dy_at_x)),\n\nclass QuadFcnOnBall(IneqConstDeclarativeNode):\n \"\"\"\n Solves the (inequality constrained) problem\n minimize f(x, y) = 0.5 * y^Ty - x^T y\n subject to h(y) = \\|y\\|^2 <= 1\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def objective(self, x, y):\n return 0.5 * torch.einsum('bm,bm->b', (y, y)) - torch.einsum('bm,bm->b', (y, x))\n\n def inequality_constraints(self, x, y):\n return torch.einsum('bm,bm->b', (y, y)) - 1.0\n\n def solve(self, x):\n x_norm_sq = torch.einsum('bm,bm->b', (x, x)).unsqueeze(-1) # bx1\n y = torch.where(x_norm_sq <= 1.0, x.clone(), x / torch.sqrt(x_norm_sq))\n return y, None\n\n def gradient(self, x, y=None, v=None, ctx=None):\n \"\"\"Override base class to compute the analytic gradient of the optimal solution.\"\"\"\n x = x.detach()\n if v is None:\n v = torch.ones_like(x) # bxm\n x_inner = torch.einsum('bm,bm->b', (x, x))\n x_outer = torch.einsum('bm,bn->bmn', (x, x))\n eye_batch = torch.eye(x.size(1), dtype=x.dtype, device=x.device).expand_as(x_outer)\n Dy_at_x = torch.where(x_inner.unsqueeze(-1).unsqueeze(-1) <= 1.0, eye_batch,\n (torch.einsum('b,bmn->bmn', (x_inner, eye_batch)) - x_outer) / torch.pow(torch.einsum('bm,bm->b', (x, x)), 1.5).unsqueeze(-1).unsqueeze(-1))\n return torch.einsum('bm,bmn->bn', (v, Dy_at_x)),\n"
] |
[
[
"torch.abs",
"torch.enable_grad",
"torch.sign",
"torch.sqrt",
"torch.einsum",
"torch.zeros_like",
"torch.optim.LBFGS",
"torch.pow",
"torch.stack",
"torch.index_select",
"torch.ones_like"
]
] |
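The closed form used by QuadFcnOnSphere.gradient — a vector-Jacobian product of y(x) = x/||x|| with Jacobian (||x||^2 I - x x^T)/||x||^3 — can be sanity-checked against autograd; a minimal sketch (random inputs, double precision):

import torch

x = torch.randn(4, 3, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 3, dtype=torch.double)

# autograd VJP through the solver map y = x / ||x||
y = x / torch.sqrt(torch.einsum('bm,bm->b', (x, x))).unsqueeze(-1)
(vjp_autograd,) = torch.autograd.grad(y, x, grad_outputs=v)

# closed form, mirroring QuadFcnOnSphere.gradient
x_d = x.detach()
x_inner = torch.einsum('bm,bm->b', (x_d, x_d))
x_outer = torch.einsum('bm,bn->bmn', (x_d, x_d))
eye_batch = torch.eye(3, dtype=x.dtype).expand_as(x_outer)
Dy = (torch.einsum('b,bmn->bmn', (x_inner, eye_batch)) - x_outer) / torch.pow(x_inner, 1.5).unsqueeze(-1).unsqueeze(-1)
vjp_closed = torch.einsum('bm,bmn->bn', (v, Dy))

print(torch.allclose(vjp_autograd, vjp_closed))  # True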
irfanimaduddin/lhires-observing-log
|
[
"74591520da6a99708ffcd8c45eadcd9416ef86c4"
] |
[
"core_script/function.py"
] |
[
"from PyQt5 import QtCore\r\nfrom PyQt5.QtWidgets import *\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport re\r\n\r\nclass pandasModel(QtCore.QAbstractTableModel):\r\n\r\n def __init__(self, data):\r\n QtCore.QAbstractTableModel.__init__(self)\r\n self._data = data\r\n\r\n def rowCount(self, parent=None):\r\n return self._data.shape[0]\r\n\r\n def columnCount(self, parnet=None):\r\n return self._data.shape[1]\r\n\r\n def data(self, index, role=QtCore.Qt.DisplayRole):\r\n if index.isValid():\r\n if role == QtCore.Qt.DisplayRole:\r\n return str(self._data.iloc[index.row(), index.column()])\r\n return None\r\n\r\n def headerData(self, col, orientation, role):\r\n if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\r\n return self._data.columns[col]\r\n return None\r\n\r\n\r\nclass GetFunction():\r\n def gettable(self):\r\n self.model = pandasModel(self.data)\r\n self.tableview.setModel(self.model)\r\n\r\n def getstart(self):\r\n self.datatype_form.setEnabled(True)\r\n lambda: self.datatype_form.setCurrentIndex(0)\r\n self.objname_form.setEnabled(True)\r\n self.filename_form.setEnabled(True)\r\n self.bin_form.setEnabled(True)\r\n self.exptime_form.setEnabled(True)\r\n self.nexp_form.setEnabled(True)\r\n self.updatelog_button.setEnabled(True)\r\n self.savelog_button.setEnabled(True)\r\n self.resetlog_button.setEnabled(True)\r\n self.startlog_button.setEnabled(False)\r\n\r\n utctime_value = pd.Timestamp.utcnow().strftime('%H:%M:%S')\r\n groove_value = self.groove_form.currentText()\r\n micro_setting_value = np.round(self.micro_setting_form.value(), 2)\r\n feature_value = self.feature_form.text()\r\n observer_value = self.observer_form.toPlainText()\r\n ccdtemp_value = np.round(self.ccdtemp_form.value(), 2)\r\n lamp_value = self.lamp_form.currentText()\r\n datatype_value = 'Start'\r\n objname_value = ''\r\n filename_value = ''\r\n bin_value = ''\r\n exptime_value = ''\r\n nexp_value = ''\r\n ambtemp_value = np.round(self.ambtemp_form.value(), 2)\r\n ambhum_value = np.round(self.ambhum_form.value(), 2)\r\n skycond_value = self.skycond_form.currentText()\r\n comment_value = self.comment_form.text()\r\n\r\n self.general = [groove_value, micro_setting_value, feature_value, observer_value, ccdtemp_value, lamp_value]\r\n self.data_append = [utctime_value, datatype_value, objname_value, filename_value, bin_value, exptime_value, nexp_value, ambtemp_value, ambhum_value, skycond_value, comment_value]\r\n self.data_app_ser = pd.Series(self.data_append, index=self.data.columns)\r\n self.data = self.data.append(self.data_app_ser, ignore_index=True)\r\n GetFunction.gettable(self)\r\n\r\n def getupdate(self):\r\n utctime_value = pd.Timestamp.utcnow().strftime('%H:%M:%S')\r\n datatype_value = self.datatype_form.currentText()\r\n objname_value = self.objname_form.text()\r\n filename_value = self.filename_form.text()\r\n bin_value = self.bin_form.currentText()\r\n exptime_value = np.round(self.exptime_form.value(), 2)\r\n nexp_value = int(self.nexp_form.value())\r\n ambtemp_value = np.round(self.ambtemp_form.value(), 2)\r\n ambhum_value = np.round(self.ambhum_form.value(), 2)\r\n skycond_value = self.skycond_form.currentText()\r\n comment_value = self.comment_form.text()\r\n\r\n self.data_append = [utctime_value, datatype_value, objname_value, filename_value, bin_value, exptime_value, nexp_value, ambtemp_value, ambhum_value, skycond_value, comment_value]\r\n self.data_app_ser = pd.Series(self.data_append, index=self.data.columns)\r\n self.data = self.data.append(self.data_app_ser, 
ignore_index=True)\r\n GetFunction.gettable(self)\r\n\r\n self.comment_form.setText('')\r\n self.objname_form.setText('')\r\n self.filename_form.setText('')\r\n self.exptime_form.setValue(0.)\r\n self.nexp_form.setValue(1)\r\n\r\n def getendsave(self):\r\n utctime_value = pd.Timestamp.utcnow().strftime('%H:%M:%S')\r\n groove_value = self.groove_form.currentText()\r\n micro_setting_value = np.round(self.micro_setting_form.value(), 2)\r\n feature_value = self.feature_form.text()\r\n observer_value = self.observer_form.toPlainText()\r\n ccdtemp_value = np.round(self.ccdtemp_form.value(), 2)\r\n lamp_value = self.lamp_form.currentText()\r\n datatype_value = 'End'\r\n objname_value = ''\r\n filename_value = ''\r\n bin_value = ''\r\n exptime_value = ''\r\n nexp_value = ''\r\n ambtemp_value = np.round(self.ambtemp_form.value(), 2)\r\n ambhum_value = np.round(self.ambhum_form.value(), 2)\r\n skycond_value = self.skycond_form.currentText()\r\n comment_value = self.comment_form.text()\r\n \r\n if observer_value.find(r\"[\\t\\n\\d,']+\") == -1:\r\n res = re.split(r\"[\\t\\n\\d,']+\", observer_value)\r\n observer_value = ', '.join(res)\r\n else:\r\n observer_value = observer_value\r\n\r\n self.general = [groove_value, micro_setting_value, feature_value, observer_value, ccdtemp_value, lamp_value]\r\n self.data_append = [utctime_value, datatype_value, objname_value, filename_value, bin_value, exptime_value, nexp_value, ambtemp_value, ambhum_value, skycond_value, comment_value]\r\n self.data_app_ser = pd.Series(self.data_append, index=self.data.columns)\r\n self.data = self.data.append(self.data_app_ser, ignore_index=True)\r\n\r\n logdate = pd.Timestamp.utcnow().strftime('%Y%m%d')\r\n tomorrow = pd.Timestamp.utcnow() + pd.Timedelta(1, unit='day')\r\n tomorrow_date = tomorrow.strftime('%d')\r\n logfilename = f\"{logdate}{tomorrow_date} - Log Pengamatan\"\r\n name = QFileDialog.getSaveFileName(self, 'Save File', directory=f\"C:/Users/User/Desktop/{logfilename}\", filter='*.csv')\r\n if(name[0] == ''):\r\n pass\r\n else:\r\n GetFunction.gettable(self)\r\n\r\n datelog = pd.Timestamp.utcnow().strftime('%a, %b %d %Y')\r\n #Create a new Logbook file\r\n file = open(name[0], \"w\")\r\n file.write(\"############################################################################################################\\n\")\r\n file.write(' 10\" LHIRES TELESCOPE OBSERVATION LOG \\n')\r\n file.write(\"\\n by: Irfan Imaduddin \\n version: 2.0 \\n contact: irfanimaduddin@gmail.com\\n\")\r\n file.write(\"============================================================================================================\\n\\n\")\r\n #Save location and telescope parameters to logbook file\r\n file.write(f\"Date of Observation: {datelog} \\t\\t\\t\\t\\t\\t\\t\\t\\t\\tObserver(s): {observer_value} \\n\")\r\n file.write(\"Observation location: Rumah Teleskop GOTO, Bosscha Observatory \\n\")\r\n file.write(\"- Longitude (deg): 107.61677 E \\n- Latitude (deg): 6.82472 S \\n- Altitude: 1327 m \\n\\n\")\r\n file.write('10\" LHIRES Telescope f/9.8\\n')\r\n file.write('- OTA: Meade 10\" f/9.8 (D = 254mm, F = 2500mm)\\n')\r\n file.write(f\"- Spectrograph: LHIRES III, centered on {feature_value} (micrometer= {micro_setting_value} mm, {groove_value})\\n\")\r\n file.write(f\"- Comparison lamp: {lamp_value}\\n\")\r\n file.write(f\"- CCD: SBIG ST-402ME \\t T= {ccdtemp_value} °C\\n\")\r\n file.write(\"- Slit viewer: ZWO ASI120MM-S\\n\")\r\n file.write(\"\\n\\n\")\r\n self.data.to_csv(file, sep=' ', index = False)\r\n file.close()\r\n\r\n def 
getreset(self):\r\n self.groove_form.setCurrentIndex(0)\r\n self.micro_setting_form.setValue(9.)\r\n self.feature_form.setText('')\r\n self.observer_form.setText('')\r\n self.ccdtemp_form.setValue(-10.)\r\n self.ambtemp_form.setValue(20.)\r\n self.ambhum_form.setValue(50.)\r\n self.skycond_form.setCurrentIndex(0)\r\n self.comment_form.setText('')\r\n self.datatype_form.setEnabled(False)\r\n self.datatype_form.setCurrentIndex(0)\r\n self.objname_form.setEnabled(False)\r\n self.objname_form.setText('')\r\n self.filename_form.setEnabled(False)\r\n self.filename_form.setText('')\r\n self.bin_form.setEnabled(False)\r\n self.bin_form.setCurrentIndex(0)\r\n self.exptime_form.setEnabled(False)\r\n self.exptime_form.setValue(0.)\r\n self.nexp_form.setEnabled(False)\r\n self.nexp_form.setValue(1)\r\n self.updatelog_button.setEnabled(False)\r\n self.savelog_button.setEnabled(False)\r\n self.resetlog_button.setEnabled(False)\r\n self.startlog_button.setEnabled(True)\r\n\r\n self.general = pd.DataFrame(columns=['groove', 'micro', 'feature', 'observer', 'ccdtemp'])\r\n self.data = pd.DataFrame(columns=['utctime', 'datatype', 'objname', 'filename', 'bin', 'exptime', 'N', 'ambtemp', 'ambhum', 'skycond', 'comment'])\r\n self.model = pandasModel(self.data)\r\n self.tableview.setModel(self.model)\r\n\r\n\r\nclass MsgFunction():\r\n def emptyfeature():\r\n msg = QMessageBox()\r\n msg.setWindowTitle(\"Error Found!\")\r\n msg.setText(\"You haven't input the spectral feature.\")\r\n msg.setIcon(QMessageBox.Critical)\r\n msg.setStandardButtons(QMessageBox.Ok)\r\n msg.setDefaultButton(QMessageBox.Ok)\r\n x = msg.exec_()\r\n\r\n def emptyobserver():\r\n msg = QMessageBox()\r\n msg.setWindowTitle(\"Error Found!\")\r\n msg.setText(\"You haven't input observer name(s).\")\r\n msg.setIcon(QMessageBox.Information)\r\n msg.setStandardButtons(QMessageBox.Ok)\r\n msg.setDefaultButton(QMessageBox.Ok)\r\n x = msg.exec_()\r\n\r\n def emptyfilename():\r\n msg = QMessageBox()\r\n msg.setWindowTitle(\"Error Found!\")\r\n msg.setText(\"File name is empty.\")\r\n msg.setIcon(QMessageBox.Critical)\r\n msg.setStandardButtons(QMessageBox.Ok)\r\n msg.setDefaultButton(QMessageBox.Ok)\r\n x = msg.exec_()\r\n \r\n def emptyobject():\r\n msg = QMessageBox()\r\n msg.setWindowTitle(\"Error Found!\")\r\n msg.setText(\"Object name is empty.\")\r\n msg.setIcon(QMessageBox.Critical)\r\n msg.setStandardButtons(QMessageBox.Ok)\r\n msg.setDefaultButton(QMessageBox.Ok)\r\n x = msg.exec_()\r\n \r\n def notsaved():\r\n msg = QMessageBox()\r\n msg.setWindowTitle(\"Error Found!\")\r\n msg.setText(\"You haven't save the log file.\")\r\n msg.setIcon(QMessageBox.Critical)\r\n msg.setStandardButtons(QMessageBox.Ok)\r\n msg.setDefaultButton(QMessageBox.Ok)\r\n x = msg.exec_()"
] |
[
[
"pandas.Timedelta",
"pandas.Series",
"pandas.DataFrame",
"pandas.Timestamp.utcnow"
]
] |
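For reference, the observer-name normalization in getendsave splits on tabs, newlines, digits, commas and apostrophes and rejoins with ', '; a standalone sketch with a made-up input string:

import re

observer_value = "Alice\nBob,Carol"
if re.search(r"[\t\n\d,']+", observer_value) is not None:
    parts = re.split(r"[\t\n\d,']+", observer_value)
    observer_value = ', '.join(parts)
print(observer_value)  # Alice, Bob, Carol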
specter119/py4cytoscape
|
[
"11f968a8ab6518354406c9ed8321f331355b54f0"
] |
[
"py4cytoscape/style_values.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"Functions for retrieving current values for visual properties.\n\nI. General functions for getting node, edge and network properties\nII. Specific functions for getting particular node, edge and network properties\n\"\"\"\n\n\"\"\"Copyright 2020 The Cytoscape Consortium\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated \ndocumentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the \nrights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit \npersons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the \nSoftware.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO \nTHE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, \nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n# External library imports\nimport sys\nimport pandas as df\n\n# Internal module imports\nfrom . import networks\nfrom . import network_views\nfrom . import commands\n\n# Internal module convenience imports\nfrom .exceptions import CyError\nfrom .py4cytoscape_utils import *\nfrom .py4cytoscape_logger import cy_log\n\n\n# ==============================================================================\n# I. General Functions\n# ------------------------------------------------------------------------------\n\n@cy_log\ndef get_node_property(node_names=None, visual_property=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Get values for any node property of the specified nodes.\n\n Args:\n nodes_names (str or list or int or None): List of nodes or None. If node list:\n ``list`` of node names or SUIDs, comma-separated string of node names or SUIDs, or scalar node name\n or SUID. Node names should be found in the ``name`` column of the ``node table``. If list is None,\n default is all nodes.\n visual_property (str): Name of a visual property. See ``get_visual_property_names``\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {node-name: prop_value} for each node in node_names parameter\n\n Raises:\n CyError: if network name, node name or property name doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_node_property(visual_property='NODE_LABEL')\n {'YIL070C': 'MAM33', 'YHR198C': 'YHR198C', ...}\n >>> get_node_property(visual_property='NODE_LABEL', node_names=['YIL070C', 'YHR198C'])\n {'YIL070C': 'MAM33', 'YHR198C': 'YHR198C'}\n >>> get_node_property(visual_property='NODE_LABEL', node_names='YIL070C, YHR198C')\n {'YIL070C': 'MAM33', 'YHR198C': 'YHR198C'}\n >>> get_node_property(visual_property='NODE_LABEL', node_names=[391173, 391172, 391175])\n {391173: 'RPL11B', 391172: 'SXM1', 391175: 'MPT1'}\n >>> get_node_property(visual_property='NODE_LABEL', node_names='391173, 391172, 391175')\n {391173: 'RPL11B', 391172: 'SXM1', 391175: 'MPT1'}\n >>> get_node_property(visual_property='NODE_LABEL', node_names='YER112W', network='galFiltered.sif')\n {'YER112W': 'LSM4'}\n >>> get_node_property(visual_property='NODE_LABEL', node_names=391173, network='galFiltered.sif')\n {391173: 'RPL11B'}\n \"\"\"\n net_suid = networks.get_network_suid(network, base_url=base_url)\n view_suid = network_views.get_network_views(net_suid, base_url=base_url)[0]\n\n if visual_property is None:\n raise CyError('Invalid visual property ... visual property must be non-null')\n\n if node_names is None:\n res = commands.cyrest_get('networks/' + str(net_suid) + '/views/' + str(view_suid) + '/nodes',\n {'visualProperty': visual_property}, base_url=base_url)\n node_suids = [node['SUID'] for node in res]\n node_names = node_suid_to_node_name(node_suids, network=network, base_url=base_url)\n node_props = {name: node['view'][0]['value'] for node, name in zip(res, node_names)}\n return node_props\n else:\n node_names = normalize_list(node_names)\n node_suids = node_name_to_node_suid(node_names, network=network, base_url=base_url, unique_list=True)\n node_props = {node_name: commands.cyrest_get(\n f'networks/{net_suid}/views/{view_suid}/nodes/{node_suid}/{visual_property}', base_url=base_url)['value']\n for node_suid, node_name in zip(node_suids, node_names)}\n return node_props\n\n\n@cy_log\ndef get_edge_property(edge_names=None, visual_property=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Get values for any edge property of the specified edges.\n\n This method retrieves the actual property of the node, given the current visual style, factoring together\n any default, mapping and bypass setting.\n\n Args:\n edge_names (str or list or int or None): List of edges or None. If node list:\n ``list`` of edge names or SUIDs, comma-separated string of edge names or SUIDs, or scalar edge name\n or SUID. Edge names should be found in the ``name`` column of the ``edge table``. If list is None,\n default is all edges.\n visual_property (str): Name of a visual property. See ``get_visual_property_names``\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {edge-name: prop_value} for each edge in edge_names parameter\n\n Raises:\n CyError: if network name, edge name or property name doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_edge_property(visual_property='EDGE_LABEL')\n {'YJR022W (pp) YNL050C': 'pp', 'YKR026C (pp) YGL122C': 'pp', ...}\n >>> get_edge_property(visual_property='EDGE_LABEL', edge_names=['YCL067C (pd) YIL015W', 'YCR084C (pp) YCL067C'])\n {'YCL067C (pd) YIL015W': 'pd', 'YCR084C (pp) YCL067C': 'pp'}\n >>> get_edge_property(visual_property='EDGE_LABEL', edge_names='YCL067C (pd) YIL015W, YCR084C (pp) YCL067C')\n {'YCL067C (pd) YIL015W': 'pd', 'YCR084C (pp) YCL067C': 'pp'}\n >>> get_edge_property(visual_property='EDGE_LABEL', edge_names=[393222, 393223])\n {393222: 'pd', 393223: 'pp'}\n >>> get_edge_property(visual_property='EDGE_LABEL', edge_names='393222, 393223')\n {393222: 'pd', 393223: 'pp'}\n >>> get_edge_property(visual_property='EDGE_LABEL', edge_names=393222)\n {393222: 'pd'}\n >>> get_edge_property(visual_property='EDGE_LABEL', edge_names='YDR277C (pp) YJR022W', network='galFiltered.sif')\n {'YDR277C (pp) YJR022W': 'pp'}\n \"\"\"\n net_suid = networks.get_network_suid(network, base_url=base_url)\n view_suid = network_views.get_network_views(net_suid, base_url=base_url)[0]\n\n if visual_property is None:\n raise CyError('Invalid visual property ... visual property must be non-null')\n\n if edge_names is None:\n res = commands.cyrest_get(f'networks/{net_suid}/views/{view_suid}/edges',\n {'visualProperty': visual_property}, base_url=base_url)\n edge_suids = [edge['SUID'] for edge in res]\n edge_names = edge_suid_to_edge_name(edge_suids, network=network, base_url=base_url)\n edge_props = {name: edge['view'][0]['value'] for edge, name in zip(res, edge_names)}\n return edge_props\n else:\n edge_names = normalize_list(edge_names)\n edge_suids = edge_name_to_edge_suid(edge_names, network=network, base_url=base_url, unique_list=True)\n edge_props = {edge_name: commands.cyrest_get(\n f'networks/{net_suid}/views/{view_suid}/edges/{edge_suid}/{visual_property}', base_url=base_url)['value']\n for edge_suid, edge_name in zip(edge_suids, edge_names)}\n return edge_props\n\n\n@cy_log\ndef get_network_property(visual_property, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Get values for any network property.\n\n This method retrieves the actual property of the network, given the current visual style, factoring together any\n default, mapping and bypass setting.\n\n Args:\n visual_property (str): Name of a visual property. See ``get_visual_property_names``\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n float: value of visual property\n\n Raises:\n CyError: if network name or property name doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_network_property('NETWORK_SCALE_FACTOR')\n 0.6299925248514752\n >>> get_network_property(visual_property='NETWORK_SCALE_FACTOR', network='galFiltered.sif')\n 0.6299925248514752\n \"\"\"\n net_suid = networks.get_network_suid(network, base_url=base_url)\n view_suid = network_views.get_network_views(net_suid, base_url=base_url)[0]\n\n if visual_property is None:\n raise CyError('Invalid visual property ... visual property must be non-null')\n\n res = commands.cyrest_get(f'networks/{net_suid}/views/{view_suid}/network/{visual_property}', base_url=base_url)\n return res['value']\n\n\n# ==============================================================================\n# II. Specific Functions\n# ==============================================================================\n# II.a. Node Properties\n# Pattern: call getNodeProperty()\n# ------------------------------------------------------------------------------\n\n@cy_log\ndef get_node_color(node_names=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the actual fill color of specified nodes.\n\n Args:\n nodes_names (str or list or int or None): List of nodes or None. If node list:\n ``list`` of node names or SUIDs, comma-separated string of node names or SUIDs, or scalar node name\n or SUID. Node names should be found in the ``name`` column of the ``node table``. If list is None,\n default is all nodes.\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {node-name: fill-color} for each node in node_names parameter\n\n Raises:\n CyError: if network name or node doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_node_color()\n {'YOR215C': '#FFFFEC', 'YBL026W': '#FCFDFE', 'YOL149W': '#FFFFE3', ...}\n >>> get_node_color(['YOR215C', 'YBL026W', 'YOL149W'])\n {'YOR215C': '#FFFFEC', 'YBL026W': '#FCFDFE', 'YOL149W': '#FFFFE3'}\n >>> get_node_color('YOR215C, YBL026W, YOL149W')\n {'YOR215C': '#FFFFEC', 'YBL026W': '#FCFDFE', 'YOL149W': '#FFFFE3'}\n >>> get_node_color([395406, 395407, 395404])\n {395406: '#FFFFEC', 395407: '#FCFDFE', 395404: '#FFFFE3'}\n >>> get_node_color('395406, 395407, 395404')\n {395406: '#FFFFEC', 395407: '#FCFDFE', 395404: '#FFFFE3'}\n >>> get_node_color(395406)\n {395406: '#FFFFEC'}\n >>> get_node_color(node_names='YOR215C', network='galFiltered.sif')\n {'YYOR215C': '#FFFFEC'}\n \"\"\"\n res = get_node_property(node_names, \"NODE_FILL_COLOR\", network=network, base_url=base_url)\n return res\n\n\n@cy_log\ndef get_node_size(node_names=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the actual size of specified nodes.\n\n Args:\n nodes_names (str or list or int or None): List of nodes or None. If node list:\n ``list`` of node names or SUIDs, comma-separated string of node names or SUIDs, or scalar node name\n or SUID. 
Node names should be found in the ``name`` column of the ``node table``. If list is None,\n default is all nodes.\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {node-name: size} for each node in node_names parameter\n\n Raises:\n CyError: if network name or node doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_node_size()\n {'YOR215C': 50.0, 'YBL026W': 50.0, 'YOL149W': 50.0, ...}\n >>> get_node_size(['YOR215C', 'YBL026W', 'YOL149W'])\n {'YOR215C': 50.0, 'YBL026W': 50.0, 'YOL149W': 50.0}\n >>> get_node_size('YOR215C, YBL026W, YOL149W')\n {'YOR215C': 50.0, 'YBL026W': 50.0, 'YOL149W': 50.0}\n >>> get_node_size([395406, 395407, 395404])\n {395406: 50.0, 395407: 50.0, 395404: 50.0}\n >>> get_node_size('395406, 395407, 395404')\n {395406: 50.0, 395407: 50.0, 395404: 50.0}\n >>> get_node_size(395406)\n {395406: 50.0}\n >>> get_node_size(node_names='YOR215C', network='galFiltered.sif')\n {'YYOR215C': 50.0}\n \"\"\"\n res = get_node_property(node_names, \"NODE_SIZE\", network=network, base_url=base_url)\n return res\n\n\n@cy_log\ndef get_node_width(node_names=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the actual width of specified nodes.\n\n Args:\n nodes_names (str or list or int or None): List of nodes or None. If node list:\n ``list`` of node names or SUIDs, comma-separated string of node names or SUIDs, or scalar node name\n or SUID. Node names should be found in the ``name`` column of the ``node table``. If list is None,\n default is all nodes.\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {node-name: width} for each node in node_names parameter\n\n Raises:\n CyError: if network name or node doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_node_width()\n {'YOR215C': 50.0, 'YBL026W': 50.0, 'YOL149W': 50.0, ...}\n >>> get_node_width(['YOR215C', 'YBL026W', 'YOL149W'])\n {'YOR215C': 50.0, 'YBL026W': 50.0, 'YOL149W': 50.0}\n >>> get_node_width('YOR215C, YBL026W, YOL149W')\n {'YOR215C': 50.0, 'YBL026W': 50.0, 'YOL149W': 50.0}\n >>> get_node_width([395406, 395407, 395404])\n {395406: 50.0, 395407: 50.0, 395404: 50.0}\n >>> get_node_width('395406, 395407, 395404')\n {395406: 50.0, 395407: 50.0, 395404: 50.0}\n >>> get_node_width(395406)\n {395406: 50.0}\n >>> get_node_width(node_names='YOR215C', network='galFiltered.sif')\n {'YYOR215C': 46.470588235294116}\n \"\"\"\n res = get_node_property(node_names, \"NODE_WIDTH\", network=network, base_url=base_url)\n return res\n\n\n@cy_log\ndef get_node_height(node_names=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the actual height of specified nodes.\n\n Args:\n nodes_names (str or list or int or None): List of nodes or None. 
If node list:\n ``list`` of node names or SUIDs, comma-separated string of node names or SUIDs, or scalar node name\n or SUID. Node names should be found in the ``name`` column of the ``node table``. If list is None,\n default is all nodes.\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {node-name: height} for each node in node_names parameter\n\n Raises:\n CyError: if network name or node doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_node_height()\n {'YOR215C': 50.0, 'YBL026W': 50.0, 'YOL149W': 50.0, ...}\n >>> get_node_height(['YOR215C', 'YBL026W', 'YOL149W'])\n {'YOR215C': 50.0, 'YBL026W': 50.0, 'YOL149W': 50.0}\n >>> get_node_height('YOR215C, YBL026W, YOL149W')\n {'YOR215C': 50.0, 'YBL026W': 50.0, 'YOL149W': 50.0}\n >>> get_node_height([395406, 395407, 395404])\n {395406: 50.0, 395407: 50.0, 395404: 50.0}\n >>> get_node_height('395406, 395407, 395404')\n {395406: 50.0, 395407: 50.0, 395404: 50.0}\n >>> get_node_height(395406)\n {395406: 50.0}\n >>> get_node_height(node_names='YOR215C', network='galFiltered.sif')\n {'YYOR215C': 46.470588235294116}\n \"\"\"\n res = get_node_property(node_names, \"NODE_HEIGHT\", network=network, base_url=base_url)\n return res\n\n\n@cy_log\ndef get_node_position(node_names=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the actual x,y position of specified nodes.\n\n Args:\n nodes_names (str or list or int or None): List of nodes or None. If node list:\n ``list`` of node names or SUIDs, comma-separated string of node names or SUIDs, or scalar node name\n or SUID. Node names should be found in the ``name`` column of the ``node table``. If list is None,\n default is all nodes.\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dataframe: with index as node_names values and columns x and y containing coordinates\n\n Raises:\n CyError: if network name or node doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_node_position()\n x y\n YIL052C 2628.866343678256 1180.9601936051579\n YDL215C 1723.7108261001308 2230.935871095392\n YLR432W 1660.9524948013027 2387.6488532731264\n ...\n >>> get_node_position(['YDR429C', 'YMR005W', 'YDR142C'])\n x y\n YDR429C 2628.866343678256 1180.9601936051579\n YMR005W 1723.7108261001308 2230.935871095392\n YDR142C 1660.9524948013027 2387.6488532731264\n >>> get_node_position([432646, 432647, 432644])\n x y\n 432646 2628.866343678256 1180.9601936051579\n 432647 1723.7108261001308 2230.935871095392\n 432644 1660.9524948013027 2387.6488532731264\n >>> get_node_position(node_names='YER112W', network='galFiltered.sif')\n x y\n YER112W 2151.8481399429043 2326.677814454767\n \"\"\"\n x_location = get_node_property(node_names, \"NODE_X_LOCATION\", network=network, base_url=base_url)\n x_values = [x_location[node_name] for node_name in x_location]\n x_names = [node_name for node_name in x_location]\n\n y_location = get_node_property(node_names, \"NODE_Y_LOCATION\", network=network, base_url=base_url)\n y_values = [y_location[node_name] for node_name in y_location]\n y_names = [node_name for node_name in y_location]\n\n # Verify that both property calls return locations for the same set of nodes ... necessary because\n # two calls are not atomic. (This stands virtually no chance of failing, but non-atomic call must be checked.)\n name_skew = [[x_name, y_name] for x_name, y_name in zip(x_names, y_names) if x_name != y_name]\n if name_skew != []:\n raise CyError(f'Inconsistent node sets returned: \"{name_skew}\"')\n\n data = df.DataFrame(index=y_names, data={'x': x_values, 'y': y_values})\n # TODO: Verify that this is what R returns, too\n\n return data\n\n\n# ==============================================================================\n# II.b. Edge Properties\n# Pattern: call getEdgeProperty()\n# ------------------------------------------------------------------------------\n\n@cy_log\ndef get_edge_line_width(edge_names=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the actual line width of specified edge.\n\n Args:\n edge_names (str or list or int or None): List of edges or None. If edge list:\n ``list`` of edge names or SUIDs, comma-separated string of edge names or SUIDs, or scalar node edge\n or SUID. Edge names should be found in the ``name`` column of the ``edge table``. If list is None,\n default is all edges.\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {edge-name: width} for each edge in edge_names parameter\n\n Raises:\n CyError: if network name or node doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_edge_line_width()\n {'YLR197W (pp) YOR310C': 2.0, 'YIL074C (pp) YNL311C': 2.0, ...}\n >>> get_edge_line_width(['YHR084W (pd) YFL026W', 'YHR084W (pd) YDR461W', 'YMR255W (pp) YGL122C'])\n {'YHR084W (pd) YFL026W': 2.0, 'YHR084W (pd) YDR461W': 2.0, 'YMR255W (pp) YGL122C': 2.0}\n >>> get_edge_line_width('YHR084W (pd) YFL026W, YHR084W (pd) YDR461W, YMR255W (pp) YGL122C')\n {'YHR084W (pd) YFL026W': 2.0, 'YHR084W (pd) YDR461W': 2.0, 'YMR255W (pp) YGL122C': 2.0}\n >>> get_edge_line_width([421382, 421383, 421380])\n {421382: 2.0, 421383: 2.0, 421380: 2.0}\n >>> get_edge_line_width('421382, 421383, 421380')\n {421382: 2.0, 421383: 2.0, 421380: 2.0}\n >>> get_edge_line_width(421382)\n {421382: 2.0}\n >>> get_edge_line_width(edge_names='YOR355W (pp) YNL091W', network='galFiltered.sif')\n {'YOR355W (pp) YNL091W': 2.0}\n \"\"\"\n res = get_edge_property(edge_names, \"EDGE_WIDTH\", network=network, base_url=base_url)\n return res\n\n\n@cy_log\ndef get_edge_color(edge_names=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the actual line color of specified edges.\n\n Args:\n edge_names (str or list or int or None): List of edges or None. If edge list:\n ``list`` of edge names or SUIDs, comma-separated string of edge names or SUIDs, or scalar node edge\n or SUID. Edge names should be found in the ``name`` column of the ``edge table``. If list is None,\n default is all edges.\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {edge-name: line-color} for each edge in edge_names parameter\n\n Raises:\n CyError: if network name or edge doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_edge_color()\n {'YMR117C (pp) YCL032W': '#808080', 'YMR255W (pp) YGL122C': '#808080', 'YNL214W (pp) YGL153W': '#808080', ...}\n >>> get_edge_color(['YHR084W (pd) YFL026W', 'YHR084W (pd) YDR461W', 'YMR255W (pp) YGL122C'])\n {'YHR084W (pd) YFL026W': '#808080', 'YHR084W (pd) YDR461W': '#808080', 'YMR255W (pp) YGL122C': '#808080'}\n >>> get_edge_color('YHR084W (pd) YFL026W, YHR084W (pd) YDR461W, YMR255W (pp) YGL122C')\n {'YHR084W (pd) YFL026W': '#808080', 'YHR084W (pd) YDR461W': '#808080', 'YMR255W (pp) YGL122C': '#808080'}\n >>> get_edge_color([421382, 421383, 421380])\n {421382: '#808080', 421383: '#808080', 421380: '#808080'}\n >>> get_edge_color('421382, 421383, 421380')\n {421382: '#808080', 421383: '#808080', 421380: '#808080'}\n >>> get_edge_color(421382)\n {421382: '#808080'}\n >>> get_edge_color(edge_names='YOR355W (pp) YNL091W', network='galFiltered.sif')\n {'YOR355W (pp) YNL091W': '#808080'}\n \"\"\"\n res = get_edge_property(edge_names, \"EDGE_PAINT\", network=network, base_url=base_url)\n return res\n\n\n@cy_log\ndef get_edge_line_style(edge_names=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the actual line style of specified edges.\n\n Args:\n edge_names (str or list or int or None): List of edges or None. If edge list:\n ``list`` of edge names or SUIDs, comma-separated string of edge names or SUIDs, or scalar node edge\n or SUID. Edge names should be found in the ``name`` column of the ``edge table``. If list is None,\n default is all edges.\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {edge-name: line-style} for each edge in edge_names parameter\n\n Raises:\n CyError: if network name or edge doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_edge_line_style()\n {'YMR117C (pp) YCL032W': 'SOLID', 'YMR255W (pp) YGL122C': 'SOLID', 'YNL214W (pp) YGL153W': 'SOLID', ...}\n >>> get_edge_line_style(['YHR084W (pd) YFL026W', 'YHR084W (pd) YDR461W', 'YMR255W (pp) YGL122C'])\n {'YHR084W (pd) YFL026W': 'SOLID', 'YHR084W (pd) YDR461W': 'SOLID', 'YMR255W (pp) YGL122C': 'SOLID'}\n >>> get_edge_line_style('YHR084W (pd) YFL026W, YHR084W (pd) YDR461W, YMR255W (pp) YGL122C')\n {'YHR084W (pd) YFL026W': 'SOLID', 'YHR084W (pd) YDR461W': 'SOLID', 'YMR255W (pp) YGL122C': 'SOLID'}\n >>> get_edge_line_style([421382, 421383, 421380])\n {421382: 'SOLID', 421383: 'SOLID', 421380: 'SOLID'}\n >>> get_edge_line_style('421382, 421383, 421380')\n {421382: 'SOLID', 421383: 'SOLID', 421380: 'SOLID'}\n >>> get_edge_line_style(421382)\n {421382: 'SOLID'}\n >>> get_edge_line_style(edge_names='YOR355W (pp) YNL091W', network='galFiltered.sif')\n {'YOR355W (pp) YNL091W': 'SOLID'}\n \"\"\"\n res = get_edge_property(edge_names, \"EDGE_LINE_TYPE\", network=network, base_url=base_url)\n return res\n\n\n@cy_log\ndef get_edge_target_arrow_shape(edge_names=None, network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the actual target arrow shape of specified edges.\n\n Args:\n edge_names (str or list or int or None): List of edges or None. If edge list:\n ``list`` of edge names or SUIDs, comma-separated string of edge names or SUIDs, or scalar node edge\n or SUID. Edge names should be found in the ``name`` column of the ``edge table``. If list is None,\n default is all edges.\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as a collection of {edge-name: arrow-shape} for each edge in edge_names parameter\n\n Raises:\n CyError: if network name or edge doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_edge_target_arrow_shape()\n {'YMR117C (pp) YCL032W': 'NONE', 'YMR255W (pp) YGL122C': 'NONE', 'YNL214W (pp) YGL153W': 'NONE', ...}\n >>> get_edge_target_arrow_shape(['YHR084W (pd) YFL026W', 'YHR084W (pd) YDR461W', 'YMR255W (pp) YGL122C'])\n {'YHR084W (pd) YFL026W': 'NONE', 'YHR084W (pd) YDR461W': 'NONE', 'YMR255W (pp) YGL122C': 'NONE'}\n >>> get_edge_target_arrow_shape('YHR084W (pd) YFL026W, YHR084W (pd) YDR461W, YMR255W (pp) YGL122C')\n {'YHR084W (pd) YFL026W': 'NONE', 'YHR084W (pd) YDR461W': 'NONE', 'YMR255W (pp) YGL122C': 'NONE'}\n >>> get_edge_target_arrow_shape([421382, 421383, 421380])\n {421382: 'NONE', 421383: 'NONE', 421380: 'NONE'}\n >>> get_edge_target_arrow_shape('421382, 421383, 421380')\n {421382: 'NONE', 421383: 'NONE', 421380: 'NONE'}\n >>> get_edge_target_arrow_shape(421382)\n {421382: 'NONE'}\n >>> get_edge_target_arrow_shape(edge_names='YOR355W (pp) YNL091W', network='galFiltered.sif')\n {'YOR355W (pp) YNL091W': 'NONE'}\n \"\"\"\n res = get_edge_property(edge_names, \"EDGE_TARGET_ARROW_SHAPE\", network=network, base_url=base_url)\n return res\n\n\n# ==============================================================================\n# II.c. Network Properties\n# Pattern: call getNetworkProperty()\n# ------------------------------------------------------------------------------\n\n@cy_log\ndef get_network_center(network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the center of specified network.\n\n Args:\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n dict: as {x: x-coord, y: y-coord} for center of network\n\n Raises:\n CyError: if network name doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_network_center()\n {'x': 2628.866343678256, 'y': 1180.9601936051579}\n >>> get_network_center(network='galFiltered.sif')\n {'x': 2628.866343678256, 'y': 1180.9601936051579}\n \"\"\"\n x_coordinate = get_network_property('NETWORK_CENTER_X_LOCATION', network=network, base_url=base_url)\n y_coordinate = get_network_property('NETWORK_CENTER_Y_LOCATION', network=network, base_url=base_url)\n\n return {'x': x_coordinate, 'y': y_coordinate}\n\n\n@cy_log\ndef get_network_zoom(network=None, base_url=DEFAULT_BASE_URL):\n \"\"\"Retrieve the scale factor of specified network.\n\n Args:\n network (SUID or str or None): Name or SUID of a network. Default is the\n \"current\" network active in Cytoscape.\n base_url (str): Ignore unless you need to specify a custom domain,\n port or version to connect to the CyREST API. 
Default is http://127.0.0.1:1234\n and the latest version of the CyREST API supported by this version of py4cytoscape.\n\n Returns:\n float: for zoom factor\n\n Raises:\n CyError: if network name doesn't exist\n requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error\n\n Examples:\n >>> get_network_zoom()\n 0.6299925248514752\n >>> get_network_zoom(network='galFiltered.sif')\n 0.6299925248514752\n \"\"\"\n res = get_network_property('NETWORK_SCALE_FACTOR', network=network, base_url=base_url)\n return res\n"
] |
[
[
"pandas.DataFrame"
]
] |
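The name_skew guard inside get_node_position only verifies that the two non-atomic CyREST calls returned the same node ordering before the DataFrame is built; the check itself is plain Python and can be exercised without Cytoscape (positions below are made up):

import pandas as pd

x_location = {'YIL052C': 2628.87, 'YDL215C': 1723.71}   # stand-in for the NODE_X_LOCATION results
y_location = {'YIL052C': 1180.96, 'YDL215C': 2230.94}   # stand-in for the NODE_Y_LOCATION results

name_skew = [[xn, yn] for xn, yn in zip(x_location, y_location) if xn != yn]
assert name_skew == [], f'Inconsistent node sets returned: "{name_skew}"'

data = pd.DataFrame(index=list(x_location), data={'x': list(x_location.values()), 'y': list(y_location.values())})
print(data)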
flaxandteal/intro-to-fea-in-python
|
[
"c074e995efe0799dcc7f9e1e89e0d9e196f2c644"
] |
[
"003-scripting-tasks/snippet/rainfall.py"
] |
[
"from matplotlib import pyplot\nfrom mpl_toolkits.mplot3d import Axes3D, axes3d\nimport numpy\nimport datetime\nimport os\n\n# Load rainfall data\nX, Y, rainfall = numpy.loadtxt(\"rainfall-radar.csv\").T\n\n\n# Some clever 3D plotting\nfig = pyplot.figure()\naxes = Axes3D(fig)\naxes.plot_trisurf(X, Y, rainfall)\n\n# Save image labelled by day data created\ncreation_timestamp = os.path.getctime(\"rainfall-radar.csv\")\nd = datetime.date.fromtimestamp(creation_timestamp)\nimage_filename = \"%d-%d-%d-rainfall.png\" % (d.year, d.month, d.day)\npyplot.savefig(image_filename)\n"
] |
[
[
"numpy.loadtxt",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure"
]
] |
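One small design note on the filename scheme above: "%d-%d-%d-rainfall.png" yields unpadded month and day, so the files do not sort lexicographically by date; strftime gives zero-padded, sortable names. A sketch:

import datetime

d = datetime.date(2024, 3, 7)
print("%d-%d-%d-rainfall.png" % (d.year, d.month, d.day))  # 2024-3-7-rainfall.png
print(d.strftime("%Y-%m-%d-rainfall.png"))                 # 2024-03-07-rainfall.png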
Mufabo/robustsp
|
[
"c5f765600bdf087c159f75d6d10aeeacff5d4e1d"
] |
[
"robustsp/LocationScale/MLocHub.py"
] |
[
"'''\nMloc_HUB computes Huber's M-estimate of\nlocation, i.e.,\n\nmu_hat = arg min_mu SUM_i rho_HUB(y_i - mu)\n\n\n INPUTS: \n y: real valued data vector of size N x 1\n c: tuning constant c>=0 . default = 1.345\n default tuning for 95 percent efficiency under \n the Gaussian model end\n max_iters: Number of iterations. default = 1000\n tol_err: convergence error tolerance. default = 1e-5\n\n OUTPUT: \n mu_hat: Hbers's M-estimate of location\n'''\nimport numpy as np\nfrom robustsp.AuxiliaryFunctions.madn import madn\nfrom robustsp.AuxiliaryFunctions.whub import whub\n\ndef MLocHUB(y,c=1.345, max_iters = 1000, tol_err = 1e-5):\n\n y = np.asarray(y) # ensure that y is a ndarray\n \n # previously computed scale estimate\n sigma_0 = madn(y)\n # initial robust location estimate \n mu_n = np.median(y)\n # computes huber weights\n \n for n in range(max_iters+1):\n w_n = whub(np.absolute(y - mu_n)/sigma_0,c) # compute weights\n mu_n_plus1 = np.sum(w_n*y)/(np.sum(w_n)) # compute weighted average\n if np.absolute(mu_n_plus1 - mu_n)/sigma_0 > tol_err: # breaking condition\n mu_n = mu_n_plus1 # update estimate of mean\n n = n+1 # increment iteration counter \n else:\n break\n \n return mu_n # final estimate"
] |
[
[
"numpy.asarray",
"numpy.median",
"numpy.absolute",
"numpy.sum"
]
] |
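whub above is imported from robustsp; assuming the standard Huber weight w(t) = min(1, c/|t|) (an assumption here — robustsp defines its own whub), one reweighting step of the location update looks like this self-contained sketch:

import numpy as np

def whub_assumed(t, c=1.345):
    # standard Huber weight: 1 inside [-c, c], c/|t| outside (assumed form)
    t = np.absolute(t)
    return np.where(t <= c, 1.0, c / np.maximum(t, 1e-12))

y = np.array([0.1, -0.2, 0.05, 8.0])  # one gross outlier
sigma_0 = 0.2                         # stand-in for madn(y)
mu_n = np.median(y)
w = whub_assumed(np.absolute(y - mu_n) / sigma_0)
mu_next = np.sum(w * y) / np.sum(w)   # the outlier is strongly down-weighted
print(mu_n, mu_next)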
tarakapoor/thyroid_deep_learning
|
[
"28fad8cf1b297d79bbff98960b2baa2d7cea6a75"
] |
[
"mobilenet_dataset.py"
] |
[
"print(\"\\nTHYROID DATASET\\n\")\nimport pandas as pd\nfrom PIL import Image\nimport os\nimport os.path\n\nimport numpy as np\nimport re\nfrom pathlib import Path\nimport tables\nimport cv2\nimport h5py\nimport math\nimport random\n\n#data aug\nfrom albumentations.pytorch import ToTensorV2\nfrom albumentations.pytorch import ToTensor\nimport albumentations as A\n\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torch\nfrom torch.utils.data import DataLoader\nfrom sklearn.utils import resample\n\n\nimport mobilenet_preprocess\n\ndef load_datasets_new(project_home_dir, labelpath, phase, cv_phase, allimgs, frametype):\n \"\"\"Load images and labels for given phase and cvphase, stack 3 different frames based on frametype (adjacent, equally spaced).\n Keyword arguments:\n phase -- train, val, trainval or test (which data to use)\n cv_phase -- cross validation fold (0 to 4)\n frametype -- adjacent, equalspaced (how to stack frames)\n Return (for given cross validation fold and train/val/trainval/test phase) lists of all images (stacked), labels, patient IDs and frame numbers within patient.\"\"\"\n\n print(\"passed in list allimgs:\", np.shape(allimgs))\n \n colnames = ['Labels for each frame', 'Annot_IDs', 'size_A', 'size_B', 'size_C', 'location_r_l_', 'study_dttm', 'age', 'sex', 'final_diagnoses', 'ePAD ID', 'foldNum']\n label_data = pd.read_csv(labelpath, names=colnames)\n \n annot_ids = label_data.Annot_IDs.tolist() #list of annotation ids from csv file\n labels = label_data.final_diagnoses.tolist() #list of labels from csv file\n \n foldNums = label_data.foldNum.tolist() #list of what folder for train test split\n \n annot_ids.pop(0)\n labels.pop(0)\n foldNums.pop(0)\n \n correct_order_labels = []\n \n for i in range(len(allimgs)):\n correct_order_labels.append(labels[i])\n \n print('Num Images: {}\\n Labels: {}\\n'.format(len(allimgs), len(correct_order_labels), len(annot_ids)))\n cur_imgs = []\n cur_labels = []\n cur_annot_ids = []\n\n test_folder = cv_phase #0, 1, 2, 3, 4\n if(cv_phase < 4):\n val_folder = cv_phase+1\n elif(cv_phase == 4):\n val_folder = 0\n \n print(\"CROSS VALIDATION PHASE:\", test_folder)\n #split by foldnum group\n for g in range(len(allimgs)):\n fnum = int(foldNums[g])\n \n if (phase == \"train\"):\n if not (fnum == test_folder or fnum == val_folder):\n cur_imgs.append(allimgs[g])\n cur_labels.append(correct_order_labels[g])\n cur_annot_ids.append(annot_ids[g])\n elif (phase == \"val\"):\n if (fnum == val_folder):\n cur_imgs.append(allimgs[g])\n cur_labels.append(correct_order_labels[g])\n cur_annot_ids.append(annot_ids[g])\n elif (phase == \"trainval\"):\n if not (fnum == test_folder): #train and val images\n cur_imgs.append(allimgs[g])\n cur_labels.append(correct_order_labels[g])\n cur_annot_ids.append(annot_ids[g])\n #else: #test phase\n elif (phase == \"test\"):\n if (fnum == test_folder):\n cur_imgs.append(allimgs[g])\n cur_labels.append(correct_order_labels[g])\n cur_annot_ids.append(annot_ids[g])\n \n \n #label frame number of each image\n cur_frame_num = []\n distinct_patient_ids = []\n patientframenum = 0\n\n #if 3 images in a row from same patient, stack them (instead of rgb channels)\n for ind in range(len(cur_imgs)):\n if(not (cur_annot_ids[ind] in distinct_patient_ids)):\n distinct_patient_ids.append(cur_annot_ids[ind])\n patientframenum = 1\n cur_frame_num.append(patientframenum) #add index of first image in current frame stack within patient to list for order\n patientframenum += 1\n print(len(cur_imgs), len(cur_labels), 
len(cur_annot_ids), len(cur_frame_num))\n\n \n #stack:\n cur_imgs_stack = []\n cur_labels_stack = []\n cur_annot_ids_stack = []\n cur_frame_num_stack = []\n \n distinct_patient_ids = []\n dist = 10\n t = 0\n \n print(\"about to stack!\")\n \n if(frametype == \"adjacent\"):\n #ADJACENT frame stacking method\n #if 3 images in a row (adjacent) from same patient, stack them (instead of rgb channels)\n while (t < len(cur_imgs)-2):\n if(cur_annot_ids[t] not in distinct_patient_ids):\n distinct_patient_ids.append(cur_annot_ids[t])\n if(cur_annot_ids[t] == cur_annot_ids[t+1] and cur_annot_ids[t] == cur_annot_ids[t+2]):\n img = np.stack((cur_imgs[t], cur_imgs[t+1], cur_imgs[t+2]))\n cur_imgs_stack.append(img)\n cur_labels_stack.append(cur_labels[t])\n cur_annot_ids_stack.append(cur_annot_ids[t])\n cur_frame_num_stack.append(cur_frame_num[t]) #add index of first image in current frame stack within patient to list for order\n if(cur_labels[t] != cur_labels[t+1] or cur_labels[t] != cur_labels[t+2]):\n print(\"inconsistent labels in train group of 3 images!\")\n t += 3 #every option needs t + 1 to go to next frame \n \n else:\n #EQUAL SPACED frame stacking method\n while (t < len(cur_imgs) - (2*dist)):\n if(cur_annot_ids[t] not in distinct_patient_ids):\n annot_id = cur_annot_ids[t]\n #last index of this patient id in the list of ids\n last_id = len(cur_annot_ids) - cur_annot_ids[::-1].index(annot_id) - 1\n #print(\"num in this patient\", last_id - t)\n dist = (last_id - t) // 3\n distinct_patient_ids.append(cur_annot_ids[t])\n print(\"PATIENT\", cur_annot_ids[t], \"num frames:\", last_id-t)\n\n #if 3 images equally spaced from same patient, stack them (instead of rgb channels)\n if(t <= (last_id - (2*dist))):\n if(cur_annot_ids[t] == cur_annot_ids[t+dist] and cur_annot_ids[t] == cur_annot_ids[t+(2*dist)]):\n img = np.stack((cur_imgs[t], cur_imgs[t+dist], cur_imgs[t+(2*dist)]))\n cur_imgs_stack.append(img)\n cur_labels_stack.append(cur_labels[t])\n cur_annot_ids_stack.append(cur_annot_ids[t])\n cur_frame_num_stack.append(cur_frame_num[t]) #add index of first image in current frame stack within patient to list for order\n\n if(cur_labels[t] != cur_labels[t+dist] or cur_labels[t] != cur_labels[t+(2*dist)]):\n print(\"inconsistent labels in train group of 3 images!\")\n print(\"t:\", t, \"dist:\", dist, \"last id\", last_id, \"cur labels:\", cur_labels[t], cur_annot_ids[t], cur_labels[t+(2*dist)], cur_annot_ids[t+(2*dist)])\n else: #end of patient?\n g = 0\n if(cur_annot_ids[t] != cur_annot_ids[t + (2*dist) + 1]):\n t += (2*dist) #go to next patient!\n else:\n print(\"not at end of patient?\")\n while(cur_annot_ids[t] == cur_annot_ids[t + (2*dist) + 1 + g]):\n g += 1\n print(\"final difference:\", g)\n t += (2*dist) + g\n t += 1 #every option needs t + 1 to go to next frame\n \n print(\"\\n\\nnum patients:\", len(distinct_patient_ids))\n print(\"done stacking!\")\n #shuffle all lists with same order for TRAIN ONLY\n if (phase == 'train' or phase == 'trainval'):# or phase == 'test'):\n temp = list(zip(cur_imgs_stack, cur_labels_stack, cur_annot_ids_stack, cur_frame_num_stack)) \n random.shuffle(temp) \n cur_imgs_stack, cur_labels_stack, cur_annot_ids_stack, cur_frame_num_stack = zip(*temp)\n \n if (phase == 'train' or phase == 'trainval'):\n #for class weights (imbalance of classes)\n neg, pos = np.bincount(cur_labels_stack)#intlabels)\n print(\"0s\", neg, \"1s\", pos)\n total_lbls = neg + pos\n print(total_lbls == len(cur_labels_stack))\n print('Labels:\\n Total: {}\\n Positive: {} ({:.2f}% of 
total)\\n'.format(total_lbls, pos, 100 * pos / total_lbls))\n\n #https://www.tensorflow.org/tutorials/structured_data/imbalanced_data\n # Scaling by total/2 helps keep the loss to a similar magnitude.\n # The sum of the weights of all examples stays the same.\n weight_for_0 = (1 / neg)*(total_lbls)/2.0 \n weight_for_1 = (1 / pos)*(total_lbls)/2.0\n\n #class_weight = {0: weight_for_0, 1: weight_for_1}\n class_weight = [weight_for_0, weight_for_1]\n print('Weight for class 0: {:.2f}\\nWeight for class 1: {:.2f}'.format(weight_for_0, weight_for_1))\n samples_weight = []\n samples_weight = np.array([class_weight[int(m)] for m in cur_labels_stack])\n print(\"in train phase for load_dataset: samples_weight shape =\", np.shape(samples_weight))\n \n f=open(project_home_dir + \"samplesweight.csv\",'w', newline ='\\n')\n count = 0\n for s in zip(samples_weight):\n count += 1\n f.write(str(s[0])+\",\")\n f.close()\n \n #done preprocessing!\n print(\"len\", phase, \"=\", len(cur_imgs))\n print(\"len\", phase, \"(stacked)\", len(cur_imgs_stack), len(cur_labels_stack), len(cur_annot_ids_stack), len(cur_frame_num_stack), np.shape(cur_imgs_stack))\n return(cur_imgs_stack, cur_labels_stack, cur_annot_ids_stack, cur_frame_num_stack) #return train/test imgs and labels and frame index #s\n\n\n\n#only max area frame +- 1 frame from each patient\ndef load_datasets_single_frame(project_home_dir, labelpath, phase, cv_phase, allimgs, largestpatinds):\n \"\"\"Load images and labels for given phase and cvphase, only stack largest frame per patient +- 1 frame.\n Keyword arguments:\n phase -- train, val, trainval or test (which data to use)\n cv_phase -- cross validation fold (0 to 4)\n largest_pat_inds -- list with 1 at index of largest image in each patient (from transform_and_crop_largest function in mobilenet_preprocess); use this image +- 1 image for stacking.\n Return (for given cross validation fold and train/val/trainval/test phase) lists of all images (stacked), labels, patient IDs and frame numbers within patient.\"\"\"\n\n print(\"passed in list allimgs:\", np.shape(allimgs))\n \n colnames = ['Labels for each frame', 'Annot_IDs', 'size_A', 'size_B', 'size_C', 'location_r_l_', 'study_dttm', 'age', 'sex', 'final_diagnoses', 'ePAD ID', 'foldNum']\n label_data = pd.read_csv(labelpath, names=colnames)\n \n annot_ids = label_data.Annot_IDs.tolist() #list of annotation ids from csv file\n labels = label_data.final_diagnoses.tolist() #list of labels from csv file\n \n foldNums = label_data.foldNum.tolist() #list of what folder for train test split\n \n annot_ids.pop(0)\n labels.pop(0)\n foldNums.pop(0)\n \n correct_order_labels = []\n \n selannotids = []\n selimgs = []\n selfoldnums = []\n\n #only use largest in patient\n for i in range(len(allimgs)-1):\n if(largestpatinds[i] == 1 or largestpatinds[i+1] == 1 or largestpatinds[i-1] == 1): #largest, and before and after (3 per patient)\n correct_order_labels.append(labels[i])\n selannotids.append(annot_ids[i])\n selimgs.append(allimgs[i])\n selfoldnums.append(foldNums[i])\n \n print('Num TOTAL Images: {}\\n patients: {}\\n'.format(len(allimgs), len(annot_ids)))\n print('Num SELECTED (max) Images: {}\\n Labels: {}\\n Patients: {}\\n'.format(len(selimgs), len(correct_order_labels), len(selannotids)))\n \n allimgs = selimgs\n annot_ids = selannotids\n foldNums = selfoldnums\n \n cur_imgs = []\n cur_labels = []\n cur_annot_ids = []\n\n test_folder = cv_phase #0, 1, 2, 3, 4\n if(cv_phase < 4):\n val_folder = cv_phase+1\n elif(cv_phase == 4):\n val_folder = 0\n \n 
print(\"CROSS VALIDATION PHASE:\", test_folder)\n #split by foldnum group\n for g in range(len(allimgs)):\n fnum = int(foldNums[g])\n \n if (phase == \"train\"):\n if not (fnum == test_folder or fnum == val_folder):\n cur_imgs.append(allimgs[g])\n cur_labels.append(correct_order_labels[g])\n cur_annot_ids.append(annot_ids[g])\n elif (phase == \"val\"):\n if (fnum == val_folder):\n cur_imgs.append(allimgs[g])\n cur_labels.append(correct_order_labels[g])\n cur_annot_ids.append(annot_ids[g])\n elif (phase == \"trainval\"):\n if not (fnum == test_folder): #train and val images\n cur_imgs.append(allimgs[g])\n cur_labels.append(correct_order_labels[g])\n cur_annot_ids.append(annot_ids[g])\n #else: #test phase\n elif (phase == \"test\"):\n if (fnum == test_folder):\n cur_imgs.append(allimgs[g])\n cur_labels.append(correct_order_labels[g])\n cur_annot_ids.append(annot_ids[g])\n \n #stack:\n cur_imgs_stack = []\n cur_labels_stack = []\n cur_annot_ids_stack = []\n #label frame number of each image (for future ordering)\n cur_frame_num = []\n \n distinct_patient_ids = []\n t = 0\n patientframenum = 1\n \n print(\"about to stack!\")\n for t in range(len(cur_imgs)):\n if(cur_annot_ids[t] not in distinct_patient_ids):\n annot_id = cur_annot_ids[t]\n distinct_patient_ids.append(cur_annot_ids[t])\n patientframenum = 1\n \n img = np.stack((cur_imgs[t], cur_imgs[t], cur_imgs[t]))\n cur_imgs_stack.append(img)\n cur_labels_stack.append(cur_labels[t])\n cur_annot_ids_stack.append(cur_annot_ids[t])\n cur_frame_num.append(patientframenum) #add index of first image in current frame stack within patient to list for order\n patientframenum += 1\n\n print(\"done stacking!\")\n #shuffle all lists with same order for TRAIN ONLY\n if (phase == 'train' or phase == 'trainval'):\n temp = list(zip(cur_imgs_stack, cur_labels_stack, cur_annot_ids_stack)) \n random.shuffle(temp) \n cur_imgs_stack, cur_labels_stack, cur_annot_ids_stack = zip(*temp)\n \n if (phase == 'train' or phase == 'trainval'):\n #for class weights (imbalance of classes)\n neg, pos = np.bincount(cur_labels_stack)\n print(\"0s\", neg, \"1s\", pos)\n total_lbls = neg + pos\n print(total_lbls == len(cur_labels_stack))\n print('Labels:\\n Total: {}\\n Positive: {} ({:.2f}% of total)\\n'.format(total_lbls, pos, 100 * pos / total_lbls))\n #https://www.tensorflow.org/tutorials/structured_data/imbalanced_data\n # Scaling by total/2 helps keep the loss to a similar magnitude.\n # The sum of the weights of all examples stays the same.\n weight_for_0 = (1 / neg)*(total_lbls)/2.0 \n weight_for_1 = (1 / pos)*(total_lbls)/2.0\n\n class_weight = [weight_for_0, weight_for_1]\n print('Weight for class 0: {:.2f}'.format(weight_for_0))\n print('Weight for class 1: {:.2f}'.format(weight_for_1))\n samples_weight = []\n samples_weight = np.array([class_weight[int(m)] for m in cur_labels_stack])\n print(\"in train phase for load_dataset: samples_weight shape =\", np.shape(samples_weight))\n f=open(project_home_dir + \"samplesweight.csv\",'w', newline ='\\n')\n count = 0\n for s in zip(samples_weight):\n count += 1\n f.write(str(s[0])+\",\")\n f.close()\n \n #done preprocessing!\n print(\"len\", phase, \"=\", len(cur_imgs))\n print(\"len\", phase, \"stacked =\", len(cur_imgs_stack))\n print(\"len\", phase, \"(stacked)\", len(cur_imgs_stack), len(cur_labels_stack), len(cur_annot_ids_stack), np.shape(cur_imgs_stack), len(cur_frame_num))\n return(cur_imgs_stack, cur_labels_stack, cur_annot_ids_stack, cur_frame_num) #return train/test imgs and labels and frame index #s\n\n\nclass 
DatasetThyroid3StackedNew(data.Dataset):\n def __init__(self, imgpath, maskpath, labelpath, project_home_dir, phase, cvphase, frametype, transform=None):\n \"\"\"Data loader for CNN Model.\"\"\"\n super(DatasetThyroid3StackedNew, self).__init__()\n \n h5py.File(imgpath).keys()\n colnames = ['Labels for each frame', 'Annot_IDs', 'size_A', 'size_B', 'size_C', 'location_r_l_', 'study_dttm', 'age', 'sex', 'final_diagnoses', 'ePAD ID', 'foldNum']\n imgs, largestpatinds = mobilenet_preprocess.transform_and_crop_largest(h5py.File(imgpath)['img'], h5py.File(maskpath)['img'], pd.read_csv(labelpath, names=colnames).Annot_IDs.tolist())\n\n self.phase = phase\n \n print(\"frametype\", frametype)\n if(frametype == \"singleframe\"):\n self.imgs, self.all_labels, self.all_annot_ids, self.all_frame_nums = load_datasets_single_frame(project_home_dir, labelpath, phase, cvphase, imgs, largestpatinds)\n if(frametype == \"adjacent\" or frametype == \"equalspaced\"):\n self.imgs, self.all_labels, self.all_annot_ids, self.all_frame_nums = load_datasets_new(project_home_dir, labelpath, phase, cvphase, imgs, frametype)\n print(\"done reading in images and labels for\", phase, \"!!!\\n\\n\")\n \n imgs = []\n \n self.transform = transform\n print(\"all frames:\", len(self.imgs), np.shape(self.imgs))\n\n\n \n #getitem is called 'batch_size' number of times in one iteration of the epoch\n def __getitem__(self, i):\n img_frame = self.imgs[i] #3 stacked frames (rgb) from same patient OR same image x3\n annot_id = self.all_annot_ids[i]\n frame_num = self.all_frame_nums[i]\n \n #create label for image\n label = torch.LongTensor(1)\n label[0] = int(self.all_labels[i])\n\n if(self.transform):\n #make height, width, channels instead of [3,224,224] which is channels, height, width\n img_frame = np.transpose(img_frame, (1,2,0)).astype(np.float32)\n input1 = self.transform(image=img_frame)['image']\n else:\n #doing this in albumentations transform\n input1 = torch.from_numpy(img_frame).float()\n print(\"no transform\")\n \n return {'input': input1, 'label': label, 'annot_id': annot_id, 'frame_num': frame_num}\n\n def __len__(self):\n return len(self.all_annot_ids)\n\n \n "
] |
[
[
"torch.LongTensor",
"pandas.read_csv",
"torch.from_numpy",
"numpy.stack",
"numpy.shape",
"numpy.bincount",
"numpy.transpose"
]
] |
hereagain-Y/DeepTCR
|
[
"0fa6fff45231d4caa05ef17fb7066b88ac018185"
] |
[
"ancillary_analysis/supervised/other/alpha_v_beta.py"
] |
[
"\"\"\"Figure 2D\"\"\"\n\n\"\"\"This script is used to benchmark the performance of the Supervised Sequence Classifier\nwith either the alpha chain, beta chain, or both provided to the model.\"\"\"\n\nfrom DeepTCR.DeepTCR import DeepTCR_SS\nfrom multiprocessing import Pool\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_auc_score, roc_curve\nimport os\nimport numpy as np\n\np = Pool(80)\ndir_results = 'alpha_v_beta_results'\nif not os.path.exists(dir_results):\n os.makedirs(dir_results)\n\nDTCR = DeepTCR_SS('alpha_v_beta_SS')\n\nantigens = ['ATP6AP1-KLG_G3W',\n 'GNL3L-R4C',\n 'MART1-A2L',\n 'YFV-LLW']\n\nopt = ['alpha','beta','alpha_beta']\n\nfolds = 100\ngraph_seed = 0\nseeds = np.array(range(folds))\nfor a in antigens:\n y_pred_list = []\n y_test_list = []\n for o in opt:\n if o == 'alpha':\n DTCR = DeepTCR_SS('alpha_v_beta_SS')\n DTCR.Get_Data(directory='../../Data/Zhang/'+a,aa_column_alpha=0,p=p)\n elif o == 'beta':\n DTCR = DeepTCR_SS('alpha_v_beta_SS')\n DTCR.Get_Data(directory='../../Data/Zhang/'+a,aa_column_beta=1,p=p)\n elif o == 'alpha_beta':\n DTCR = DeepTCR_SS('alpha_v_beta_SS')\n DTCR.Get_Data(directory='../../Data/Zhang/'+a,aa_column_alpha=0,aa_column_beta=1,p=p)\n\n DTCR.Monte_Carlo_CrossVal(folds=folds,weight_by_class=True,graph_seed=graph_seed,seeds=seeds)\n y_pred_list.append(DTCR.y_pred)\n y_test_list.append(DTCR.y_test)\n\n plt.figure()\n plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate',fontsize=24)\n plt.ylabel('True Positive Rate',fontsize=24)\n for ii, o in enumerate(opt, 0):\n y_test = y_test_list[ii]\n y_pred = y_pred_list[ii]\n roc_score = roc_auc_score(y_test[:, 1], y_pred[:, 1])\n fpr, tpr, _ = roc_curve(y_test[:, 1], y_pred[:, 1])\n plt.plot(fpr, tpr, lw=2, label='%s (area = %0.4f)' % (o, roc_score))\n\n plt.legend(loc=\"lower right\",fontsize=14)\n plt.title(a,fontsize=22)\n plt.xticks(fontsize=18)\n plt.yticks(fontsize=18)\n plt.tight_layout()\n plt.savefig(os.path.join(dir_results,a+ '_AUC.eps'))\n plt.close()\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"sklearn.metrics.roc_auc_score",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
]
] |
worldlife123/maskrcnn-benchmark
|
[
"6c8bc908c2b7299ca6ffb292ae2680ac354d0eec"
] |
[
"tools/train_lr_net.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nr\"\"\"\nBasic training script for PyTorch\n\"\"\"\n\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.solver import make_lr_scheduler\nfrom maskrcnn_benchmark.solver import make_optimizer\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.engine.trainer_lr import do_train\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.utils.collect_env import collect_env_info\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank\nfrom maskrcnn_benchmark.utils.imports import import_file\nfrom maskrcnn_benchmark.utils.logger import setup_logger\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir, save_config\n\n# See if we can use apex.DistributedDataParallel instead of the torch default,\n# and enable mixed-precision via apex.amp\ntry:\n from apex import amp\nexcept ImportError:\n raise ImportError('Use APEX for multi-precision via apex.amp')\n\nfrom tensorboardX import SummaryWriter\n\ndef train(cfg, local_rank, distributed):\n model = build_detection_model(cfg)\n device = torch.device(cfg.MODEL.DEVICE)\n model.to(device)\n\n optimizer = make_optimizer(cfg, model)\n scheduler = make_lr_scheduler(cfg, optimizer)\n\n # Initialize mixed-precision training\n use_mixed_precision = cfg.DTYPE == \"float16\"\n amp_opt_level = 'O1' if use_mixed_precision else 'O0'\n model, optimizer = amp.initialize(model, optimizer, opt_level=amp_opt_level)\n\n if distributed:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[local_rank], output_device=local_rank,\n # this should be removed if we update BatchNorm stats\n broadcast_buffers=False,\n )\n\n arguments = {}\n arguments[\"iteration\"] = 0\n\n output_dir = cfg.OUTPUT_DIR\n\n save_to_disk = get_rank() == 0\n checkpointer = DetectronCheckpointer(\n cfg, model, optimizer, scheduler, output_dir, save_to_disk\n )\n extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)\n arguments.update(extra_checkpoint_data)\n\n data_loader = make_data_loader(\n cfg,\n is_train=True,\n is_distributed=distributed,\n start_iter=arguments[\"iteration\"],\n )\n\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n\n tflogger = SummaryWriter(log_dir=os.path.join(output_dir, \"logs\"))\n\n do_train(\n model,\n data_loader,\n optimizer,\n scheduler,\n checkpointer,\n device,\n checkpoint_period,\n arguments,\n tflogger,\n )\n\n return model\n\n\ndef run_test(cfg, model, distributed):\n if distributed:\n model = model.module\n torch.cuda.empty_cache() # TODO check if it helps\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n if cfg.MODEL.KEYPOINT_ON:\n iou_types = iou_types + (\"keypoints\",)\n if cfg.MODEL.DEPTH_ON or cfg.MODEL.BOX3D_ON:\n iou_types = iou_types + (\"depth\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n dataset_names = cfg.DATASETS.TEST\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = 
output_folder\n data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):\n inference(\n model,\n data_loader_val,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n input_targets=cfg.TEST.INPUT_TARGETS,\n )\n synchronize()\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Training\")\n parser.add_argument(\n \"--config-file\",\n default=\"\",\n metavar=\"FILE\",\n help=\"path to config file\",\n type=str,\n )\n parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"--skip-test\",\n dest=\"skip_test\",\n help=\"Do not test the final model\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n torch.multiprocessing.set_sharing_strategy('file_system')\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n args.distributed = num_gpus > 1\n\n if args.distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n output_dir = cfg.OUTPUT_DIR\n if output_dir:\n mkdir(output_dir)\n\n logger = setup_logger(\"maskrcnn_benchmark\", output_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(args)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\n with open(args.config_file, \"r\") as cf:\n config_str = \"\\n\" + cf.read()\n logger.info(config_str)\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n output_config_path = os.path.join(cfg.OUTPUT_DIR, 'config.yml')\n logger.info(\"Saving config into: {}\".format(output_config_path))\n # save overloaded model config in the output directory\n save_config(cfg, output_config_path)\n\n model = train(cfg, args.local_rank, args.distributed)\n\n if not args.skip_test:\n run_test(cfg, model, args.distributed)\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.cuda.empty_cache",
"torch.multiprocessing.set_sharing_strategy",
"torch.device",
"torch.nn.parallel.DistributedDataParallel"
]
] |
shaanrockz/PyZPK
|
[
"95d9f8880a56e5a3666109f653998de78fc9a466"
] |
[
"pyzpk/main.py"
] |
[
"import torch\n\ndef sum(x,y):\n \n return torch.add(x, y)"
] |
[
[
"torch.add"
]
] |
oikosohn/compound-loss-pytorch
|
[
"f53491f498434565c07761db99cea8b7079c14fe"
] |
[
"jaccard_ce_loss_smp.py"
] |
[
"import segmentation_models_pytorch as smp\nimport torch.nn as nn\n\nclass JaccardCELoss(smp.utils.base.Loss):\n def __init__(self, alpha=1.0, beta=0.5, **kwargs):\n super().__init__(**kwargs)\n self.alpha = alpha\n self.beta = beta\n\n self.jaccardloss=smp.losses.JaccardLoss(mode='multiclass')\n self.jaccardloss.__name__ = 'jaccard_loss'\n\n self.celoss = nn.CrossEntropyLoss()\n self.celoss.__name__ = 'ce_loss'\n\n def forward(self, y_pred, y_true):\n return self.alpha * self.celoss.forward(y_pred, y_true) - self.beta * self.jaccardloss.forward(y_pred, y_true)"
] |
[
[
"torch.nn.CrossEntropyLoss"
]
] |
Milad-Rakhsha/Friction-Contact
|
[
"59d17b231c5dd764c741c941e5443141d43ec7e8"
] |
[
"test_project.py"
] |
[
"# =============================================================================\n# SIMULATION-BASED ENGINEERING LAB (SBEL) - http://sbel.wisc.edu\n# University of Wisconsin-Madison\n#\n# Copyright (c) 2020 SBEL\n# All rights reserved.\n#\n# Use of this source code is governed by a BSD-style license that can be found\n# at https://opensource.org/licenses/BSD-3-Clause\n#\n# =============================================================================\n# Contributors: Nic Olsen\n# =============================================================================\n#!/usr/bin/env python3\nfrom solvers import project\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nmu = 2.0\nnb = 50\nbound = 100\npoints = 2 * bound * np.random.rand(3 * nb) - bound\nfriction = np.array([mu] * int(points.shape[0] / 3))\n\nproj = project(points, friction)\nproj2 = project(proj, friction)\nerr_norm = np.linalg.norm(proj - proj2)\nprint(\"Error norm ||P(x) - P(P(x))||\", err_norm)\n\nfig1 = plt.figure(1)\nax = fig1.add_subplot(111, projection='3d')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\nax.scatter([points[3*i+1] for i in range(nb)],\\\n [points[3*i+2] for i in range(nb)],\\\n [points[3*i] for i in range(nb)], color='r')\n\nfig2 = plt.figure(2)\nax = fig2.add_subplot(111, projection='3d')\nax.set_xlabel('x')\nax.set_ylabel('y')\nax.set_zlabel('z')\n\nax.scatter([points[3*i+1] for i in range(nb)],\\\n [points[3*i+2] for i in range(nb)],\\\n [points[3*i] for i in range(nb)], color='r')\n\nax.scatter([proj[3*i+1] for i in range(nb)],\\\n [proj[3*i+2] for i in range(nb)],\\\n [proj[3*i] for i in range(nb)], color='b')\n\nx = y = np.linspace(-bound, bound, 100)\nX, Y = np.meshgrid(x, y)\nZ = 1.0 / mu * np.sqrt(X * X + Y * Y)\nax.plot_surface(X, Y, Z)\nplt.show()\n"
] |
[
[
"numpy.sqrt",
"numpy.meshgrid",
"numpy.linspace",
"numpy.linalg.norm",
"numpy.random.rand",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
cta-rta/rta-sci-grb
|
[
"aa7de6aded51edfad4864ba99e2f345aaf68abfe"
] |
[
"runCatVisibility.py"
] |
[
"# *******************************************************************************\n# Copyright (C) 2020 INAF\n#\n# This software is distributed under the terms of the BSD-3-Clause license\n#\n# Authors:\n# Ambra Di Piano <ambra.dipiano@inaf.it>\n# *******************************************************************************\n\nimport argparse\nimport warnings\nimport yaml\nimport os\nimport logging\nfrom os.path import join, isfile, isdir\nimport numpy as np\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.time import Time\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom lib.visibility import Visibility, complete_irf_name\nfrom astropy.coordinates import solar_system_ephemeris\n\n# parse command line inputs\nparser = argparse.ArgumentParser(description='This software runs the CTA visibility for a given configuration pass via YAML configuration file. The output is saved as NPY binary file.')\nparser.add_argument('-f', '--config', required=True, type=str, help='configuration yaml file')\n# configuration file\ncf = parser.parse_args().config\n# load params configuration from cf\nwith open(cf) as f:\n cfg = yaml.load(f, Loader=yaml.FullLoader)\n\n# set ephemeris\nif cfg['ephemeris'] == 'jpl':\n solar_system_ephemeris.set('jpl') \nelif cfg['ephemeris'] not in ('default', 'jpl'):\n raise ValueError(\"selected ephemeris not implemented, please use either default or jpl.\")\n\n# ----------------------------------------------------------------------------- catalog\nif '$' in cfg['path']['catalog']:\n catalog = os.path.expandvars(cfg['path']['catalog'])\nelse:\n catalog = cfg['path']['catalog']\nif cfg['path']['output'] == None:\n output = 'visibility_output.npy'\nelif '$' in cfg['path']['output']:\n output = os.path.expandvars(cfg['path']['output'])\nelse:\n output = cfg['path']['output']\n\nif not isdir(catalog):\n raise ValueError('Please correctly select a catalog folder') \n\nif cfg['path']['filename'] == None:\n runids = [f for f in os.listdir(catalog) if '.fits' in f and isfile(join(catalog, f))]\n if len(runids) == 0:\n raise ValueError('No valid FITS file found') \nelif type(cfg['path']['filename']) == str:\n if not isfile(join(catalog, cfg['path']['filename'])):\n raise ValueError(f'Specified template {runid} does not exist in catalog')\n runids = [cfg['path']['filename']]\nelse:\n runids = cfg['path']['filename']\n for runid in runids:\n if not isfile(join(catalog, runid)):\n raise ValueError(f'Specified template {runid} does not exist in catalog')\nrunids = sorted(runids)\n\n# -------------------------------------------------------------------------log the configuration\n\nlogname = output.replace('.npy','.log')\nlogging.basicConfig(filename=logname, filemode='w+', level=logging.DEBUG, format='%(asctime)s %(message)s')\nlogging.info('#################')\nlogging.info('# CONFIGURATION #')\nlogging.info(f'#################\\n\\n{yaml.dump(cfg)}')\nlogging.info('##############')\nlogging.info('# VISIBILITY #')\nlogging.info('##############')\n\n# ----------------------------------------------------------------------------- loop runid\n\ndata = {}\n# ignore warnings\nwith warnings.catch_warnings():\n warnings.filterwarnings('ignore')\n for runid in runids:\n logging.info('------------------------------------------------------------------ #')\n print(f'Processing {runid}')\n logging.info(f'Processing {runid}')\n logging.info('----------')\n data[f'{runid.replace(\".fits\", \"\")}'] = {}\n # load template\n with fits.open(join(catalog, 
runid)) as hdul:\n hdr = hdul[0].header\n # source coordinates\n source_radec = SkyCoord(ra=hdr['RA'] * u.deg, dec=hdr['DEC'] * u.deg, frame='icrs')\n # source trigger time and afterglow duration\n try:\n trigger = Time(hdr['GRBJD'] * u.day, format='jd')\n except KeyError:\n raise ValueError('This catalog cannot be processed. The headers do not contain a \"GRBJD\" trigger time keyword.')\n\n try:\n times = np.array(hdul['TIMES (AFTERGLOW)'].data.tolist())\n except KeyError:\n times = np.array(hdul['TIMES'].data.tolist())\n afterglow_duration = Time((times[-1] - times[1])[0] / 86400, format='jd')\n\n # --------------------------------------------------------------------------- loop sites\n for site in cfg['sites_list']:\n logging.info(f'{site} site')\n # initialise\n visibility = Visibility()\n # set ephemeris\n if cfg['ephemeris'] == 'jpl':\n visibility.set_jpl_ephemeris()\n # visibility points in JD and AltAz\n visibility.visibility_points(trigger, afterglow_duration, cfg['total_points'])\n visibility.visibility_altaz(source_radec, cfg['sites_list'][site])\n # find nights account for Moon (use default Moon thresholds)\n if cfg['setup']['moon_sep'] == None:\n nights = visibility.get_nighttime(twilight=cfg['setup']['twilight'])\n else: \n nights = visibility.get_nighttime_moonlight(twilight=cfg['setup']['twilight'], moon_sep=cfg['setup']['moon_sep'], fov_rad=cfg['setup']['fov_rad'], moonpha=0, max_moonpha=cfg['setup']['moon_pha'])\n #del visibility\n # within each night find IRFs\n #print(nights)\n if nights['start'][0] < 0:\n logging.info('................Not visible either to moonlight or daylight conditions')\n #del nights, irfs, site_coords\n \n logging.info('Observability windows:') \n data[f'{runid.replace(\".fits\", \"\")}'][f'{site}'] = {}\n for i in range(len(nights['start'])):\n #print('Night', i+1, 'of', len(nights['start']))\n logging.info(f'................Night {i+1} of {len(nights[\"start\"])} in [{nights[\"start\"][i]}, {nights[\"stop\"][i]}]')\n data[f'{runid.replace(\".fits\", \"\")}'][f'{site}'][f'night{i+1:02d}'] = {'start': nights[\"start\"][i], 'stop': nights[\"stop\"][i]}\n #print(nights['start'][i], nights['stop'][i])\n t_start = Time(nights['start'][i], format='jd')\n night_duration = Time(nights['stop'][i] - nights['start'][i], format='jd')\n # initialise\n visibility = Visibility()\n # set ephemeris\n if cfg['ephemeris'] == 'jpl':\n visibility.set_jpl_ephemeris()\n # visibility points in JD and AltAz\n visibility.visibility_points(t_start, night_duration, cfg['window_points'])\n visibility.visibility_altaz(source_radec, cfg['sites_list'][site])\n # IRFs and relative time intervals\n irfs = visibility.associate_irf_zenith_angle(cfg['setup']['thresholds'], cfg['setup']['zenith_angles'])\n if irfs['start'][0] < 0:\n logging.info('................Not visible due to low altitude')\n data[f'{runid.replace(\".fits\", \"\")}'][f'{site}'][f'night{i+1:02d}']['irfs'] = irfs\n else:\n logging.info('................Altitude intervals:')\n for n in range(len(irfs['zref'])):\n logging.info(f'................Zenith Ref. {irfs[\"zref\"][n]} in [{irfs[\"start\"][n]}, {irfs[\"stop\"][n]}]')\n data[f'{runid.replace(\".fits\", \"\")}'][f'{site}'][f'night{i+1:02d}']['irfs'] = irfs\n #del visibility\n #print(irfs)\n #print(irfs['start'][0], irfs['stop'][-1])\n #del nights, irfs, site_coords, night_duration\n #del afterglow_duration\nnp.save(output, data)\n\n\nprint(\"\\n\\nExit\\n\\n\")\n\n#os.system(f\"cat {logname}\")"
] |
[
[
"numpy.save"
]
] |
yanglinGEM/ReDet
|
[
"80cb7b0643c57aa2641ad6a1fc5eada4e575fcfe"
] |
[
"mmdet/models/necks/re_fpn.py"
] |
[
"import e2cnn.nn as enn\nimport math\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport warnings\nfrom e2cnn import gspaces\nfrom mmcv.cnn import constant_init, kaiming_init, xavier_init\n\nfrom ..registry import NECKS\n\n# Set default Orientation=8, .i.e, the group C8\n# One can change it by passing the env Orientation=xx\nOrientation = 8\n# keep similar computation or similar params\n# One can change it by passing the env fixparams=True\nfixparams = False\nif 'Orientation' in os.environ:\n Orientation = int(os.environ['Orientation'])\nif 'fixparams' in os.environ:\n fixparams = True\n\ngspace = gspaces.Rot2dOnR2(N=Orientation)\n\n\ndef regular_feature_type(gspace: gspaces.GSpace, planes: int):\n \"\"\" build a regular feature map with the specified number of channels\"\"\"\n assert gspace.fibergroup.order() > 0\n\n N = gspace.fibergroup.order()\n if fixparams:\n planes *= math.sqrt(N)\n planes = planes / N\n planes = int(planes)\n return enn.FieldType(gspace, [gspace.regular_repr] * planes)\n\n\ndef trivial_feature_type(gspace: gspaces.GSpace, planes: int):\n \"\"\" build a trivial feature map with the specified number of channels\"\"\"\n\n if fixparams:\n planes *= math.sqrt(gspace.fibergroup.order())\n\n planes = int(planes)\n return enn.FieldType(gspace, [gspace.trivial_repr] * planes)\n\n\nFIELD_TYPE = {\n \"trivial\": trivial_feature_type,\n \"regular\": regular_feature_type,\n}\n\n\ndef convnxn(inplanes, outplanes, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1):\n in_type = FIELD_TYPE['regular'](gspace, inplanes)\n out_type = FIELD_TYPE['regular'](gspace, outplanes)\n return enn.R2Conv(in_type, out_type, kernel_size,\n stride=stride,\n padding=padding,\n groups=groups,\n bias=bias,\n dilation=dilation,\n sigma=None,\n frequencies_cutoff=lambda r: 3 * r, )\n\n\ndef ennReLU(inplanes, inplace=True):\n in_type = FIELD_TYPE['regular'](gspace, inplanes)\n return enn.ReLU(in_type, inplace=inplace)\n\n\ndef ennInterpolate(inplanes, scale_factor, mode='nearest', align_corners=False):\n in_type = FIELD_TYPE['regular'](gspace, inplanes)\n return enn.R2Upsampling(in_type, scale_factor, mode=mode, align_corners=align_corners)\n\n\ndef ennMaxPool(inplanes, kernel_size, stride=1, padding=0):\n in_type = FIELD_TYPE['regular'](gspace, inplanes)\n return enn.PointwiseMaxPool(in_type, kernel_size=kernel_size, stride=stride, padding=padding)\n\n\ndef build_conv_layer(cfg, *args, **kwargs):\n layer = convnxn(*args, **kwargs)\n return layer\n\n\ndef build_norm_layer(cfg, num_features, postfix=''):\n in_type = FIELD_TYPE['regular'](gspace, num_features)\n return 'bn' + str(postfix), enn.InnerBatchNorm(in_type)\n\n\nclass ConvModule(enn.EquivariantModule):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias='auto',\n conv_cfg=None,\n norm_cfg=None,\n activation='relu',\n inplace=True,\n order=('conv', 'norm', 'act')):\n super(ConvModule, self).__init__()\n assert conv_cfg is None or isinstance(conv_cfg, dict)\n assert norm_cfg is None or isinstance(norm_cfg, dict)\n self.in_type = enn.FieldType(gspace, [gspace.regular_repr] * in_channels)\n self.out_type = enn.FieldType(gspace, [gspace.regular_repr] * out_channels)\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.activation = activation\n self.inplace = inplace\n self.order = order\n assert isinstance(self.order, tuple) and len(self.order) == 3\n assert set(order) == set(['conv', 'norm', 'act'])\n\n 
self.with_norm = norm_cfg is not None\n self.with_activatation = activation is not None\n # if the conv layer is before a norm layer, bias is unnecessary.\n if bias == 'auto':\n bias = False if self.with_norm else True\n self.with_bias = bias\n\n if self.with_norm and self.with_bias:\n warnings.warn('ConvModule has norm and bias at the same time')\n # build convolution layer\n self.conv = build_conv_layer(\n conv_cfg,\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n # export the attributes of self.conv to a higher level for convenience\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.transposed = False\n self.output_padding = padding\n self.groups = groups\n\n # build normalization layers\n if self.with_norm:\n # norm layer is after conv layer\n if order.index('norm') > order.index('conv'):\n norm_channels = out_channels\n else:\n norm_channels = in_channels\n if conv_cfg != None and conv_cfg['type'] == 'ORConv':\n norm_channels = int(norm_channels * 8)\n self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)\n self.add_module(self.norm_name, norm)\n\n # build activation layer\n if self.with_activatation:\n # TODO: introduce `act_cfg` and supports more activation layers\n if self.activation not in ['relu']:\n raise ValueError('{} is currently not supported.'.format(\n self.activation))\n if self.activation == 'relu':\n self.activate = ennReLU(out_channels, inplace=self.inplace)\n\n # Use msra init by default\n self.init_weights()\n\n @property\n def norm(self):\n return getattr(self, self.norm_name)\n\n def init_weights(self):\n nonlinearity = 'relu' if self.activation is None else self.activation\n # kaiming_init(self.conv, nonlinearity=nonlinearity)\n # if self.with_norm:\n # constant_init(self.norm, 1, bias=0)\n\n def forward(self, x, activate=True, norm=True):\n for layer in self.order:\n if layer == 'conv':\n x = self.conv(x)\n elif layer == 'norm' and norm and self.with_norm:\n x = self.norm(x)\n elif layer == 'act' and activate and self.with_activatation:\n x = self.activate(x)\n return x\n\n def evaluate_output_shape(self, input_shape):\n return input_shape\n\n\n@NECKS.register_module\nclass ReFPN(nn.Module):\n\n def __init__(self,\n in_channels,\n out_channels,\n num_outs,\n start_level=0,\n end_level=-1,\n add_extra_convs=False,\n extra_convs_on_inputs=True,\n relu_before_extra_convs=False,\n no_norm_on_lateral=False,\n conv_cfg=None,\n norm_cfg=None,\n activation=None):\n super(ReFPN, self).__init__()\n assert isinstance(in_channels, list)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_ins = len(in_channels)\n self.num_outs = num_outs\n self.activation = activation\n self.relu_before_extra_convs = relu_before_extra_convs\n self.no_norm_on_lateral = no_norm_on_lateral\n self.fp16_enabled = False\n if end_level == -1:\n self.backbone_end_level = self.num_ins\n assert num_outs >= self.num_ins - start_level\n else:\n # if end_level < inputs, no extra level is allowed\n self.backbone_end_level = end_level\n assert end_level <= len(in_channels)\n assert num_outs == end_level - start_level\n self.start_level = start_level\n self.end_level = end_level\n self.add_extra_convs = add_extra_convs\n self.extra_convs_on_inputs = extra_convs_on_inputs\n\n self.lateral_convs = nn.ModuleList()\n self.up_samples = nn.ModuleList()\n 
self.fpn_convs = nn.ModuleList()\n\n for i in range(self.start_level, self.backbone_end_level):\n l_conv = ConvModule(\n in_channels[i],\n out_channels,\n 1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n activation=self.activation,\n inplace=False)\n up_sample = ennInterpolate(out_channels, 2)\n fpn_conv = ConvModule(\n out_channels,\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n\n self.lateral_convs.append(l_conv)\n self.up_samples.append(up_sample)\n self.fpn_convs.append(fpn_conv)\n\n # add extra conv layers (e.g., RetinaNet)\n extra_levels = num_outs - self.backbone_end_level + self.start_level\n if add_extra_convs and extra_levels >= 1:\n for i in range(extra_levels):\n if i == 0 and self.extra_convs_on_inputs:\n in_channels = self.in_channels[self.backbone_end_level - 1]\n else:\n in_channels = out_channels\n extra_fpn_conv = ConvModule(\n in_channels,\n out_channels,\n 3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n activation=self.activation,\n inplace=False)\n self.fpn_convs.append(extra_fpn_conv)\n\n self.max_pools = nn.ModuleList()\n self.relus = nn.ModuleList()\n\n used_backbone_levels = len(self.lateral_convs)\n if self.num_outs > used_backbone_levels:\n # use max pool to get more levels on top of outputs\n # (e.g., Faster R-CNN, Mask R-CNN)\n if not self.add_extra_convs:\n for i in range(self.num_outs - used_backbone_levels):\n self.max_pools.append(ennMaxPool(out_channels, 1, stride=2))\n # add conv layers on top of original feature maps (RetinaNet)\n else:\n for i in range(used_backbone_levels + 1, self.num_outs):\n self.relus.append(ennReLU(out_channels))\n\n # default init_weights for conv(msra) and norm in ConvModule\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n def forward(self, inputs):\n assert len(inputs) == len(self.in_channels)\n\n # build laterals\n laterals = [\n lateral_conv(inputs[i + self.start_level])\n for i, lateral_conv in enumerate(self.lateral_convs)\n ]\n\n # build top-down path\n used_backbone_levels = len(laterals)\n for i in range(used_backbone_levels - 1, 0, -1):\n # laterals[i - 1] += F.interpolate(\n # laterals[i], scale_factor=2, mode='nearest')\n laterals[i - 1] += self.up_samples[i](laterals[i])\n\n # build outputs\n # part 1: from original levels\n outs = [\n self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n ]\n # part 2: add extra levels\n if self.num_outs > len(outs):\n # use max pool to get more levels on top of outputs\n # (e.g., Faster R-CNN, Mask R-CNN)\n if not self.add_extra_convs:\n for i in range(self.num_outs - used_backbone_levels):\n outs.append(self.max_pools[i](outs[-1]))\n # add conv layers on top of original feature maps (RetinaNet)\n else:\n if self.extra_convs_on_inputs:\n orig = inputs[self.backbone_end_level - 1]\n outs.append(self.fpn_convs[used_backbone_levels](orig))\n else:\n outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))\n for i in range(used_backbone_levels + 1, self.num_outs):\n if self.relu_before_extra_convs:\n outs.append(self.fpn_convs[i](self.relus[i](outs[-1])))\n else:\n outs.append(self.fpn_convs[i](outs[-1]))\n\n # convert to tensor\n outs = [out.tensor for out in outs]\n\n return tuple(outs)\n"
] |
[
[
"torch.nn.ModuleList"
]
] |
fossabot/mljar-supervised
|
[
"b77b945b093ea25edef41bbb0f58acd63b269bbe"
] |
[
"supervised/utils/shap.py"
] |
[
"import os\nimport logging\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import OneHotEncoder\n\nfrom supervised.algorithms.registry import (\n BINARY_CLASSIFICATION,\n MULTICLASS_CLASSIFICATION,\n REGRESSION,\n)\nimport shap\n\n\nlogger = logging.getLogger(__name__)\nfrom supervised.utils.config import LOG_LEVEL\n\nlogger.setLevel(LOG_LEVEL)\nimport warnings\n\n\nclass PlotSHAP:\n @staticmethod\n def is_available(algorithm, X_train, y_train, ml_task):\n # disable CatBoost for now\n # see https://github.com/catboost/catboost/issues/1216\n if algorithm.algorithm_short_name in [\"Baseline\", \"CatBoost\"]:\n return False\n if (\n algorithm.algorithm_short_name == \"Xgboost\"\n and algorithm.learner_params[\"booster\"] == \"gblinear\"\n ):\n # Xgboost gblinear is not supported by SHAP\n return False\n # disable for large number of columns\n if X_train.shape[1] > 500:\n warnings.warn(\n \"Disable SHAP explanations because of number of columns > 500.\"\n )\n return False\n if ml_task == MULTICLASS_CLASSIFICATION and len(np.unique(y_train)) > 100:\n warnings.warn(\n \"Disable SHAP explanations because of large number of classes (> 100).\"\n )\n return False\n if X_train.shape[0] < 20:\n warnings.warn(\n \"Disable SHAP explanations because of small number of samples (< 20).\"\n )\n return False\n return True\n\n @staticmethod\n def get_explainer(algorithm, X_train):\n\n explainer = None\n if algorithm.algorithm_short_name in [\n \"Xgboost\",\n \"Decision Tree\",\n \"Random Forest\",\n \"LightGBM\",\n \"Extra Trees\",\n \"CatBoost\",\n ]:\n explainer = shap.TreeExplainer(algorithm.model)\n elif algorithm.algorithm_short_name in [\"Linear\"]:\n explainer = shap.LinearExplainer(algorithm.model, X_train)\n elif algorithm.algorithm_short_name in [\"Neural Network\"]:\n explainer = shap.KernelExplainer(algorithm.model, X_train)\n\n return explainer\n\n @staticmethod\n def get_sample(X_validation, y_validation):\n # too many samples in the data, down-sample it\n SAMPLES_LIMIT = 1000\n if X_validation.shape[0] > SAMPLES_LIMIT:\n sample = y_validation.index.tolist()\n np.random.shuffle(sample)\n sample = sample[:SAMPLES_LIMIT]\n X_vald = X_validation.iloc[sample]\n y_vald = y_validation.iloc[sample]\n else:\n X_vald = X_validation\n y_vald = y_validation\n return X_vald, y_vald\n\n def get_predictions(algorithm, X_vald, y_vald, ml_task):\n # compute predictions on down-sampled data\n predictions = algorithm.predict(X_vald)\n\n if ml_task == MULTICLASS_CLASSIFICATION:\n oh = OneHotEncoder(sparse=False)\n y_encoded = oh.fit_transform(np.array(y_vald).reshape(-1, 1))\n residua = np.sum(np.abs(np.array(y_encoded) - predictions), axis=1)\n else:\n residua = np.abs(np.array(y_vald) - predictions)\n\n df_preds = pd.DataFrame(\n {\"res\": residua, \"lp\": range(residua.shape[0]), \"target\": np.array(y_vald)},\n index=X_vald.index,\n )\n df_preds = df_preds.sort_values(by=\"res\", ascending=False)\n\n return df_preds\n\n @staticmethod\n def summary(shap_values, X_vald, model_file_path, learner_name, class_names):\n fig = plt.gcf()\n classes = None\n if class_names is not None and len(class_names):\n classes = class_names\n\n shap.summary_plot(\n shap_values, X_vald, plot_type=\"bar\", show=False, class_names=classes\n )\n fig.tight_layout(pad=2.0)\n fig.savefig(os.path.join(model_file_path, f\"{learner_name}_shap_summary.png\"))\n plt.close(\"all\")\n\n vals = None\n if isinstance(shap_values, list):\n for sh in shap_values:\n v = np.abs(sh).mean(0)\n vals = 
v if vals is None else vals + v\n else:\n vals = np.abs(shap_values).mean(0)\n feature_importance = pd.DataFrame(\n list(zip(X_vald.columns, vals)), columns=[\"feature\", \"shap_importance\"]\n )\n feature_importance.sort_values(\n by=[\"shap_importance\"], ascending=False, inplace=True\n )\n feature_importance.to_csv(\n os.path.join(model_file_path, f\"{learner_name}_shap_importance.csv\"),\n index=False,\n )\n\n @staticmethod\n def dependence(shap_values, X_vald, model_file_path, learner_name, file_postfix=\"\"):\n fig = plt.figure(figsize=(14, 7))\n plots_cnt = np.min([9, X_vald.shape[1]])\n cols_cnt = 3\n rows_cnt = 3\n if plots_cnt < 4:\n rows_cnt = 1\n elif plots_cnt < 7:\n rows_cnt = 2\n for i in range(plots_cnt):\n ax = fig.add_subplot(rows_cnt, cols_cnt, i + 1)\n shap.dependence_plot(\n f\"rank({i})\",\n shap_values,\n X_vald,\n show=False,\n title=f\"Importance #{i+1}\",\n ax=ax,\n )\n\n fig.tight_layout(pad=2.0)\n fig.savefig(\n os.path.join(\n model_file_path, f\"{learner_name}_shap_dependence{file_postfix}.png\"\n )\n )\n plt.close(\"all\")\n\n @staticmethod\n def compute(\n algorithm,\n X_train,\n y_train,\n X_validation,\n y_validation,\n model_file_path,\n learner_name,\n class_names,\n ml_task,\n ):\n if not PlotSHAP.is_available(algorithm, X_train, y_train, ml_task):\n return\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n explainer = PlotSHAP.get_explainer(algorithm, X_train)\n X_vald, y_vald = PlotSHAP.get_sample(X_validation, y_validation)\n shap_values = explainer.shap_values(X_vald)\n\n # fix problem with 1 or 2 dimensions for binary classification\n expected_value = explainer.expected_value\n if ml_task == BINARY_CLASSIFICATION and isinstance(shap_values, list):\n shap_values = shap_values[1]\n expected_value = explainer.expected_value[1]\n\n # Summary SHAP plot\n PlotSHAP.summary(\n shap_values, X_vald, model_file_path, learner_name, class_names\n )\n # Dependence SHAP plots\n if ml_task == MULTICLASS_CLASSIFICATION:\n for t in np.unique(y_vald):\n PlotSHAP.dependence(\n shap_values[t],\n X_vald,\n model_file_path,\n learner_name,\n f\"_class_{class_names[t]}\",\n )\n else:\n PlotSHAP.dependence(shap_values, X_vald, model_file_path, learner_name)\n\n # Decision SHAP plots\n df_preds = PlotSHAP.get_predictions(algorithm, X_vald, y_vald, ml_task)\n\n if ml_task == REGRESSION:\n PlotSHAP.decisions_regression(\n df_preds,\n shap_values,\n expected_value,\n X_vald,\n y_vald,\n model_file_path,\n learner_name,\n )\n elif ml_task == BINARY_CLASSIFICATION:\n PlotSHAP.decisions_binary(\n df_preds,\n shap_values,\n expected_value,\n X_vald,\n y_vald,\n model_file_path,\n learner_name,\n )\n else:\n PlotSHAP.decisions_multiclass(\n df_preds,\n shap_values,\n expected_value,\n X_vald,\n y_vald,\n model_file_path,\n learner_name,\n class_names,\n )\n except Exception as e:\n logger.info(\n f\"Exception while producing SHAP explanations. 
{str(e)}\\nContinuing ...\"\n )\n\n @staticmethod\n def decisions_regression(\n df_preds,\n shap_values,\n expected_value,\n X_vald,\n y_vald,\n model_file_path,\n learner_name,\n ):\n fig = plt.gcf()\n shap.decision_plot(\n expected_value,\n shap_values[df_preds.lp[:10], :],\n X_vald.loc[df_preds.index[:10]],\n show=False,\n )\n fig.tight_layout(pad=2.0)\n fig.savefig(\n os.path.join(model_file_path, f\"{learner_name}_shap_worst_decisions.png\")\n )\n plt.close(\"all\")\n\n fig = plt.gcf()\n shap.decision_plot(\n expected_value,\n shap_values[df_preds.lp[-10:], :],\n X_vald.loc[df_preds.index[-10:]],\n show=False,\n )\n fig.tight_layout(pad=2.0)\n fig.savefig(\n os.path.join(model_file_path, f\"{learner_name}_shap_best_decisions.png\")\n )\n plt.close(\"all\")\n\n @staticmethod\n def decisions_binary(\n df_preds,\n shap_values,\n expected_value,\n X_vald,\n y_vald,\n model_file_path,\n learner_name,\n ):\n # classes are from 0 ...\n for t in np.unique(y_vald):\n fig = plt.gcf()\n shap.decision_plot(\n expected_value,\n shap_values[df_preds[df_preds.target == t].lp[:10], :],\n X_vald.loc[df_preds[df_preds.target == t].index[:10]],\n show=False,\n )\n fig.tight_layout(pad=2.0)\n fig.savefig(\n os.path.join(\n model_file_path,\n f\"{learner_name}_shap_class_{t}_worst_decisions.png\",\n )\n )\n plt.close(\"all\")\n\n fig = plt.gcf()\n shap.decision_plot(\n expected_value,\n shap_values[df_preds[df_preds.target == t].lp[-10:], :],\n X_vald.loc[df_preds[df_preds.target == t].index[-10:]],\n show=False,\n )\n fig.tight_layout(pad=2.0)\n fig.savefig(\n os.path.join(\n model_file_path, f\"{learner_name}_shap_class_{t}_best_decisions.png\"\n )\n )\n plt.close(\"all\")\n\n @staticmethod\n def decisions_multiclass(\n df_preds,\n shap_values,\n expected_value,\n X_vald,\n y_vald,\n model_file_path,\n learner_name,\n class_names,\n ):\n\n for decision_type in [\"worst\", \"best\"]:\n m = 1 if decision_type == \"worst\" else -1\n for i in range(4):\n\n fig = plt.gcf()\n shap.multioutput_decision_plot(\n list(expected_value),\n shap_values,\n row_index=df_preds.lp.iloc[m * i],\n show=False,\n legend_labels=class_names,\n title=f\"It should be {class_names[df_preds.target.iloc[m*i]]}\",\n )\n fig.tight_layout(pad=2.0)\n fig.savefig(\n os.path.join(\n model_file_path,\n f\"{learner_name}_sample_{i}_{decision_type}_decisions.png\",\n )\n )\n plt.close(\"all\")\n"
] |
[
[
"numpy.abs",
"numpy.min",
"numpy.unique",
"sklearn.preprocessing.OneHotEncoder",
"numpy.random.shuffle",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure"
]
] |
dbcooney/Social-Dilemmas-of-Sociality-due-to-Beneficial-and-Costly-Contagion
|
[
"934e041d12170af97f297b3da455ee8557f516a3"
] |
[
"Scripts/socialoptimumfigure.py"
] |
[
"\"\"\"\nCode used to generate Figure 1, which illustrates the Cobb-Douglas utility function, the\nendemic equilibriua for the good and bad contagion, and the population utility ahieved at \nequilibrium and the result socially-optimal sociality strategy. \n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.font_manager\n\n#from matplotlib import rc\n#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\n#rc('text', usetex=True)\n\nplt.rcParams['text.usetex'] = True \nplt.rcParams['text.latex.preamble'] = [r'\\usepackage[cm]{sfmath}']\nplt.rcParams['font.family'] = 'sans-serif'\nplt.rcParams['font.sans-serif'] = 'cm'\n\nalpha1 = 0.25 \nalpha2 = 0.5 \nalpha3 = 0.75 \n\nc1 = 0.5\nc2 = 1.\nc3 = 2.\n\n\"\"\"\nDefining expressions for the Cobb-Douglas utility, as well as the equilibrium fraction\nof individuals infected by the good contagion and susceptible to the bad contagions. \n\"\"\"\n\ndef Utility(Ig,Sb,alpha):\n\treturn (Ig ** alpha) * (Sb ** (1.0 - alpha))\n\t\ndef Equilibrium_Ig(Rr):\n\treturn 1. - (1. / Rr)\n\t\ndef Equilibrium_Sb(Rr,c):\n\treturn min(1.,1. / (c * Rr))\n\t\ndef Utility_eq(Rr,c,alpha):\n\treturn Utility(Equilibrium_Ig(Rr),Equilibrium_Sb(Rr,c),alpha)\n\t\nEquilibrium_Sb_vec = np.vectorize(Equilibrium_Sb)\nUtility_eq_vec = np.vectorize(Utility_eq)\n\nvalue_step = 0.01\nvalue_array = np.arange(0.0,1.0 + value_step,value_step)\nvalue_list = [value for value in value_array]\nvalue_length = len(value_list)\n\n\nR_range = np.arange(1.,5.,value_step)\n\n\n\"\"\"\nCalculating Cobb-Douglas utility for range of levels of the good and bad contagion.\n\"\"\"\n\nutility_matrix1 = np.zeros([value_length,value_length])\nutility_matrix2 = np.zeros([value_length,value_length])\nutility_matrix3 = np.zeros([value_length,value_length])\n\nfor Sb in value_list:\n\tfor Ig in value_list:\n\t\tutility_matrix1[value_list.index(Sb),value_list.index(Ig)] = Utility(Ig,Sb,alpha1)\n\t\tutility_matrix2[value_list.index(Sb),value_list.index(Ig)] = Utility(Ig,Sb,alpha2)\n\t\tutility_matrix3[value_list.index(Sb),value_list.index(Ig)] = Utility(Ig,Sb,alpha3)\n\n\n\"\"\"\nGenerating heatmap of Cobb-Douglas utility as a function of Ig and Sb.\n\"\"\"\n\nd = np.linspace(0.,1.,200)\nIg,Sb = np.meshgrid(d,d)\nz1 = Utility(Ig,Sb,alpha1)\nz2 = Utility(Ig,Sb,alpha2)\nz3 = Utility(Ig,Sb,alpha3)\n\ncmap2 = matplotlib.colors.ListedColormap([\"k\",\"k\"])\n\nfig,axs = plt.subplots(3,3, sharey = 'row', figsize = (12,12))\n\n#im1 = axs[0,0].imshow(np.flipud(utility_matrix1), \"viridis\")\nim1 = axs[0,0].contourf(Ig,Sb,z1,levels = 100, cmap = \"viridis\")\nim1a = axs[0,0].contour(Ig,Sb,z1,[0.2,0.5,0.8],cmap = cmap2, linewidths = 3., linestyles = 'dashed')\nmanual_locations = [(0.8,0.2), (0.825,0.5), (0.85,0.8)]\naxs[0,0].clabel(im1a,inline=1, fontsize=16,manual = manual_locations, fmt='%1.1f')\nprint(z1)\n#im4 = axs[1, 1].imshow(Z, interpolation='nearest')\nfig.colorbar(im1, ax=axs[0, 0],fraction=0.056, pad=0.04)\n\n#ax.colorbar(ax,fraction=0.04, pad=0.04)\n\nstate_labels = [0.0,0.2,0.4,0.6,0.8,1.0]\nr_state_labels = [1.0,0.8,0.6,0.4,0.2,0.0]\n\naxs[0,0].set_xlabel(r\"Probability Informed $\\overline{I}^g$\", fontsize = 16.)\naxs[0,0].set_ylabel(r\"Probability Healthy $\\overline{S}^b$\", fontsize = 16.)\n\n\n\n\n\n#im2 = axs[0,1].imshow(np.flipud(utility_matrix2), \"viridis\")\nim2 = axs[0,1].contourf(Ig,Sb,z2,levels = 100, cmap = \"viridis\")\n#im4 = axs[1, 1].imshow(Z, 
interpolation='nearest')\nim2a = axs[0,1].contour(Ig,Sb,z2,[0.2,0.5,0.8],cmap = cmap2, linewidths = 3., linestyles = 'dashed')\nmanual_locations = [(0.175,0.2), (0.5,0.5), (0.75,0.75)]\naxs[0,1].clabel(im2a,inline=1, fontsize=16, manual = manual_locations, fmt='%1.1f')\nfig.colorbar(im2, ax=axs[0, 1],fraction=0.056, pad=0.04)\n\n\n\naxs[0,1].set_xlabel(r\"Probability Informed $\\overline{I}^g$\", fontsize = 16.)#\n\nim3 = axs[0,2].contourf(Ig,Sb,z3,levels = 100, cmap = \"viridis\")\nim3a = axs[0,2].contour(Ig,Sb,z3,[0.2,0.5,0.8],cmap = cmap2, linewidths = 3., linestyles = 'dashed')\nmanual_locations = [(0.15,0.4), (0.5,0.5), (0.9,0.6)]\naxs[0,2].clabel(im3a,inline=1, fontsize=16,manual = manual_locations, fmt='%1.1f')\n\nfig.colorbar(im3, ax=axs[0, 2],fraction=0.056, pad=0.04)\n\n\n\n\"\"\"\nPlotting the endemic equilibria for the good and bad contagion for several cases of the \nrelative infectiousness of the good and bad contagion (as characterized by the parameter\n$c$).\n\"\"\"\n\naxs[0,2].set_xlabel(r\"Probability Informed $\\overline{I}^g$\", fontsize = 16.)#\n\naxs[1,0].plot(R_range,Equilibrium_Ig(R_range), lw = 3., color = 'b', label = r\"$\\overline{I}^g$\")\naxs[1,0].plot(R_range,Equilibrium_Sb_vec(R_range,c1), lw = 3., color = 'g', label = r\"$\\overline{S}^b$\")\n\naxs[1,1].plot(R_range,Equilibrium_Ig(R_range), lw = 3., color = 'b')\naxs[1,1].plot(R_range,Equilibrium_Sb_vec(R_range,c2), lw = 3., color = 'g')\n\naxs[1,2].plot(R_range,Equilibrium_Ig(R_range), lw = 3., color = 'b')\naxs[1,2].plot(R_range,Equilibrium_Sb_vec(R_range,c3), lw = 3., color = 'g')\n\naxs[1,0].legend(loc = \"lower center\")\n\naxs[1,0].set_ylabel(r\"Equilibrium States $\\overline{I}^g$, $\\overline{S}^b$\", fontsize = 16.)\n\n\n\"\"\"\nPlots of the Cobb-Douglas utility achieved at the endemic equilibrium for a several\ncases of the relative infectiousness parameter $c$ and relative weight $\\alpha$ placed\non the good and bad contagion in the Cobb-Douglas formula. 
\n\"\"\"\n\naxs[2,0].plot(R_range,Utility_eq_vec(R_range,c1,alpha1), lw = 3., color = 'b')\naxs[2,1].plot(R_range,Utility_eq_vec(R_range,c2,alpha2), lw = 3., color = 'b')\naxs[2,2].plot(R_range,Utility_eq_vec(R_range,c3,alpha3), lw = 3., color = 'b')\n\naxs[2,0].set_ylabel(r\"Utility $U(\\overline{I}^g,\\overline{S}^b)$\", fontsize = 16.)\naxs[2,1].set_xlabel(r\"Good Contagion Reproduction Number $\\mathcal{R}^g$\", fontsize = 16.)\n\naxs[2,0].axis([1,5,0.,1.])\naxs[2,1].axis([1,5,0.,1.])\naxs[2,2].axis([1,5,0.,1.])\n\n\naxs[0,0].set_title(r\"$\\alpha = 0.25$, $c = 0.5$\", fontsize =16.)\naxs[0,1].set_title(r\"$\\alpha = 0.5$, $c = 1$\", fontsize =16.)\naxs[0,2].set_title(r\"$\\alpha = 0.75$, $c = 2$\", fontsize =16.)\n\npanel_label_x = 1.\npanel_label_y = 1.075\naxs[0,0].text(panel_label_x, panel_label_y, 'a', transform=axs[0,0].transAxes,\n fontsize=16, fontweight='bold', va='top', ha='right')\naxs[0,1].text(panel_label_x, panel_label_y, 'b', transform=axs[0,1].transAxes,\n fontsize=16, fontweight='bold', va='top', ha='right')\naxs[0,2].text(panel_label_x, panel_label_y, 'c', transform=axs[0,2].transAxes,\n fontsize=16, fontweight='bold', va='top', ha='right')\naxs[1,0].text(panel_label_x, panel_label_y, 'd', transform=axs[1,0].transAxes,\n fontsize=16, fontweight='bold', va='top', ha='right')\naxs[1,1].text(panel_label_x, panel_label_y, 'e', transform=axs[1,1].transAxes,\n fontsize=16, fontweight='bold', va='top', ha='right')\naxs[1,2].text(panel_label_x, panel_label_y, 'f', transform=axs[1,2].transAxes,\n fontsize=16, fontweight='bold', va='top', ha='right')\naxs[2,0].text(panel_label_x, panel_label_y, 'g', transform=axs[2,0].transAxes,\n fontsize=16, fontweight='bold', va='top', ha='right')\naxs[2,1].text(panel_label_x, panel_label_y, 'h', transform=axs[2,1].transAxes,\n fontsize=16, fontweight='bold', va='top', ha='right')\naxs[2,2].text(panel_label_x, panel_label_y, 'i', transform=axs[2,2].transAxes,\n fontsize=16, fontweight='bold', va='top', ha='right')\n\nplt.tight_layout()\nplt.savefig(\"socialoptimumfigure.png\")\n\nplt.figure(2)\n\nplt.contourf(Ig,Sb,z1,levels = 100, cmap = \"viridis\")\nplt.contour(Ig,Sb,z1,[0.8],cmap = cmap2)\n\n\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.contourf",
"numpy.linspace",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.vectorize",
"matplotlib.pyplot.contour",
"numpy.meshgrid",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
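The figure script above evaluates the Cobb-Douglas utility U(Ig, Sb) = Ig^alpha * Sb^(1 - alpha) at the endemic equilibria Ig* = 1 - 1/R and Sb* = min(1, 1/(cR)). A minimal standalone sketch of that computation (not part of the dataset row; np.minimum stands in for the np.vectorize wrapper the script applies to Equilibrium_Sb):

import numpy as np

def utility(ig, sb, alpha):
    # Cobb-Douglas utility over the informed fraction Ig and healthy fraction Sb.
    return ig ** alpha * sb ** (1.0 - alpha)

def utility_at_equilibrium(r, c, alpha):
    # Endemic equilibria: Ig* = 1 - 1/R, Sb* = min(1, 1/(c*R)).
    ig = 1.0 - 1.0 / r
    sb = np.minimum(1.0, 1.0 / (c * r))
    return utility(ig, sb, alpha)

r = np.arange(1.0, 5.0, 0.01)
print(float(utility_at_equilibrium(r, c=1.0, alpha=0.5).max()))

Because np.minimum is already elementwise, this version works directly on the whole array of reproduction numbers.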
jimgoo/zipline
|
[
"67e3355c67e34ccad000d411a834262841a2c232"
] |
[
"zipline/data/us_equity_pricing.py"
] |
[
"# Copyright 2015 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom abc import ABCMeta, abstractmethod, abstractproperty\nfrom errno import ENOENT\nfrom functools import partial\nfrom os import remove\nfrom os.path import exists\nimport sqlite3\nimport warnings\n\nfrom bcolz import (\n carray,\n ctable,\n)\nfrom collections import namedtuple\nimport logbook\nimport numpy as np\nfrom numpy import (\n array,\n int64,\n float64,\n full,\n iinfo,\n integer,\n issubdtype,\n nan,\n uint32,\n zeros,\n)\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n read_csv,\n Timestamp,\n NaT,\n isnull,\n)\nfrom pandas.tslib import iNaT\nfrom six import (\n iteritems,\n with_metaclass,\n viewkeys,\n)\n\nfrom zipline.utils.functional import apply\nfrom zipline.utils.preprocess import call\nfrom zipline.utils.input_validation import (\n coerce_string,\n preprocess,\n expect_element,\n verify_indices_all_unique,\n)\nfrom zipline.utils.sqlite_utils import group_into_chunks\nfrom zipline.utils.memoize import lazyval\nfrom zipline.utils.cli import maybe_show_progress\nfrom ._equities import _compute_row_slices, _read_bcolz_data\nfrom ._adjustments import load_adjustments_from_sqlite\n\n\nlogger = logbook.Logger('UsEquityPricing')\n\nOHLC = frozenset(['open', 'high', 'low', 'close'])\nUS_EQUITY_PRICING_BCOLZ_COLUMNS = (\n 'open', 'high', 'low', 'close', 'volume', 'day', 'id'\n)\nSQLITE_ADJUSTMENT_COLUMN_DTYPES = {\n 'effective_date': integer,\n 'ratio': float,\n 'sid': integer,\n}\nSQLITE_ADJUSTMENT_TABLENAMES = frozenset(['splits', 'dividends', 'mergers'])\n\nSQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES = {\n 'sid': integer,\n 'ex_date': integer,\n 'declared_date': integer,\n 'record_date': integer,\n 'pay_date': integer,\n 'amount': float,\n}\n\nSQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES = {\n 'sid': integer,\n 'ex_date': integer,\n 'declared_date': integer,\n 'record_date': integer,\n 'pay_date': integer,\n 'payment_sid': integer,\n 'ratio': float,\n}\nUINT32_MAX = iinfo(uint32).max\n\n\nclass NoDataOnDate(Exception):\n \"\"\"\n Raised when a spot price can be found for the sid and date.\n \"\"\"\n pass\n\n\ndef check_uint32_safe(value, colname):\n if value >= UINT32_MAX:\n raise ValueError(\n \"Value %s from column '%s' is too large\" % (value, colname)\n )\n\n\n@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})\ndef winsorise_uint32(df, invalid_data_behavior, column, *columns):\n \"\"\"Drops any record where a value would not fit into a uint32.\n\n Parameters\n ----------\n df : pd.DataFrame\n The dataframe to winsorise.\n invalid_data_behavior : {'warn', 'raise', 'ignore'}\n What to do when data is outside the bounds of a uint32.\n *columns : iterable[str]\n The names of the columns to check.\n\n Returns\n -------\n truncated : pd.DataFrame\n ``df`` with values that do not fit into a uint32 zeroed out.\n \"\"\"\n columns = list((column,) + columns)\n mask = df[columns] > UINT32_MAX\n\n if invalid_data_behavior != 'ignore':\n mask |= df[columns].isnull()\n else:\n # we are not 
going to generate a warning or error for this so just use\n # nan_to_num\n df[columns] = np.nan_to_num(df[columns])\n\n mv = mask.values\n if mv.any():\n if invalid_data_behavior == 'raise':\n raise ValueError(\n '%d values out of bounds for uint32: %r' % (\n mv.sum(), df[mask.any(axis=1)],\n ),\n )\n if invalid_data_behavior == 'warn':\n warnings.warn(\n 'Ignoring %d values because they are out of bounds for'\n ' uint32: %r' % (\n mv.sum(), df[mask.any(axis=1)],\n ),\n stacklevel=3, # one extra frame for `expect_element`\n )\n\n df[mask] = 0\n return df\n\n\n@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})\ndef to_ctable(raw_data, invalid_data_behavior):\n if isinstance(raw_data, ctable):\n # we already have a ctable so do nothing\n return raw_data\n\n winsorise_uint32(raw_data, invalid_data_behavior, 'volume', *OHLC)\n processed = (raw_data[list(OHLC)] * 1000).astype('uint32')\n dates = raw_data.index.values.astype('datetime64[s]')\n check_uint32_safe(dates.max().view(np.int64), 'day')\n processed['day'] = dates.astype('uint32')\n processed['volume'] = raw_data.volume.astype('uint32')\n return ctable.fromdataframe(processed)\n\n\nclass BcolzDailyBarWriter(object):\n \"\"\"\n Class capable of writing daily OHLCV data to disk in a format that can be\n read efficiently by BcolzDailyOHLCVReader.\n\n Parameters\n ----------\n filename : str\n The location at which we should write our output.\n calendar : pandas.DatetimeIndex\n Calendar to use to compute asset calendar offsets.\n\n See Also\n --------\n zipline.data.us_equity_pricing.BcolzDailyBarReader\n \"\"\"\n _csv_dtypes = {\n 'open': float64,\n 'high': float64,\n 'low': float64,\n 'close': float64,\n 'volume': float64,\n }\n\n def __init__(self, filename, calendar):\n self._filename = filename\n self._calendar = calendar\n\n @property\n def progress_bar_message(self):\n return \"Merging daily equity files:\"\n\n def progress_bar_item_show_func(self, value):\n return value if value is None else str(value[0])\n\n def write(self,\n data,\n assets=None,\n show_progress=False,\n invalid_data_behavior='warn'):\n \"\"\"\n Parameters\n ----------\n data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]]\n The data chunks to write. Each chunk should be a tuple of sid\n and the data for that asset.\n assets : set[int], optional\n The assets that should be in ``data``. 
If this is provided\n we will check ``data`` against the assets and provide better\n progress information.\n show_progress : bool, optional\n Whether or not to show a progress bar while writing.\n invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional\n What to do when data is encountered that is outside the range of\n a uint32.\n\n Returns\n -------\n table : bcolz.ctable\n The newly-written table.\n \"\"\"\n ctx = maybe_show_progress(\n ((sid, to_ctable(df, invalid_data_behavior)) for sid, df in data),\n show_progress=show_progress,\n item_show_func=self.progress_bar_item_show_func,\n label=self.progress_bar_message,\n length=len(assets) if assets is not None else None,\n )\n with ctx as it:\n return self._write_internal(it, assets)\n\n def write_csvs(self,\n asset_map,\n show_progress=False,\n invalid_data_behavior='warn'):\n \"\"\"Read CSVs as DataFrames from our asset map.\n\n Parameters\n ----------\n asset_map : dict[int -> str]\n A mapping from asset id to file path with the CSV data for that\n asset\n show_progress : bool\n Whether or not to show a progress bar while writing.\n invalid_data_behavior : {'warn', 'raise', 'ignore'}\n What to do when data is encountered that is outside the range of\n a uint32.\n \"\"\"\n read = partial(\n read_csv,\n parse_dates=['day'],\n index_col='day',\n dtype=self._csv_dtypes,\n )\n return self.write(\n ((asset, read(path)) for asset, path in iteritems(asset_map)),\n assets=viewkeys(asset_map),\n show_progress=show_progress,\n invalid_data_behavior=invalid_data_behavior,\n )\n\n def _write_internal(self, iterator, assets):\n \"\"\"\n Internal implementation of write.\n\n `iterator` should be an iterator yielding pairs of (asset, ctable).\n \"\"\"\n total_rows = 0\n first_row = {}\n last_row = {}\n calendar_offset = {}\n\n # Maps column name -> output carray.\n columns = {\n k: carray(array([], dtype=uint32))\n for k in US_EQUITY_PRICING_BCOLZ_COLUMNS\n }\n\n earliest_date = None\n calendar = self._calendar\n\n if assets is not None:\n @apply\n def iterator(iterator=iterator, assets=set(assets)):\n for asset_id, table in iterator:\n if asset_id not in assets:\n raise ValueError('unknown asset id %r' % asset_id)\n yield asset_id, table\n\n for asset_id, table in iterator:\n nrows = len(table)\n for column_name in columns:\n if column_name == 'id':\n # We know what the content of this column is, so don't\n # bother reading it.\n columns['id'].append(\n full((nrows,), asset_id, dtype='uint32'),\n )\n continue\n\n columns[column_name].append(table[column_name])\n\n if earliest_date is None:\n earliest_date = table[\"day\"][0]\n else:\n earliest_date = min(earliest_date, table[\"day\"][0])\n\n # Bcolz doesn't support ints as keys in `attrs`, so convert\n # assets to strings for use as attr keys.\n asset_key = str(asset_id)\n\n # Calculate the index into the array of the first and last row\n # for this asset. This allows us to efficiently load single\n # assets when querying the data back out of the table.\n first_row[asset_key] = total_rows\n last_row[asset_key] = total_rows + nrows - 1\n total_rows += nrows\n\n # Calculate the number of trading days between the first date\n # in the stored data and the first date of **this** asset. 
This\n # offset used for output alignment by the reader.\n asset_first_day = table['day'][0]\n calendar_offset[asset_key] = calendar.get_loc(\n Timestamp(asset_first_day, unit='s', tz='UTC'),\n )\n\n # This writes the table to disk.\n full_table = ctable(\n columns=[\n columns[colname]\n for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS\n ],\n names=US_EQUITY_PRICING_BCOLZ_COLUMNS,\n rootdir=self._filename,\n mode='w',\n )\n\n full_table.attrs['first_trading_day'] = (\n earliest_date // 1e6\n if earliest_date is not None else\n iNaT\n )\n full_table.attrs['first_row'] = first_row\n full_table.attrs['last_row'] = last_row\n full_table.attrs['calendar_offset'] = calendar_offset\n full_table.attrs['calendar'] = calendar.asi8.tolist()\n full_table.flush()\n return full_table\n\n\nclass DailyBarReader(with_metaclass(ABCMeta)):\n \"\"\"\n Reader for OHCLV pricing data at a daily frequency.\n \"\"\"\n @abstractmethod\n def load_raw_arrays(self, columns, start_date, end_date, assets):\n pass\n\n @abstractmethod\n def spot_price(self, sid, day, colname):\n pass\n\n @abstractproperty\n def last_available_dt(self):\n pass\n\n\nclass BcolzDailyBarReader(DailyBarReader):\n \"\"\"\n Reader for raw pricing data written by BcolzDailyOHLCVWriter.\n\n A Bcolz CTable is comprised of Columns and Attributes.\n\n Columns\n -------\n The table with which this loader interacts contains the following columns:\n\n ['open', 'high', 'low', 'close', 'volume', 'day', 'id'].\n\n The data in these columns is interpreted as follows:\n\n - Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *\n as-traded dollar value.\n - Volume is interpreted as as-traded volume.\n - Day is interpreted as seconds since midnight UTC, Jan 1, 1970.\n - Id is the asset id of the row.\n\n The data in each column is grouped by asset and then sorted by day within\n each asset block.\n\n The table is built to represent a long time range of data, e.g. ten years\n of equity data, so the lengths of each asset block is not equal to each\n other. 
The blocks are clipped to the known start and end date of each asset\n to cut down on the number of empty values that would need to be included to\n make a regular/cubic dataset.\n\n When read across the open, high, low, close, and volume with the same\n index should represent the same asset and day.\n\n Parameters\n ----------\n table : bcolz.ctable\n The ctable contaning the pricing data, with attrs corresponding to the\n Attributes list below.\n read_all_threshold : int\n The number of equities at which;\n below, the data is read by reading a slice from the carray\n per asset.\n above, the data is read by pulling all of the data for all assets\n into memory and then indexing into that array for each day and\n asset pair.\n Used to tune performance of reads when using a small or large number\n of equities.\n\n Attributes\n ----------\n The table with which this loader interacts contains the following\n attributes:\n\n first_row : dict\n Map from asset_id -> index of first row in the dataset with that id.\n last_row : dict\n Map from asset_id -> index of last row in the dataset with that id.\n calendar_offset : dict\n Map from asset_id -> calendar index of first row.\n calendar : list[int64]\n Calendar used to compute offsets, in asi8 format (ns since EPOCH).\n\n We use first_row and last_row together to quickly find ranges of rows to\n load when reading an asset's data into memory.\n\n We use calendar_offset and calendar to orient loaded blocks within a\n range of queried dates.\n\n See Also\n --------\n zipline.data.us_equity_pricing.BcolzDailyBarWriter\n \"\"\"\n def __init__(self, table, read_all_threshold=3000):\n self._maybe_table_rootdir = table\n # Cache of fully read np.array for the carrays in the daily bar table.\n # raw_array does not use the same cache, but it could.\n # Need to test keeping the entire array in memory for the course of a\n # process first.\n self._spot_cols = {}\n self.PRICE_ADJUSTMENT_FACTOR = 0.001\n self._read_all_threshold = read_all_threshold\n\n @lazyval\n def _table(self):\n maybe_table_rootdir = self._maybe_table_rootdir\n if isinstance(maybe_table_rootdir, ctable):\n return maybe_table_rootdir\n return ctable(rootdir=maybe_table_rootdir, mode='r')\n\n @lazyval\n def _calendar(self):\n return DatetimeIndex(self._table.attrs['calendar'], tz='UTC')\n\n @lazyval\n def _first_rows(self):\n return {\n int(asset_id): start_index\n for asset_id, start_index in iteritems(\n self._table.attrs['first_row'],\n )\n }\n\n @lazyval\n def _last_rows(self):\n return {\n int(asset_id): end_index\n for asset_id, end_index in iteritems(\n self._table.attrs['last_row'],\n )\n }\n\n @lazyval\n def _calendar_offsets(self):\n return {\n int(id_): offset\n for id_, offset in iteritems(\n self._table.attrs['calendar_offset'],\n )\n }\n\n @lazyval\n def first_trading_day(self):\n try:\n return Timestamp(\n self._table.attrs['first_trading_day'],\n unit='ms',\n tz='UTC'\n )\n except KeyError:\n return None\n\n @property\n def last_available_dt(self):\n return self._calendar[-1]\n\n def _compute_slices(self, start_idx, end_idx, assets):\n \"\"\"\n Compute the raw row indices to load for each asset on a query for the\n given dates after applying a shift.\n\n Parameters\n ----------\n start_idx : int\n Index of first date for which we want data.\n end_idx : int\n Index of last date for which we want data.\n assets : pandas.Int64Index\n Assets for which we want to compute row indices\n\n Returns\n -------\n A 3-tuple of (first_rows, last_rows, offsets):\n first_rows : 
np.array[intp]\n Array with length == len(assets) containing the index of the first\n row to load for each asset in `assets`.\n last_rows : np.array[intp]\n Array with length == len(assets) containing the index of the last\n row to load for each asset in `assets`.\n offset : np.array[intp]\n Array with length == (len(asset) containing the index in a buffer\n of length `dates` corresponding to the first row of each asset.\n\n The value of offset[i] will be 0 if asset[i] existed at the start\n of a query. Otherwise, offset[i] will be equal to the number of\n entries in `dates` for which the asset did not yet exist.\n \"\"\"\n # The core implementation of the logic here is implemented in Cython\n # for efficiency.\n return _compute_row_slices(\n self._first_rows,\n self._last_rows,\n self._calendar_offsets,\n start_idx,\n end_idx,\n assets,\n )\n\n def load_raw_arrays(self, columns, start_date, end_date, assets):\n # Assumes that the given dates are actually in calendar.\n start_idx = self._calendar.get_loc(start_date)\n end_idx = self._calendar.get_loc(end_date)\n first_rows, last_rows, offsets = self._compute_slices(\n start_idx,\n end_idx,\n assets,\n )\n read_all = len(assets) > self._read_all_threshold\n return _read_bcolz_data(\n self._table,\n (end_idx - start_idx + 1, len(assets)),\n list(columns),\n first_rows,\n last_rows,\n offsets,\n read_all,\n )\n\n def _spot_col(self, colname):\n \"\"\"\n Get the colname from daily_bar_table and read all of it into memory,\n caching the result.\n\n Parameters\n ----------\n colname : string\n A name of a OHLCV carray in the daily_bar_table\n\n Returns\n -------\n array (uint32)\n Full read array of the carray in the daily_bar_table with the\n given colname.\n \"\"\"\n try:\n col = self._spot_cols[colname]\n except KeyError:\n col = self._spot_cols[colname] = self._table[colname]\n return col\n\n def get_last_traded_dt(self, asset, day):\n volumes = self._spot_col('volume')\n\n if day >= asset.end_date:\n # go back to one day before the asset ended\n search_day = self._calendar[\n self._calendar.searchsorted(asset.end_date) - 1\n ]\n else:\n search_day = day\n\n while True:\n try:\n ix = self.sid_day_index(asset, search_day)\n except NoDataOnDate:\n return None\n if volumes[ix] != 0:\n return search_day\n prev_day_ix = self._calendar.get_loc(search_day) - 1\n if prev_day_ix > -1:\n search_day = self._calendar[prev_day_ix]\n else:\n return None\n\n def sid_day_index(self, sid, day):\n \"\"\"\n Parameters\n ----------\n sid : int\n The asset identifier.\n day : datetime64-like\n Midnight of the day for which data is requested.\n\n Returns\n -------\n int\n Index into the data tape for the given sid and day.\n Raises a NoDataOnDate exception if the given day and sid is before\n or after the date range of the equity.\n \"\"\"\n try:\n day_loc = self._calendar.get_loc(day)\n except:\n raise NoDataOnDate(\"day={0} is outside of calendar={1}\".format(\n day, self._calendar))\n offset = day_loc - self._calendar_offsets[sid]\n if offset < 0:\n raise NoDataOnDate(\n \"No data on or before day={0} for sid={1}\".format(\n day, sid))\n ix = self._first_rows[sid] + offset\n if ix > self._last_rows[sid]:\n raise NoDataOnDate(\n \"No data on or after day={0} for sid={1}\".format(\n day, sid))\n return ix\n\n def spot_price(self, sid, day, colname):\n \"\"\"\n Parameters\n ----------\n sid : int\n The asset identifier.\n day : datetime64-like\n Midnight of the day for which data is requested.\n colname : string\n The price field. e.g. 
('open', 'high', 'low', 'close', 'volume')\n\n Returns\n -------\n float\n The spot price for colname of the given sid on the given day.\n Raises a NoDataOnDate exception if the given day and sid is before\n or after the date range of the equity.\n Returns -1 if the day is within the date range, but the price is\n 0.\n \"\"\"\n ix = self.sid_day_index(sid, day)\n price = self._spot_col(colname)[ix]\n if price == 0:\n return -1\n if colname != 'volume':\n return price * 0.001\n else:\n return price\n\n\nclass PanelDailyBarReader(DailyBarReader):\n \"\"\"\n Reader for data passed as Panel.\n\n DataPanel Structure\n -------\n items : Int64Index\n Asset identifiers. Must be unique.\n major_axis : DatetimeIndex\n Dates for data provided provided by the Panel. Must be unique.\n minor_axis : ['open', 'high', 'low', 'close', 'volume']\n Price attributes. Must be unique.\n\n Attributes\n ----------\n The table with which this loader interacts contains the following\n attributes:\n\n panel : pd.Panel\n The panel from which to read OHLCV data.\n first_trading_day : pd.Timestamp\n The first trading day in the dataset.\n \"\"\"\n @preprocess(panel=call(verify_indices_all_unique))\n def __init__(self, calendar, panel):\n\n panel = panel.copy()\n if 'volume' not in panel.minor_axis:\n # Fake volume if it does not exist.\n panel.loc[:, :, 'volume'] = int(1e9)\n\n self.first_trading_day = panel.major_axis[0]\n self._calendar = calendar\n\n self.panel = panel\n\n @property\n def last_available_dt(self):\n return self._calendar[-1]\n\n def load_raw_arrays(self, columns, start_date, end_date, assets):\n columns = list(columns)\n cal = self._calendar\n index = cal[cal.slice_indexer(start_date, end_date)]\n shape = (len(index), len(assets))\n results = []\n for col in columns:\n outbuf = zeros(shape=shape)\n for i, asset in enumerate(assets):\n data = self.panel.loc[asset, start_date:end_date, col]\n data = data.reindex_axis(index).values\n outbuf[:, i] = data\n results.append(outbuf)\n return results\n\n def spot_price(self, sid, day, colname):\n \"\"\"\n Parameters\n ----------\n sid : int\n The asset identifier.\n day : datetime64-like\n Midnight of the day for which data is requested.\n colname : string\n The price field. e.g. 
('open', 'high', 'low', 'close', 'volume')\n\n Returns\n -------\n float\n The spot price for colname of the given sid on the given day.\n Raises a NoDataOnDate exception if the given day and sid is before\n or after the date range of the equity.\n Returns -1 if the day is within the date range, but the price is\n 0.\n \"\"\"\n return self.panel.loc[sid, day, colname]\n\n def get_last_traded_dt(self, sid, dt):\n \"\"\"\n Parameters\n ----------\n sid : int\n The asset identifier.\n dt : datetime64-like\n Midnight of the day for which data is requested.\n\n Returns\n -------\n pd.Timestamp : The last know dt for the asset and dt;\n NaT if no trade is found before the given dt.\n \"\"\"\n while dt in self.panel.major_axis:\n freq = self.panel.major_axis.freq\n if not isnull(self.panel.loc[sid, dt, 'close']):\n return dt\n dt -= freq\n else:\n return NaT\n\n\nclass SQLiteAdjustmentWriter(object):\n \"\"\"\n Writer for data to be read by SQLiteAdjustmentReader\n\n Parameters\n ----------\n conn_or_path : str or sqlite3.Connection\n A handle to the target sqlite database.\n daily_bar_reader : BcolzDailyBarReader\n Daily bar reader to use for dividend writes.\n overwrite : bool, optional, default=False\n If True and conn_or_path is a string, remove any existing files at the\n given path before connecting.\n\n See Also\n --------\n zipline.data.us_equity_pricing.SQLiteAdjustmentReader\n \"\"\"\n\n def __init__(self,\n conn_or_path,\n daily_bar_reader,\n calendar,\n overwrite=False):\n if isinstance(conn_or_path, sqlite3.Connection):\n self.conn = conn_or_path\n elif isinstance(conn_or_path, str):\n if overwrite and exists(conn_or_path):\n try:\n remove(conn_or_path)\n except OSError as e:\n if e.errno != ENOENT:\n raise\n self.conn = sqlite3.connect(conn_or_path)\n self.uri = conn_or_path\n else:\n raise TypeError(\"Unknown connection type %s\" % type(conn_or_path))\n\n self._daily_bar_reader = daily_bar_reader\n self._calendar = calendar\n\n def _write(self, tablename, expected_dtypes, frame):\n if frame is None or frame.empty:\n # keeping the dtypes correct for empty frames is not easy\n frame = DataFrame(\n np.array([], dtype=list(expected_dtypes.items())),\n )\n else:\n if frozenset(frame.columns) != viewkeys(expected_dtypes):\n raise ValueError(\n \"Unexpected frame columns:\\n\"\n \"Expected Columns: %s\\n\"\n \"Received Columns: %s\" % (\n set(expected_dtypes),\n frame.columns.tolist(),\n )\n )\n\n actual_dtypes = frame.dtypes\n for colname, expected in iteritems(expected_dtypes):\n actual = actual_dtypes[colname]\n if not issubdtype(actual, expected):\n raise TypeError(\n \"Expected data of type {expected} for column\"\n \" '{colname}', but got '{actual}'.\".format(\n expected=expected,\n colname=colname,\n actual=actual,\n ),\n )\n\n frame.to_sql(\n tablename,\n self.conn,\n if_exists='append',\n chunksize=50000,\n )\n\n def write_frame(self, tablename, frame):\n if tablename not in SQLITE_ADJUSTMENT_TABLENAMES:\n raise ValueError(\n \"Adjustment table %s not in %s\" % (\n tablename,\n SQLITE_ADJUSTMENT_TABLENAMES,\n )\n )\n if not (frame is None or frame.empty):\n frame = frame.copy()\n frame['effective_date'] = frame['effective_date'].values.astype(\n 'datetime64[s]',\n ).astype('int64')\n return self._write(\n tablename,\n SQLITE_ADJUSTMENT_COLUMN_DTYPES,\n frame,\n )\n\n def write_dividend_payouts(self, frame):\n \"\"\"\n Write dividend payout data to SQLite table `dividend_payouts`.\n \"\"\"\n return self._write(\n 'dividend_payouts',\n SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES,\n 
frame,\n )\n\n def write_stock_dividend_payouts(self, frame):\n return self._write(\n 'stock_dividend_payouts',\n SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES,\n frame,\n )\n\n def calc_dividend_ratios(self, dividends):\n \"\"\"\n Calculate the ratios to apply to equities when looking back at pricing\n history so that the price is smoothed over the ex_date, when the market\n adjusts to the change in equity value due to upcoming dividend.\n\n Returns\n -------\n DataFrame\n A frame in the same format as splits and mergers, with keys\n - sid, the id of the equity\n - effective_date, the date in seconds on which to apply the ratio.\n - ratio, the ratio to apply to backwards looking pricing data.\n \"\"\"\n if dividends is None:\n return DataFrame(np.array(\n [],\n dtype=[\n ('sid', uint32),\n ('effective_date', uint32),\n ('ratio', float64),\n ],\n ))\n ex_dates = dividends.ex_date.values\n\n sids = dividends.sid.values\n amounts = dividends.amount.values\n\n ratios = full(len(amounts), nan)\n\n daily_bar_reader = self._daily_bar_reader\n\n effective_dates = full(len(amounts), -1, dtype=int64)\n calendar = self._calendar\n for i, amount in enumerate(amounts):\n sid = sids[i]\n ex_date = ex_dates[i]\n day_loc = calendar.get_loc(ex_date, method='bfill')\n prev_close_date = calendar[day_loc - 1]\n try:\n prev_close = daily_bar_reader.spot_price(\n sid, prev_close_date, 'close')\n if prev_close != 0.0:\n ratio = 1.0 - amount / prev_close\n ratios[i] = ratio\n # only assign effective_date when data is found\n effective_dates[i] = ex_date\n except NoDataOnDate:\n logger.warn(\"Couldn't compute ratio for dividend %s\" % {\n 'sid': sid,\n 'ex_date': ex_date,\n 'amount': amount,\n })\n continue\n\n # Create a mask to filter out indices in the effective_date, sid, and\n # ratio vectors for which a ratio was not calculable.\n effective_mask = effective_dates != -1\n effective_dates = effective_dates[effective_mask]\n effective_dates = effective_dates.astype('datetime64[ns]').\\\n astype('datetime64[s]').astype(uint32)\n sids = sids[effective_mask]\n ratios = ratios[effective_mask]\n\n return DataFrame({\n 'sid': sids,\n 'effective_date': effective_dates,\n 'ratio': ratios,\n })\n\n def _write_dividends(self, dividends):\n if dividends is None:\n dividend_payouts = None\n else:\n dividend_payouts = dividends.copy()\n dividend_payouts['ex_date'] = dividend_payouts['ex_date'].values.\\\n astype('datetime64[s]').astype(integer)\n dividend_payouts['record_date'] = \\\n dividend_payouts['record_date'].values.astype('datetime64[s]').\\\n astype(integer)\n dividend_payouts['declared_date'] = \\\n dividend_payouts['declared_date'].values.astype('datetime64[s]').\\\n astype(integer)\n dividend_payouts['pay_date'] = \\\n dividend_payouts['pay_date'].values.astype('datetime64[s]').\\\n astype(integer)\n\n self.write_dividend_payouts(dividend_payouts)\n\n def _write_stock_dividends(self, stock_dividends):\n if stock_dividends is None:\n stock_dividend_payouts = None\n else:\n stock_dividend_payouts = stock_dividends.copy()\n stock_dividend_payouts['ex_date'] = \\\n stock_dividend_payouts['ex_date'].values.\\\n astype('datetime64[s]').astype(integer)\n stock_dividend_payouts['record_date'] = \\\n stock_dividend_payouts['record_date'].values.\\\n astype('datetime64[s]').astype(integer)\n stock_dividend_payouts['declared_date'] = \\\n stock_dividend_payouts['declared_date'].\\\n values.astype('datetime64[s]').astype(integer)\n stock_dividend_payouts['pay_date'] = \\\n stock_dividend_payouts['pay_date'].\\\n 
values.astype('datetime64[s]').astype(integer)\n self.write_stock_dividend_payouts(stock_dividend_payouts)\n\n def write_dividend_data(self, dividends, stock_dividends=None):\n \"\"\"\n Write both dividend payouts and the derived price adjustment ratios.\n \"\"\"\n\n # First write the dividend payouts.\n self._write_dividends(dividends)\n self._write_stock_dividends(stock_dividends)\n\n # Second from the dividend payouts, calculate ratios.\n dividend_ratios = self.calc_dividend_ratios(dividends)\n self.write_frame('dividends', dividend_ratios)\n\n def write(self,\n splits=None,\n mergers=None,\n dividends=None,\n stock_dividends=None):\n \"\"\"\n Writes data to a SQLite file to be read by SQLiteAdjustmentReader.\n\n Parameters\n ----------\n splits : pandas.DataFrame, optional\n Dataframe containing split data. The format of this dataframe is:\n effective_date : int\n The date, represented as seconds since Unix epoch, on which\n the adjustment should be applied.\n ratio : float\n A value to apply to all data earlier than the effective date.\n For open, high, low, and close those values are multiplied by\n the ratio. Volume is divided by this value.\n sid : int\n The asset id associated with this adjustment.\n mergers : pandas.DataFrame, optional\n DataFrame containing merger data. The format of this dataframe is:\n effective_date : int\n The date, represented as seconds since Unix epoch, on which\n the adjustment should be applied.\n ratio : float\n A value to apply to all data earlier than the effective date.\n For open, high, low, and close those values are multiplied by\n the ratio. Volume is unaffected.\n sid : int\n The asset id associated with this adjustment.\n dividends : pandas.DataFrame, optional\n DataFrame containing dividend data. The format of the dataframe is:\n sid : int\n The asset id associated with this adjustment.\n ex_date : datetime64\n The date on which an equity must be held to be eligible to\n receive payment.\n declared_date : datetime64\n The date on which the dividend is announced to the public.\n pay_date : datetime64\n The date on which the dividend is distributed.\n record_date : datetime64\n The date on which the stock ownership is checked to determine\n distribution of dividends.\n amount : float\n The cash amount paid for each share.\n\n Dividend ratios are calculated as:\n ``1.0 - (dividend_value / \"close on day prior to ex_date\")``\n stock_dividends : pandas.DataFrame, optional\n DataFrame containing stock dividend data. 
The format of the\n dataframe is:\n sid : int\n The asset id associated with this adjustment.\n ex_date : datetime64\n The date on which an equity must be held to be eligible to\n receive payment.\n declared_date : datetime64\n The date on which the dividend is announced to the public.\n pay_date : datetime64\n The date on which the dividend is distributed.\n record_date : datetime64\n The date on which the stock ownership is checked to determine\n distribution of dividends.\n payment_sid : int\n The asset id of the shares that should be paid instead of\n cash.\n ratio : float\n The ratio of currently held shares in the held sid that\n should be paid with new shares of the payment_sid.\n\n See Also\n --------\n zipline.data.us_equity_pricing.SQLiteAdjustmentReader\n \"\"\"\n self.write_frame('splits', splits)\n self.write_frame('mergers', mergers)\n self.write_dividend_data(dividends, stock_dividends)\n self.conn.execute(\n \"CREATE INDEX splits_sids \"\n \"ON splits(sid)\"\n )\n self.conn.execute(\n \"CREATE INDEX splits_effective_date \"\n \"ON splits(effective_date)\"\n )\n self.conn.execute(\n \"CREATE INDEX mergers_sids \"\n \"ON mergers(sid)\"\n )\n self.conn.execute(\n \"CREATE INDEX mergers_effective_date \"\n \"ON mergers(effective_date)\"\n )\n self.conn.execute(\n \"CREATE INDEX dividends_sid \"\n \"ON dividends(sid)\"\n )\n self.conn.execute(\n \"CREATE INDEX dividends_effective_date \"\n \"ON dividends(effective_date)\"\n )\n self.conn.execute(\n \"CREATE INDEX dividend_payouts_sid \"\n \"ON dividend_payouts(sid)\"\n )\n self.conn.execute(\n \"CREATE INDEX dividends_payouts_ex_date \"\n \"ON dividend_payouts(ex_date)\"\n )\n self.conn.execute(\n \"CREATE INDEX stock_dividend_payouts_sid \"\n \"ON stock_dividend_payouts(sid)\"\n )\n self.conn.execute(\n \"CREATE INDEX stock_dividends_payouts_ex_date \"\n \"ON stock_dividend_payouts(ex_date)\"\n )\n\n def close(self):\n self.conn.close()\n\n\nUNPAID_QUERY_TEMPLATE = \"\"\"\nSELECT sid, amount, pay_date from dividend_payouts\nWHERE ex_date=? AND sid IN ({0})\n\"\"\"\n\nDividend = namedtuple('Dividend', ['asset', 'amount', 'pay_date'])\n\nUNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE = \"\"\"\nSELECT sid, payment_sid, ratio, pay_date from stock_dividend_payouts\nWHERE ex_date=? 
AND sid IN ({0})\n\"\"\"\n\nStockDividend = namedtuple(\n 'StockDividend',\n ['asset', 'payment_asset', 'ratio', 'pay_date'])\n\n\nclass SQLiteAdjustmentReader(object):\n \"\"\"\n Loads adjustments based on corporate actions from a SQLite database.\n\n Expects data written in the format output by `SQLiteAdjustmentWriter`.\n\n Parameters\n ----------\n conn : str or sqlite3.Connection\n Connection from which to load data.\n\n See Also\n --------\n zipline.data.us_equity_pricing.SQLiteAdjustmentWriter\n \"\"\"\n\n @preprocess(conn=coerce_string(sqlite3.connect))\n def __init__(self, conn):\n self.conn = conn\n\n def load_adjustments(self, columns, dates, assets):\n return load_adjustments_from_sqlite(\n self.conn,\n list(columns),\n dates,\n assets,\n )\n\n def get_adjustments_for_sid(self, table_name, sid):\n t = (sid,)\n c = self.conn.cursor()\n adjustments_for_sid = c.execute(\n \"SELECT effective_date, ratio FROM %s WHERE sid = ?\" %\n table_name, t).fetchall()\n c.close()\n\n return [[Timestamp(adjustment[0], unit='s', tz='UTC'), adjustment[1]]\n for adjustment in\n adjustments_for_sid]\n\n def get_dividends_with_ex_date(self, assets, date, asset_finder):\n seconds = date.value / int(1e9)\n c = self.conn.cursor()\n\n divs = []\n for chunk in group_into_chunks(assets):\n query = UNPAID_QUERY_TEMPLATE.format(\n \",\".join(['?' for _ in chunk]))\n t = (seconds,) + tuple(map(lambda x: int(x), chunk))\n\n c.execute(query, t)\n\n rows = c.fetchall()\n for row in rows:\n div = Dividend(\n asset_finder.retrieve_asset(row[0]),\n row[1], Timestamp(row[2], unit='s', tz='UTC'))\n divs.append(div)\n c.close()\n\n return divs\n\n def get_stock_dividends_with_ex_date(self, assets, date, asset_finder):\n seconds = date.value / int(1e9)\n c = self.conn.cursor()\n\n stock_divs = []\n for chunk in group_into_chunks(assets):\n query = UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE.format(\n \",\".join(['?' for _ in chunk]))\n t = (seconds,) + tuple(map(lambda x: int(x), chunk))\n\n c.execute(query, t)\n\n rows = c.fetchall()\n\n for row in rows:\n stock_div = StockDividend(\n asset_finder.retrieve_asset(row[0]), # asset\n asset_finder.retrieve_asset(row[1]), # payment_asset\n row[2],\n Timestamp(row[3], unit='s', tz='UTC'))\n stock_divs.append(stock_div)\n c.close()\n\n return stock_divs\n"
] |
[
[
"pandas.isnull",
"numpy.issubdtype",
"numpy.nan_to_num",
"pandas.DatetimeIndex",
"pandas.DataFrame",
"numpy.full",
"numpy.iinfo",
"numpy.array",
"pandas.Timestamp",
"numpy.zeros"
]
] |
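winsorise_uint32 in the file above zeroes any record that cannot be stored as a uint32 before OHLCV data is scaled and written. A minimal sketch of that bounds check on hypothetical data (the frame contents here are illustrative, not from the dataset, and the reporting in the 'warn' and 'raise' branches is omitted):

import numpy as np
import pandas as pd

UINT32_MAX = np.iinfo(np.uint32).max

df = pd.DataFrame({'close': [101.25, np.nan, 6e9],
                   'volume': [1000.0, 2000.0, 3000.0]})
# Flag values too large for uint32, plus NaNs that would corrupt the cast.
mask = (df > UINT32_MAX) | df.isnull()
if mask.values.any():
    df[mask] = 0  # zero out the offending cells, as the 'warn' path does
print(df)

The same file derives dividend adjustment ratios as 1.0 - amount / prev_close, so a 1.00 dividend against a 50.00 prior close yields a backward-looking price ratio of 0.98.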
DemisseG/Approximately_Covariant_CNN-
|
[
"19c4b2363083ba25a2af17a2672da330ae9ed7fc"
] |
[
"ac/utils.py"
] |
[
"from __future__ import absolute_import, division\nfrom typing import Tuple, List\n\nimport math\nimport numpy\nimport torch\n\n\ndef scheduled_learning(init: float, current_epoch: int, total_epoch: int) -> int:\n scale = lambda x, y :x * math.pow(0.2, y)\n if(current_epoch > int(total_epoch*0.8)):\n return scale(init, 3)\n elif(current_epoch > int(total_epoch*0.6)):\n return scale(init, 2)\n elif(current_epoch > int(total_epoch*0.3)):\n return scale(init, 1)\n else:\n return init\n\n\ndef squeeze_entropy(epoch: int) -> torch.Tensor:\n if epoch < 25:\n return torch.tensor(0.1, dtype=torch.float32)\n elif epoch < 50 and epoch >= 25:\n return torch.tensor(1.0, dtype=torch.float32) \n elif epoch < 75 and epoch >= 50:\n return torch.tensor(10.0, dtype=torch.float32)\n elif epoch < 100 and epoch >= 75:\n return torch.tensor(100.0, dtype=torch.float32)\n\n\ndef rotation(period: float) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\n # returns counterclockwise rotation with mod(\\theta, period * math.pi) = 0\n trans, i_trans = [], []\n for j in numpy.arange(0.0, 2.0, period): \n angle=torch.tensor(math.pi) * j\n temp = torch.tensor([[torch.cos(angle), -1 * torch.sin(angle)],\n [torch.sin(angle), torch.cos(angle)]])\n i_trans.append(torch.cat([temp.t(), torch.zeros(2,1)], 1))\n trans.append(torch.cat([temp, torch.zeros(2,1)], 1))\n return trans, i_trans\n\n\ndef scaling(min_s: float=0.75, max_s: float=2.0, nums: int=4) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\n # returns unfirom scaling\n res, Ires = [], []\n steps = float(max_s - min_s) / float(nums)\n scales = list(numpy.arange(min_s, max_s, steps))\n for i in scales:\n trans = torch.tensor([[1.0 / float(i), 0.0, 0.0],[0.0, 1.0 / float(i), 0.0]], dtype=torch.float32)\n itrans = torch.tensor([[float(i), 0.0, 0.0],[0.0,float(i), 0.0]], dtype=torch.float32)\n res += [trans]\n Ires += [itrans]\n\n return res, Ires\n\n\ndef reflection() -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\n ref = torch.tensor([[-1.0, 0.0, 0.0],[0.0, 1.0, 0.0]], dtype=torch.float32)\n return [ref], [ref]\n\n\ndef scale_ref() -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\n # Non-separable transformation composition of scaling and reflection.\n scale, i_scale = scaling()\n ref, _ = reflection()\n scale_ref, i_scale_ref = scale , i_scale\n \n for i in range(len(scale)):\n ## non-commutative composition\n scale_ref[i][:,:2] = torch.mm(ref[0][:,:2], scale[i][:,:2])\n i_scale_ref[i][:,:2] = torch.mm(i_scale[i][:,:2], ref[0][:,:2])\n\n return scale_ref, i_scale_ref\n\n\ndef domain_trans(tra_type: str) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:\n import config \n \n def get_trans(key_id):\n if key_id == 'rot':\n t, it = rotation(0.5)\n elif key_id == 'rot_ext':\n t, it = rotation(0.25)\n elif key_id == 'ref':\n t, it = reflection()\n elif key_id == 'scale':\n t, it = scaling()\n elif key_id == 'scale_ref':\n t, it = scale_ref()\n return t, it\n\n assert tra_type in config.AUGMENTED_TRANS_SET, \"Error: unkown transformation!\" \n return get_trans(tra_type)\n"
] |
[
[
"torch.mm",
"torch.sin",
"torch.zeros",
"numpy.arange",
"torch.tensor",
"torch.cos"
]
] |
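rotation(period) in the utils file above returns pairs of 2x3 affine matrices: a rotation block with a zero translation column, plus its inverse, which for a rotation is simply the transpose. A minimal sketch of one such forward/inverse pair for a single angle (standalone illustration; rotation_pair is our name, not the dataset's):

import math
import torch

def rotation_pair(theta: float):
    c, s = math.cos(theta), math.sin(theta)
    rot = torch.tensor([[c, -s], [s, c]])
    # Append a zero translation column to get the 2x3 affine form.
    fwd = torch.cat([rot, torch.zeros(2, 1)], 1)
    inv = torch.cat([rot.t(), torch.zeros(2, 1)], 1)
    return fwd, inv

fwd, inv = rotation_pair(0.5 * math.pi)
print(fwd.shape, inv.shape)  # torch.Size([2, 3]) torch.Size([2, 3])
# Round trip: the inverse rotation block undoes the forward one.
print(torch.allclose(inv[:, :2] @ fwd[:, :2], torch.eye(2), atol=1e-6))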
karnation22/tmpstpvol
|
[
"1a25c4847af496c2fdd7092a56c2e0dffa6c75a9"
] |
[
"triples_from_text.py"
] |
[
"## NOT KARN'S WORK!\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport pandas as pd\r\nimport argparse\r\nimport re\r\nimport spacy\r\nfrom spacy.attrs import intify_attrs\r\nimport en_core_web_sm\r\nnlp = spacy.load(\"en_core_web_sm\")\r\n\r\nimport neuralcoref\r\n\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\n#nltk.download('stopwords')\r\nfrom nltk.corpus import stopwords\r\nall_stop_words = ['many', 'us', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday',\r\n 'today', 'january', 'february', 'march', 'april', 'may', 'june', 'july', 'august',\r\n 'september', 'october', 'november', 'december', 'today', 'old', 'new']\r\nall_stop_words = sorted(list(set(all_stop_words + list(stopwords.words('english')))))\r\n\r\nabspath = os.path.abspath('') ## String which contains absolute path to the script file\r\n#print(abspath)\r\nos.chdir(abspath)\r\n\r\n### ==================================================================================================\r\n# Tagger\r\n\r\ndef get_tags_spacy(nlp, text):\r\n doc = nlp(text) \r\n entities_spacy = [] # Entities that Spacy NER found\r\n for ent in doc.ents:\r\n entities_spacy.append([ent.text, ent.start_char, ent.end_char, ent.label_])\r\n return entities_spacy\r\n\r\ndef tag_all(nlp, text, entities_spacy):\r\n ## print(\"here3_1\")\r\n if ('neuralcoref' in nlp.pipe_names):\r\n nlp.pipeline.remove('neuralcoref') \r\n ## print(\"here3_2\") \r\n neuralcoref.add_to_pipe(nlp) # Add neural coref to SpaCy's pipe \r\n ## print(\"here3_3\") \r\n ## print(\"text3_3_2\", text)\r\n doc = nlp(text)\r\n ## print(\"here3_4\")\r\n return doc\r\n\r\ndef filter_spans(spans):\r\n # Filter a sequence of spans so they don't contain overlaps\r\n get_sort_key = lambda span: (span.end - span.start, span.start)\r\n sorted_spans = sorted(spans, key=get_sort_key, reverse=True)\r\n result = []\r\n seen_tokens = set()\r\n for span in sorted_spans:\r\n if span.start not in seen_tokens and span.end - 1 not in seen_tokens:\r\n result.append(span)\r\n seen_tokens.update(range(span.start, span.end))\r\n return result\r\n\r\ndef tag_chunks(doc):\r\n spans = list(doc.ents) + list(doc.noun_chunks)\r\n spans = filter_spans(spans)\r\n with doc.retokenize() as retokenizer:\r\n string_store = doc.vocab.strings\r\n for span in spans:\r\n start = span.start\r\n end = span.end\r\n retokenizer.merge(doc[start: end], attrs=intify_attrs({'ent_type': 'ENTITY'}, string_store))\r\n\r\ndef tag_chunks_spans(doc, spans, ent_type):\r\n spans = filter_spans(spans)\r\n with doc.retokenize() as retokenizer:\r\n string_store = doc.vocab.strings\r\n for span in spans:\r\n start = span.start\r\n end = span.end\r\n retokenizer.merge(doc[start: end], attrs=intify_attrs({'ent_type': ent_type}, string_store))\r\n\r\ndef clean(text):\r\n ## print(\"text: \",text)\r\n text = text.strip('[(),- :\\'\\\"\\n]\\s*')\r\n text = text.replace('—', ' - ')\r\n ## print(\"here1\")\r\n text = re.sub('([A-Za-z0-9\\)]{2,}\\.)([A-Z]+[a-z]*)', r\"\\g<1> \\g<2>\", text, flags=re.UNICODE)\r\n ## print(\"here2\")\r\n text = re.sub('([A-Za-z0-9]{2,}\\.)(\\\"\\w+)', r\"\\g<1> \\g<2>\", text, flags=re.UNICODE)\r\n ## print(\"here3\")\r\n text = re.sub('([A-Za-z0-9]{2,}\\.\\/)(\\w+)', r\"\\g<1> \\g<2>\", text, flags=re.UNICODE)\r\n ## print(\"here4\")\r\n text = re.sub('([[A-Z]{1}[[.]{1}[[A-Z]{1}[[.]{1}) ([[A-Z]{1}[a-z]{1,2} )', r\"\\g<1> . 
\\g<2>\", text, flags=re.UNICODE)\r\n ## print(\"here5\")\r\n text = re.sub('([A-Za-z]{3,}\\.)([A-Z]+[a-z]+)', r\"\\g<1> \\g<2>\", text, flags=re.UNICODE)\r\n ## print(\"here6\")\r\n text = re.sub('([[A-Z]{1}[[.]{1}[[A-Z]{1}[[.]{1}) ([[A-Z]{1}[a-z]{1,2} )', r\"\\g<1> . \\g<2>\", text, flags=re.UNICODE)\r\n ## print(\"here7\")\r\n text = re.sub('([A-Za-z0-9]{2,}\\.)([A-Za-z]+)', r\"\\g<1> \\g<2>\", text, flags=re.UNICODE)\r\n ## print(\"here8\")\r\n \r\n text = re.sub('’', \"'\", text, flags=re.UNICODE) # curly apostrophe\r\n ## print(\"here9\")\r\n text = re.sub('‘', \"'\", text, flags=re.UNICODE) # curly apostrophe\r\n ## print(\"here10\")\r\n text = re.sub('“', ' \"', text, flags=re.UNICODE)\r\n ## print(\"here11\")\r\n text = re.sub('”', ' \"', text, flags=re.UNICODE)\r\n ## print(\"here12\")\r\n text = re.sub(\"\\|\", \", \", text, flags=re.UNICODE)\r\n ## print(\"here13\")\r\n text = text.replace('\\t', ' ')\r\n \r\n text = re.sub('…', '.', text, flags=re.UNICODE) # elipsis\r\n ## print(\"here14\")\r\n text = re.sub('…', '.', text, flags=re.UNICODE) \r\n ## print(\"here15\") \r\n text = re.sub('–', '-', text) # long hyphen\r\n ## print(\"here16\")\r\n text = re.sub('\\s+', ' ', text, flags=re.UNICODE).strip()\r\n ## print(\"here17\")\r\n text = re.sub(' – ', ' . ', text, flags=re.UNICODE).strip()\r\n ## print(\"here18\")\r\n ## print(\"text: \",text)\r\n return text\r\n\r\ndef tagger(text): \r\n df_out = pd.DataFrame(columns=['Document#', 'Sentence#', 'Word#', 'Word', 'EntityType', 'EntityIOB', 'Lemma', 'POS', 'POSTag', 'Start', 'End', 'Dependency'])\r\n corefs = []\r\n text = clean(text)\r\n ## print(\"here2_1\")\r\n nlp = spacy.load(\"en_core_web_sm\")\r\n ## print(\"here2_2\")\r\n entities_spacy = get_tags_spacy(nlp, text)\r\n ## print(\"here2_3\")\r\n ## print(\"SPACY entities:\\n\", ([ent for ent in entities_spacy]), '\\n\\n')\r\n document = tag_all(nlp, text, entities_spacy)\r\n ## print(\"here2_4\")\r\n print(document)\r\n #for token in document:\r\n # print([token.i, token.text, token.ent_type_, token.ent_iob_, token.lemma_, token.pos_, token.tag_, token.idx, token.idx+len(token)-1, token.dep_])\r\n \r\n ### Coreferences\r\n if document._.has_coref:\r\n for cluster in document._.coref_clusters:\r\n main = cluster.main\r\n for m in cluster.mentions: \r\n if (str(m).strip() == str(main).strip()):\r\n continue\r\n corefs.append([str(m), str(main)])\r\n tag_chunks(document) \r\n \r\n # chunk - somethin OF something\r\n spans_change = []\r\n for i in range(2, len(document)):\r\n w_left = document[i-2]\r\n w_middle = document[i-1]\r\n w_right = document[i]\r\n if w_left.dep_ == 'attr':\r\n continue\r\n if w_left.ent_type_ == 'ENTITY' and w_right.ent_type_ == 'ENTITY' and (w_middle.text == 'of'): # or w_middle.text == 'for'): # or w_middle.text == 'with'\r\n spans_change.append(document[w_left.i : w_right.i + 1])\r\n tag_chunks_spans(document, spans_change, 'ENTITY')\r\n \r\n # chunk verbs with multiple words: 'were exhibited'\r\n spans_change_verbs = []\r\n for i in range(1, len(document)):\r\n w_left = document[i-1]\r\n w_right = document[i]\r\n if w_left.pos_ == 'VERB' and (w_right.pos_ == 'VERB'):\r\n spans_change_verbs.append(document[w_left.i : w_right.i + 1])\r\n tag_chunks_spans(document, spans_change_verbs, 'VERB')\r\n\r\n # chunk: verb + adp; verb + part \r\n spans_change_verbs = []\r\n for i in range(1, len(document)):\r\n w_left = document[i-1]\r\n w_right = document[i]\r\n if w_left.pos_ == 'VERB' and (w_right.pos_ == 'ADP' or w_right.pos_ == 'PART'):\r\n 
spans_change_verbs.append(document[w_left.i : w_right.i + 1])\r\n tag_chunks_spans(document, spans_change_verbs, 'VERB')\r\n\r\n # chunk: adp + verb; part + verb\r\n spans_change_verbs = []\r\n for i in range(1, len(document)):\r\n w_left = document[i-1]\r\n w_right = document[i]\r\n if w_right.pos_ == 'VERB' and (w_left.pos_ == 'ADP' or w_left.pos_ == 'PART'):\r\n spans_change_verbs.append(document[w_left.i : w_right.i + 1])\r\n tag_chunks_spans(document, spans_change_verbs, 'VERB')\r\n \r\n # chunk verbs with multiple words: 'were exhibited'\r\n spans_change_verbs = []\r\n for i in range(1, len(document)):\r\n w_left = document[i-1]\r\n w_right = document[i]\r\n if w_left.pos_ == 'VERB' and (w_right.pos_ == 'VERB'):\r\n spans_change_verbs.append(document[w_left.i : w_right.i + 1])\r\n tag_chunks_spans(document, spans_change_verbs, 'VERB')\r\n\r\n # chunk all between LRB- -RRB- (something between brackets)\r\n start = 0\r\n end = 0\r\n spans_between_brackets = []\r\n for i in range(0, len(document)):\r\n if ('-LRB-' == document[i].tag_ or r\"(\" in document[i].text):\r\n start = document[i].i\r\n continue\r\n if ('-RRB-' == document[i].tag_ or r')' in document[i].text):\r\n end = document[i].i + 1\r\n if (end > start and not start == 0):\r\n span = document[start:end]\r\n try:\r\n assert (u\"(\" in span.text and u\")\" in span.text)\r\n except:\r\n pass\r\n #print(span)\r\n spans_between_brackets.append(span)\r\n start = 0\r\n end = 0\r\n tag_chunks_spans(document, spans_between_brackets, 'ENTITY')\r\n \r\n # chunk entities\r\n spans_change_verbs = []\r\n for i in range(1, len(document)):\r\n w_left = document[i-1]\r\n w_right = document[i]\r\n if w_left.ent_type_ == 'ENTITY' and w_right.ent_type_ == 'ENTITY':\r\n spans_change_verbs.append(document[w_left.i : w_right.i + 1])\r\n tag_chunks_spans(document, spans_change_verbs, 'ENTITY')\r\n \r\n doc_id = 1\r\n count_sentences = 0\r\n prev_dep = 'nsubj'\r\n for token in document:\r\n if (token.dep_ == 'ROOT'):\r\n if token.pos_ == 'VERB':\r\n df_out.loc[len(df_out)] = [doc_id, count_sentences, token.i, token.text, token.ent_type_, token.ent_iob_, token.lemma_, token.pos_, token.tag_, token.idx, token.idx+len(token)-1, token.dep_]\r\n else:\r\n df_out.loc[len(df_out)] = [doc_id, count_sentences, token.i, token.text, token.ent_type_, token.ent_iob_, token.lemma_, token.pos_, token.tag_, token.idx, token.idx+len(token)-1, prev_dep]\r\n else:\r\n df_out.loc[len(df_out)] = [doc_id, count_sentences, token.i, token.text, token.ent_type_, token.ent_iob_, token.lemma_, token.pos_, token.tag_, token.idx, token.idx+len(token)-1, token.dep_]\r\n \r\n if (token.text == '.'):\r\n count_sentences += 1\r\n prev_dep = token.dep_\r\n \r\n return df_out, corefs\r\n\r\n### ==================================================================================================\r\n### triple extractor\r\n\r\ndef get_predicate(s):\r\n pred_ids = {}\r\n for w, index, spo in s:\r\n if spo == 'predicate' and w != \"'s\" and w != \"\\\"\": #= 11.95\r\n pred_ids[index] = w\r\n predicates = {}\r\n for key, value in pred_ids.items():\r\n predicates[key] = value\r\n return predicates\r\n\r\ndef get_subjects(s, start, end, adps):\r\n subjects = {}\r\n for w, index, spo in s:\r\n if index >= start and index <= end:\r\n if 'subject' in spo or 'entity' in spo or 'object' in spo:\r\n subjects[index] = w\r\n return subjects\r\n \r\ndef get_objects(s, start, end, adps):\r\n objects = {}\r\n for w, index, spo in s:\r\n if index >= start and index <= end:\r\n if 'object' in spo or 
'entity' in spo or 'subject' in spo:\r\n objects[index] = w\r\n return objects\r\n\r\ndef get_positions(s, start, end):\r\n adps = {}\r\n for w, index, spo in s: \r\n if index >= start and index <= end:\r\n if 'of' == spo or 'at' == spo:\r\n adps[index] = w\r\n return adps\r\n\r\ndef create_triples(df_text, corefs):\r\n sentences = []\r\n aSentence = []\r\n \r\n for index, row in df_text.iterrows():\r\n d_id, s_id, word_id, word, ent, ent_iob, lemma, cg_pos, pos, start, end, dep = row.items()\r\n if 'subj' in dep[1]:\r\n aSentence.append([word[1], word_id[1], 'subject'])\r\n elif 'ROOT' in dep[1] or 'VERB' in cg_pos[1] or pos[1] == 'IN':\r\n aSentence.append([word[1], word_id[1], 'predicate'])\r\n elif 'obj' in dep[1]:\r\n aSentence.append([word[1], word_id[1], 'object'])\r\n elif ent[1] == 'ENTITY':\r\n aSentence.append([word[1], word_id[1], 'entity']) \r\n elif word[1] == '.':\r\n sentences.append(aSentence)\r\n aSentence = []\r\n else:\r\n aSentence.append([word[1], word_id[1], pos[1]])\r\n \r\n relations = []\r\n #loose_entities = []\r\n for s in sentences:\r\n if len(s) == 0: continue\r\n preds = get_predicate(s) # Get all verbs\r\n \"\"\"\r\n if preds == {}: \r\n preds = {p[1]:p[0] for p in s if (p[2] == 'JJ' or p[2] == 'IN' or p[2] == 'CC' or\r\n p[2] == 'RP' or p[2] == ':' or p[2] == 'predicate' or\r\n p[2] =='-LRB-' or p[2] =='-RRB-') }\r\n if preds == {}:\r\n #print('\\npred = 0', s)\r\n preds = {p[1]:p[0] for p in s if (p[2] == ',')}\r\n if preds == {}:\r\n ents = [e[0] for e in s if e[2] == 'entity']\r\n if (ents):\r\n loose_entities = ents # not significant for now\r\n #print(\"Loose entities = \", ents)\r\n \"\"\"\r\n if preds:\r\n if (len(preds) == 1):\r\n #print(\"preds = \", preds)\r\n predicate = list(preds.values())[0]\r\n if (len(predicate) < 2):\r\n predicate = 'is'\r\n #print(s)\r\n ents = [e[0] for e in s if e[2] == 'entity']\r\n #print('ents = ', ents)\r\n for i in range(1, len(ents)):\r\n relations.append([ents[0], predicate, ents[i]])\r\n\r\n pred_ids = list(preds.keys())\r\n pred_ids.append(s[0][1])\r\n pred_ids.append(s[len(s)-1][1])\r\n pred_ids.sort()\r\n \r\n for i in range(1, len(pred_ids)-1):\r\n predicate = preds[pred_ids[i]]\r\n adps_subjs = get_positions(s, pred_ids[i-1], pred_ids[i])\r\n subjs = get_subjects(s, pred_ids[i-1], pred_ids[i], adps_subjs)\r\n adps_objs = get_positions(s, pred_ids[i], pred_ids[i+1])\r\n objs = get_objects(s, pred_ids[i], pred_ids[i+1], adps_objs)\r\n for k_s, subj in subjs.items(): \r\n for k_o, obj in objs.items():\r\n obj_prev_id = int(k_o) - 1\r\n if obj_prev_id in adps_objs: # at, in, of\r\n relations.append([subj, predicate + ' ' + adps_objs[obj_prev_id], obj])\r\n else:\r\n relations.append([subj, predicate, obj])\r\n \r\n ### Read coreferences: coreference files are TAB separated values\r\n coreferences = []\r\n for val in corefs:\r\n if val[0].strip() != val[1].strip():\r\n if len(val[0]) <= 50 and len(val[1]) <= 50:\r\n co_word = val[0]\r\n real_word = val[1].strip('[,- \\'\\n]*')\r\n real_word = re.sub(\"'s$\", '', real_word, flags=re.UNICODE)\r\n if (co_word != real_word):\r\n coreferences.append([co_word, real_word])\r\n else:\r\n co_word = val[0]\r\n real_word = ' '.join((val[1].strip('[,- \\'\\n]*')).split()[:7])\r\n real_word = re.sub(\"'s$\", '', real_word, flags=re.UNICODE)\r\n if (co_word != real_word):\r\n coreferences.append([co_word, real_word])\r\n \r\n # Resolve corefs\r\n triples_object_coref_resolved = []\r\n triples_all_coref_resolved = []\r\n for s, p, o in relations:\r\n coref_resolved = False\r\n 
for co in coreferences:\r\n if (s == co[0]):\r\n subj = co[1]\r\n triples_object_coref_resolved.append([subj, p, o])\r\n coref_resolved = True\r\n break\r\n if not coref_resolved:\r\n triples_object_coref_resolved.append([s, p, o])\r\n\r\n for s, p, o in triples_object_coref_resolved:\r\n coref_resolved = False\r\n for co in coreferences:\r\n if (o == co[0]):\r\n obj = co[1]\r\n triples_all_coref_resolved.append([s, p, obj])\r\n coref_resolved = True\r\n break\r\n if not coref_resolved:\r\n triples_all_coref_resolved.append([s, p, o])\r\n return(triples_all_coref_resolved)\r\n\r\n### ==================================================================================================\r\n## Get more using Network shortest_paths\r\n\r\ndef get_graph(triples):\r\n G = nx.DiGraph()\r\n for s, p, o in triples:\r\n G.add_edge(s, o, key=p)\r\n return G\r\n\r\ndef get_entities_with_capitals(G):\r\n entities = []\r\n for node in G.nodes():\r\n if (any(ch.isupper() for ch in list(node))):\r\n entities.append(node)\r\n return entities\r\n\r\ndef get_paths_between_capitalised_entities(triples):\r\n \r\n g = get_graph(triples)\r\n ents_capitals = get_entities_with_capitals(g)\r\n paths = []\r\n #print('\\nShortest paths among capitalised words -------------------')\r\n for i in range(0, len(ents_capitals)):\r\n n1 = ents_capitals[i]\r\n for j in range(1, len(ents_capitals)):\r\n try:\r\n n2 = ents_capitals[j]\r\n path = nx.shortest_path(g, source=n1, target=n2)\r\n if path and len(path) > 2:\r\n paths.append(path)\r\n path = nx.shortest_path(g, source=n2, target=n1)\r\n if path and len(path) > 2:\r\n paths.append(path)\r\n except Exception:\r\n continue\r\n return g, paths\r\n\r\ndef get_paths(doc_triples):\r\n triples = []\r\n g, paths = get_paths_between_capitalised_entities(doc_triples)\r\n for p in paths:\r\n path = [(u, g[u][v]['key'], v) for (u, v) in zip(p[0:], p[1:])]\r\n length = len(p)\r\n if (path[length-2][1] == 'in' or path[length-2][1] == 'at' or path[length-2][1] == 'on'):\r\n if [path[0][0], path[length-2][1], path[length-2][2]] not in triples:\r\n triples.append([path[0][0], path[length-2][1], path[length-2][2]])\r\n elif (' in' in path[length-2][1] or ' at' in path[length-2][1] or ' on' in path[length-2][1]):\r\n if [path[0][0], path[length-2][1], path[length-2][2]] not in triples:\r\n triples.append([path[0][0], 'in', path[length-2][2]])\r\n for t in doc_triples:\r\n if t not in triples:\r\n triples.append(t)\r\n return triples\r\n\r\ndef get_center(nodes):\r\n center = ''\r\n if (len(nodes) == 1):\r\n center = nodes[0]\r\n else: \r\n # Capital letters and longer is preferred\r\n cap_ents = [e for e in nodes if any(x.isupper() for x in e)]\r\n if (cap_ents):\r\n center = max(cap_ents, key=len)\r\n else:\r\n center = max(nodes, key=len)\r\n return center\r\n\r\ndef connect_graphs(mytriples):\r\n G = nx.DiGraph()\r\n for s, p, o in mytriples:\r\n G.add_edge(s, o, p=p) \r\n \r\n \"\"\"\r\n # Get components\r\n graphs = list(nx.connected_component_subgraphs(G.to_undirected()))\r\n \r\n # Get the largest component\r\n largest_g = max(graphs, key=len)\r\n largest_graph_center = ''\r\n largest_graph_center = get_center(nx.center(largest_g))\r\n \r\n # for each graph, find the centre node\r\n smaller_graph_centers = []\r\n for g in graphs: \r\n center = get_center(nx.center(g))\r\n smaller_graph_centers.append(center)\r\n\r\n for n in smaller_graph_centers:\r\n if (largest_graph_center is not n):\r\n G.add_edge(largest_graph_center, n, p='with')\r\n \"\"\"\r\n return G\r\n \r\ndef 
rank_by_degree(mytriples): #, limit):\r\n G = connect_graphs(mytriples)\r\n degree_dict = dict(G.degree(G.nodes()))\r\n nx.set_node_attributes(G, degree_dict, 'degree')\r\n \r\n # Use this to draw the graph\r\n draw_graph_centrality(G, degree_dict)\r\n\r\n Egos = nx.DiGraph()\r\n for a, data in sorted(G.nodes(data=True), key=lambda x: x[1]['degree'], reverse=True):\r\n ego = nx.ego_graph(G, a)\r\n Egos.add_edges_from(ego.edges(data=True))\r\n Egos.add_nodes_from(ego.nodes(data=True))\r\n \r\n #if (nx.number_of_edges(Egos) > 20):\r\n # break\r\n \r\n ranked_triples = []\r\n for u, v, d in Egos.edges(data=True):\r\n ranked_triples.append([u, d['p'], v])\r\n return ranked_triples\r\n \r\ndef extract_triples(text):\r\n df_tagged, corefs = tagger(text)\r\n doc_triples = create_triples(df_tagged, corefs)\r\n all_triples = get_paths(doc_triples)\r\n filtered_triples = [] \r\n for s, p, o in all_triples:\r\n if ([s, p, o] not in filtered_triples):\r\n if s.lower() in all_stop_words or o.lower() in all_stop_words:\r\n continue\r\n elif s == p:\r\n continue\r\n if s.isdigit() or o.isdigit():\r\n continue\r\n if '%' in o or '%' in s: #= 11.96\r\n continue\r\n if (len(s) < 2) or (len(o) < 2):\r\n continue\r\n if (s.islower() and len(s) < 4) or (o.islower() and len(o) < 4):\r\n continue\r\n if s == o:\r\n continue \r\n subj = s.strip('[,- :\\'\\\"\\n]*')\r\n pred = p.strip('[- :\\'\\\"\\n]*.')\r\n obj = o.strip('[,- :\\'\\\"\\n]*')\r\n \r\n for sw in ['a', 'an', 'the', 'its', 'their', 'his', 'her', 'our', 'all', 'old', 'new', 'latest', 'who', 'that', 'this', 'these', 'those']:\r\n subj = ' '.join(word for word in subj.split() if not word == sw)\r\n obj = ' '.join(word for word in obj.split() if not word == sw)\r\n subj = re.sub(\"\\s\\s+\", \" \", subj)\r\n obj = re.sub(\"\\s\\s+\", \" \", obj)\r\n \r\n if subj and pred and obj:\r\n filtered_triples.append([subj, pred, obj])\r\n\r\n TRIPLES = rank_by_degree(filtered_triples)\r\n ## TRIPLES_ch = [ [translator.en2ch(source) for source in triple] for triple in TRIPLES]\r\n\r\n return TRIPLES\r\n\r\ndef draw_graph_centrality_helper(dictionary):\r\n dictVol, dictStress = dict(), dict()\r\n for dirItem in os.listdir():\r\n if(\"voice_stress_and_vol\" in dirItem): \r\n os.chdir(dirItem)\r\n break\r\n ## process voice stress first\r\n for item in dictionary:\r\n dictStress[item] = []\r\n for word in item.split():\r\n for subDirItem in os.listdir():\r\n if(word in subDirItem): ##if word in chunk in an rms file..\r\n with open(subDirItem,\"r\") as f_subDir:\r\n strSubDir = f_subDir.read().split()\r\n relVal = float(strSubDir[1])\r\n dictStress[item].append(relVal)\r\n break\r\n try: dictStress[item] = sum(dictStress[item])/len(dictStress[item])\r\n except: dictStress[item] = 0.5\r\n ## next process volume\r\n ## print(os.getcwd())\r\n for subDirItem in os.listdir():\r\n ## print(\"subDirItem: \",subDirItem)\r\n if(\"mono_RMS_vol\" in subDirItem):\r\n stemFile = subDirItem[:subDirItem.index(\"_\")]\r\n with open(subDirItem, \"r\") as f_sub_dir:\r\n allItems = f_sub_dir.readlines()\r\n break\r\n for item in dictionary:\r\n dictVol[item] = []\r\n for word in item.split():\r\n for volItem in allItems:\r\n if(word in volItem):\r\n relStr = volItem.strip()\r\n relVal = float(relStr.split()[1])\r\n dictVol[item].append(relVal)\r\n break\r\n try: dictVol[item] = sum(dictVol[item])/len(dictVol[item])\r\n except: dictVol[item] = 0.5\r\n \r\n return dictVol, dictStress, stemFile\r\n\r\n \r\ndef draw_graph_centrality(G, dictionary):\r\n plt.figure(figsize=(12,10))\r\n pos = 
nx.spring_layout(G)\r\n ## print(\"Nodes\\n\", G.nodes(True))\r\n ## print(\"Edges\\n\", G.edges())\r\n ## print(G.nodes().data())\r\n ## print(\"dictionary: \",dictionary)\r\n dictVol, dictStress, stemFile = draw_graph_centrality_helper(dictionary)\r\n ##dictVol, dictStress have same keys as dictionary; \r\n # values are 0.0 to 1.0 floating point of volume//voice stress..\r\n\r\n nx.draw_networkx_nodes(G, pos, \r\n nodelist=dictionary.keys(),\r\n with_labels=False,\r\n edge_color='black',\r\n width=1,\r\n linewidths=1,\r\n ## node_size = [v * 150 for v in dictionary.values()], ##minor change here... \r\n node_size= [v * 300 for v in dictVol.values()],\r\n ## node_color = 'blue', ##minor change here....\r\n node_color= [ i for i in dictStress.values()],\r\n alpha=0.5)\r\n edge_labels = {(u, v): d[\"p\"] for u, v, d in G.edges(data=True)}\r\n #print(edge_labels)\r\n nx.draw_networkx_edge_labels(G, pos,\r\n font_size=10,\r\n edge_labels=edge_labels,\r\n font_color='blue')\r\n nx.draw(G, pos, with_labels=True, node_size=1, node_color='blue')\r\n os.chdir(\"..\")\r\n networkXImageFile = \"{}networkXGraph.png\".format(stemFile)\r\n plt.savefig(networkXImageFile)\r\n if(\"{}_misc_data\".format(stemFile) not in os.listdir()):\r\n os.mkdir(\"{}_misc_data\".format(stemFile))\r\n os.system(\"mv {}textString.txt {}networkXGraph.png {}_misc_data\"\r\n .format(stemFile,stemFile,stemFile))\r\n \r\nif __name__ == \"__main__\":\r\n # \"\"\"\r\n # Celebrity chef Jamie Oliver's British restaurant chain has become insolvent, putting 1,300 jobs at risk. The firm said Tuesday that it had gone into administration, a form of bankruptcy protection, and appointed KPMG to oversee the process.The company operates 23 Jamie's Italian restaurants in the U.K. The company had been seeking buyers amid increased competition from casual dining rivals, according to The Guardian. Oliver began his restaurant empire in 2002 when he opened Fifteen in London. Oliver, known around the world for his cookbooks and television shows, said he was \"deeply saddened by this outcome and would like to thank all of the staff and our suppliers who have put their hearts and souls into this business for over a decade. \"He said \"I appreciate how difficult this is for everyone affected.\" I’m devastated that our much-loved UK restaurants have gone into administration.\r\n # \"\"\"\r\n # \"\"\"BYD debuted its E-SEED GT concept car and Song Pro SUV alongside its all-new e-series models at the Shanghai International Automobile Industry Exhibition. The company also showcased its latest Dynasty series of vehicles, which were recently unveiled at the company’s spring product launch in Beijing.\"\"\"\r\n parser = argparse.ArgumentParser(description=\"Please input a text file\")\r\n parser.add_argument(\"--text_file\",help=\"text file to be parsed..\", required=False, default=None)\r\n args = vars(parser.parse_args())\r\n if(args['text_file']==None or args['text_file'] not in os.listdir() or not args['text_file'].endswith(\".txt\")):\r\n text = \"\"\"\r\n BYD debuted its E-SEED GT concept car and Song Pro SUV alongside its all-new e-series models at the Shanghai International Automobile Industry Exhibition. The company also showcased its latest Dynasty series of vehicles, which were recently unveiled at the company’s spring product launch in Beijing. 
A total of 23 new car models were exhibited at the event, held at Shanghai’s National Convention and Exhibition Center, fully demonstrating the BYD New Architecture (BNA) design, the 3rd generation of Dual Mode technology, plus the e-platform framework. Today, China’s new energy vehicles have entered the ‘fast lane’, ushering in an even larger market outbreak. Presently, we stand at the intersection of old and new kinetic energy conversion for mobility, but also a new starting point for high-quality development. To meet the arrival of complete electrification, BYD has formulated a series of strategies, and is well prepared.\r\n \"\"\"\r\n else:\r\n with open(args['text_file'],'r') as f_args:\r\n text = f_args.read()\r\n # \"\"\"\r\n # An arson fire caused an estimated $50,000 damage at a house on Mt. Soledad that was being renovated, authorities said Friday.San Diego police were looking for the arsonist, described as a Latino man who was wearing a red hat, blue shirt and brown pants, and may have driven away in a small, black four-door car.A resident on Palomino Court, off Soledad Mountain Road, called 9-1-1 about 9:45 a.m. to report the house next door on fire, with black smoke coming out of the roof, police said. Firefighters had the flames knocked down 20 minutes later, holding the damage to the attic and roof, said City spokesperson Alec Phillip. No one was injured.Metro Arson Strike Team investigators were called and they determined the blaze had been set intentionally, Phillip said.Police said one or more witnesses saw the suspect run south from the house and possibly leave in the black car.\r\n # \"\"\"\r\n mytriples = extract_triples(text)\r\n print('\\n\\nFINAL TRIPLES = ', len(mytriples))\r\n for t in mytriples:\r\n print(t)"
] |
[
[
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"matplotlib.pyplot.figure"
]
] |
Avmb/lrn
|
[
"d71775d9f44efa2e73f126f56a41fe7d678203db",
"d71775d9f44efa2e73f126f56a41fe7d678203db"
] |
[
"nli/code/utils/initializer.py",
"doc/code/vocab.py"
] |
[
"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef get_initializer(params):\n if params.initializer == \"uniform\":\n max_val = params.initializer_gain\n return tf.random_uniform_initializer(-max_val, max_val)\n elif params.initializer == \"normal\":\n return tf.random_normal_initializer(0.0, params.initializer_gain)\n elif params.initializer == \"normal_unit_scaling\":\n return tf.variance_scaling_initializer(params.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"normal\")\n elif params.initializer == \"uniform_unit_scaling\":\n return tf.variance_scaling_initializer(params.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"uniform\")\n else:\n tf.logging.warn(\"Unrecognized initializer: %s\" % params.initializer)\n tf.logging.warn(\"Return to default initializer: glorot_uniform_initializer\")\n return tf.glorot_uniform_initializer()\n",
"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nfrom bert.tokenization import BasicTokenizer as Tokenizer\n\n\nclass Vocab(object):\n def __init__(self, lower=False, vocab_file=None):\n self.lower = lower\n\n self.word2id = {}\n self.id2word = {}\n self.word2count = {}\n\n self.pad_sym = \"<pad>\"\n self.eos_sym = \"<eos>\"\n self.unk_sym = \"<unk>\"\n\n self.clean()\n\n self.pretrained_embedding = None\n\n if vocab_file is not None:\n self.load_vocab(vocab_file)\n\n if os.path.exists(vocab_file + \".npz\"):\n pretrain_embedding = np.load(vocab_file + \".npz\")['data']\n self.pretrained_embedding = pretrain_embedding\n\n def clean(self):\n self.word2id = {}\n self.id2word = {}\n self.word2count = {}\n\n self.insert(self.pad_sym)\n self.insert(self.unk_sym)\n self.insert(self.eos_sym)\n\n def insert(self, token):\n token = token if not self.lower else token.lower()\n if token not in self.word2id:\n index = len(self.word2id)\n self.word2id[token] = index\n self.id2word[index] = token\n\n self.word2count[token] = 0\n self.word2count[token] += 1\n\n def size(self):\n return len(self.word2id)\n\n def load_vocab(self, vocab_file):\n with open(vocab_file, 'r') as reader:\n for token in reader:\n self.insert(token.strip())\n\n def get_token(self, id):\n if id in self.id2word:\n return self.id2word[id]\n return self.unk_sym\n\n def get_id(self, token):\n token = token if not self.lower else token.lower()\n if token in self.word2id:\n return self.word2id[token]\n return self.word2id[self.unk_sym]\n\n def sort_vocab(self, least_freq=-1):\n sorted_word2count = sorted(\n self.word2count.items(), key=lambda x: - x[1])\n self.clean()\n for word, freq in sorted_word2count:\n if least_freq > 0:\n if freq <= least_freq:\n continue\n self.insert(word)\n\n def save_vocab(self, vocab_file):\n with open(vocab_file, 'w') as writer:\n for id in range(self.size()):\n writer.write(self.id2word[id].encode(\"utf-8\") + \"\\n\")\n\n np.savez(vocab_file + \".npz\", data=self.pretrained_embedding)\n\n def to_id(self, tokens, append_eos=True):\n if not append_eos:\n return [self.get_id(token) for token in tokens]\n else:\n return [self.get_id(token) for token in\n tokens + [self.eos_sym]]\n\n def to_tokens(self, ids):\n return [self.get_token(id) for id in ids]\n\n def eos(self):\n return self.get_id(self.eos_sym)\n\n def pad(self):\n return self.get_id(self.pad_sym)\n\n def make_vocab(self, data_set, use_char=False, embedding_path=None):\n tf.logging.info(\"Starting Reading Data in {} Manner\".format(use_char))\n tokenizer = Tokenizer(do_lower_case=False)\n\n for data_iter in [data_set.get_train_data(),\n data_set.get_dev_data(),\n data_set.get_test_data()]:\n for sample in data_iter:\n label, document = sample\n\n tokens = tokenizer.tokenize(document)\n for token in tokens:\n if not use_char:\n self.insert(token)\n else:\n for char in list(token):\n self.insert(char)\n\n tf.logging.info(\"Data Loading Over, Starting Sorted\")\n self.sort_vocab(least_freq=3 if use_char else -1)\n\n # process the vocabulary with pretrained-embeddings\n if embedding_path is not None:\n tf.logging.info(\"Pretrained Word Embedding Loading\")\n embed_tokens = {}\n embed_size = None\n with open(embedding_path, 'r') as reader:\n for line in reader:\n segs = line.strip().split(' ')\n\n token = segs[0]\n # Not used in our 
training data, pass\n if token not in self.word2id:\n continue\n embed_tokens[token] = list(map(float, segs[1:]))\n\n if embed_size is None:\n embed_size = len(segs) - 1\n\n self.clean()\n for token in embed_tokens:\n self.insert(token)\n\n # load embeddings\n embeddings = np.zeros([len(embed_tokens), embed_size])\n for token in embed_tokens:\n # 3: the special symbols\n embeddings[self.get_id(token) - 3] = embed_tokens[token]\n\n self.pretrained_embedding = embeddings\n\n tf.logging.info(\"Vocabulary Loading Finished\")\n"
] |
[
[
"tensorflow.random_uniform_initializer",
"tensorflow.logging.warn",
"tensorflow.variance_scaling_initializer",
"tensorflow.glorot_uniform_initializer",
"tensorflow.random_normal_initializer"
],
[
"numpy.load",
"numpy.savez",
"tensorflow.logging.info"
]
] |
clvcooke/densenet.pytorch
|
[
"ae3b0aca6743d4579ec43f693f283a341cf3911c"
] |
[
"densenet.py"
] |
[
"import torch\n\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nimport torchvision.models as models\n\nimport sys\nimport math\n\nclass Bottleneck(nn.Module):\n def __init__(self, nChannels, growthRate):\n super(Bottleneck, self).__init__()\n interChannels = 4*growthRate\n self.bn1 = nn.BatchNorm2d(nChannels)\n self.conv1 = nn.Conv2d(nChannels, interChannels, kernel_size=1,\n bias=False)\n self.bn2 = nn.BatchNorm2d(interChannels)\n self.conv2 = nn.Conv2d(interChannels, growthRate, kernel_size=3,\n padding=1, bias=False)\n\n def forward(self, x):\n out = self.conv1(F.relu(self.bn1(x)))\n out = self.conv2(F.relu(self.bn2(out)))\n out = torch.cat((x, out), 1)\n return out\n\nclass SingleLayer(nn.Module):\n def __init__(self, nChannels, growthRate):\n super(SingleLayer, self).__init__()\n self.bn1 = nn.BatchNorm2d(nChannels)\n self.conv1 = nn.Conv2d(nChannels, growthRate, kernel_size=3,\n padding=1, bias=False)\n\n def forward(self, x):\n out = self.conv1(F.relu(self.bn1(x)))\n out = torch.cat((x, out), 1)\n return out\n\nclass Transition(nn.Module):\n def __init__(self, nChannels, nOutChannels):\n super(Transition, self).__init__()\n self.bn1 = nn.BatchNorm2d(nChannels)\n self.conv1 = nn.Conv2d(nChannels, nOutChannels, kernel_size=1,\n bias=False)\n\n def forward(self, x):\n out = self.conv1(F.relu(self.bn1(x)))\n out = F.avg_pool2d(out, 2)\n return out\n\n\nclass DenseNet(nn.Module):\n def __init__(self, growthRate, depth, reduction, nClasses, bottleneck):\n super(DenseNet, self).__init__()\n\n nDenseBlocks = (depth-4) // 3\n if bottleneck:\n nDenseBlocks //= 2\n\n nChannels = 2*growthRate\n self.conv1 = nn.Conv2d(3, nChannels, kernel_size=3, padding=1,\n bias=False)\n self.dense1 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)\n nChannels += nDenseBlocks*growthRate\n nOutChannels = int(math.floor(nChannels*reduction))\n self.trans1 = Transition(nChannels, nOutChannels)\n\n nChannels = nOutChannels\n self.dense2 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)\n nChannels += nDenseBlocks*growthRate\n nOutChannels = int(math.floor(nChannels*reduction))\n self.trans2 = Transition(nChannels, nOutChannels)\n\n nChannels = nOutChannels\n self.dense3 = self._make_dense(nChannels, growthRate, nDenseBlocks, bottleneck)\n nChannels += nDenseBlocks*growthRate\n\n self.bn1 = nn.BatchNorm2d(nChannels)\n self.fc = nn.Linear(nChannels, nClasses)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def _make_dense(self, nChannels, growthRate, nDenseBlocks, bottleneck):\n layers = []\n for i in range(int(nDenseBlocks)):\n if bottleneck:\n layers.append(Bottleneck(nChannels, growthRate))\n else:\n layers.append(SingleLayer(nChannels, growthRate))\n nChannels += growthRate\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.trans1(self.dense1(out))\n out = self.trans2(self.dense2(out))\n out = self.dense3(out)\n# print(out.shape)\n out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 24))\n# print(out.shape)\n out = F.log_softmax(self.fc(out))\n return out\n"
] |
[
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d"
]
] |
RozenAstrayChen/sound-anomaly-detection-for-manufacturing
|
[
"8ed0ec0cd6d436676c773cb323d7658c9259cde3"
] |
[
"model_torch.py"
] |
[
"import argparse, os\nimport numpy as np\nimport pickle\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.linear import Linear\n\ndef autoencoder(input_dims, type=0):\n    \"\"\"\n    Define a torch model for anomaly detection\n\n    PARAMS\n    ===\n        input_dims: dimensionality of the flattened input features\n        type: 0 = original autoencoder, 1 = smooth autoencoder, 2 = CNN (not implemented)\n    \n    RETURN\n    ===\n        model: the constructed torch model (None for unimplemented types)\n    \"\"\"\n    model = None\n    if type == 0:\n        # original autoencoder\n        model = nn.Sequential(\n            # Encoder\n            torch.nn.Linear(input_dims, 64), torch.nn.ReLU(),\n            torch.nn.Linear(64, 64), torch.nn.ReLU(),\n            torch.nn.Linear(64, 8), torch.nn.ReLU(),\n\n            # Decoder\n            torch.nn.Linear(8, 64), torch.nn.ReLU(),\n            torch.nn.Linear(64, 64), torch.nn.ReLU(),\n            torch.nn.Linear(64, input_dims)\n        )\n\n    elif type == 1:\n        # smooth autoencoder\n        model = nn.Sequential(\n            # Encoder\n            torch.nn.Linear(input_dims, 256), torch.nn.ReLU(),\n            torch.nn.Linear(256, 128), torch.nn.ReLU(),\n            torch.nn.Linear(128, 64), torch.nn.ReLU(),\n            torch.nn.Linear(64, 32), torch.nn.ReLU(),\n\n            # Decoder\n            torch.nn.Linear(32, 64), torch.nn.ReLU(),\n            torch.nn.Linear(64, 128), torch.nn.ReLU(),\n            torch.nn.Linear(128, 256), torch.nn.ReLU(),\n            torch.nn.Linear(256, input_dims)\n        )\n    \n    elif type == 2:\n        # CNN variant not implemented yet\n        pass\n\n    return model\n\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.ReLU"
]
] |
Kyubyong/pororo-1
|
[
"d465a2699aa72cf7574de6c8c3f22c4be4647996"
] |
[
"pororo/models/tts/synthesizer.py"
] |
[
"import json\nfrom typing import Tuple\n\nimport librosa\nimport torch\n\nfrom pororo.models.tts.hifigan.checkpoint import load_checkpoint\nfrom pororo.models.tts.hifigan.model import Generator\nfrom pororo.models.tts.synthesis import synthesize\nfrom pororo.models.tts.tacotron.params import Params as tacotron_hp\nfrom pororo.models.tts.tacotron.tacotron2 import Tacotron\nfrom pororo.models.tts.utils import remove_dataparallel_prefix\nfrom pororo.models.tts.waveRNN.gen_wavernn import generate as wavernn_generate\nfrom pororo.models.tts.waveRNN.params import hp as wavernn_hp\nfrom pororo.models.tts.waveRNN.waveRNN import WaveRNN\n\n\nclass AttrDict(dict):\n\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n\nclass MultilingualSpeechSynthesizer(object):\n\n def __init__(\n self,\n tacotron_path: str,\n english_vocoder_path: str,\n english_vocoder_config: str,\n korean_vocoder_path: str,\n korean_vocoder_config: str,\n wavernn_path: str,\n device: str,\n lang: str = \"en\",\n ):\n self.lang = lang\n self.device = device\n self.vocoder_en_config = None\n self.vocoder_ko_config = None\n\n self.tacotron, self.vocoder_en, self.vocoder_ko, self.vocoder_multi = self.build_model(\n tacotron_path,\n english_vocoder_path,\n english_vocoder_config,\n korean_vocoder_path,\n korean_vocoder_config,\n wavernn_path,\n )\n\n def _build_hifigan(self, config: str, hifigan_path: str) -> Generator:\n with open(config) as f:\n data = f.read()\n\n config = json.loads(data)\n config = AttrDict(config)\n\n generator = Generator(config).to(self.device)\n state_dict_g = load_checkpoint(hifigan_path, self.device)\n generator.load_state_dict(state_dict_g['generator'])\n generator.eval()\n generator.remove_weight_norm()\n\n return generator\n\n def _build_tacotron(self, tacotron_path: str) -> Tacotron:\n state = torch.load(tacotron_path, map_location=self.device)\n tacotron_hp.load_state_dict(state[\"parameters\"])\n tacotron = Tacotron()\n tacotron.load_state_dict(remove_dataparallel_prefix(state[\"model\"]))\n tacotron.eval().to(self.device)\n return tacotron\n\n def _build_wavernn(self, wavernn_path: str) -> WaveRNN:\n wavernn = (WaveRNN(\n rnn_dims=wavernn_hp.voc_rnn_dims,\n fc_dims=wavernn_hp.voc_fc_dims,\n bits=wavernn_hp.bits,\n pad=wavernn_hp.voc_pad,\n upsample_factors=wavernn_hp.voc_upsample_factors,\n feat_dims=wavernn_hp.num_mels,\n compute_dims=wavernn_hp.voc_compute_dims,\n res_out_dims=wavernn_hp.voc_res_out_dims,\n res_blocks=wavernn_hp.voc_res_blocks,\n hop_length=wavernn_hp.hop_length,\n sample_rate=wavernn_hp.sample_rate,\n mode=wavernn_hp.voc_mode,\n ).eval().to(self.device))\n wavernn.load(wavernn_path)\n return wavernn\n\n def build_model(\n self,\n tacotron_path: str,\n english_vocoder_path: str,\n english_vocoder_config: str,\n korean_vocoder_path: str,\n korean_vocoder_config: str,\n wavernn_path: str,\n ) -> Tuple[Tacotron, Generator, Generator, WaveRNN]:\n \"\"\"Load and build tacotron a from checkpoint.\"\"\"\n tacotron = self._build_tacotron(tacotron_path)\n vocoder_multi = self._build_wavernn(wavernn_path)\n vocoder_ko = self._build_hifigan(\n korean_vocoder_config,\n korean_vocoder_path,\n )\n vocoder_en = self._build_hifigan(\n english_vocoder_config,\n english_vocoder_path,\n )\n return tacotron, vocoder_en, vocoder_ko, vocoder_multi\n\n def _spectrogram_postprocess(self, spectrogram):\n spectrogram = librosa.db_to_amplitude(spectrogram)\n spectrogram = torch.log(\n torch.clamp(torch.Tensor(spectrogram), min=1e-5) * 1)\n 
return spectrogram\n\n def predict(self, text: str, speaker: str):\n speakers = speaker.split(',')\n\n spectrogram = synthesize(self.tacotron, f\"|{text}\", device=self.device)\n\n if len(speakers) > 1:\n spectrogram = self._spectrogram_postprocess(spectrogram)\n y_g_hat = self.vocoder_en(\n torch.Tensor(spectrogram).to(self.device).unsqueeze(0))\n audio = y_g_hat.squeeze()\n audio = audio * 32768.0\n return audio.cpu().detach().numpy()\n\n if speaker in (\"ko\", \"en\"):\n spectrogram = self._spectrogram_postprocess(spectrogram)\n\n if speaker == \"ko\":\n y_g_hat = self.vocoder_ko(\n torch.Tensor(spectrogram).to(self.device).unsqueeze(0))\n else:\n y_g_hat = self.vocoder_en(\n torch.Tensor(spectrogram).to(self.device).unsqueeze(0))\n\n audio = y_g_hat.squeeze()\n audio = audio * 32768.0\n return audio.cpu().detach().numpy()\n\n else:\n audio = wavernn_generate(\n self.vocoder_multi,\n spectrogram,\n wavernn_hp.voc_gen_batched,\n wavernn_hp.voc_target,\n wavernn_hp.voc_overlap,\n )\n audio = audio * 32768.0\n return audio\n"
] |
[
[
"torch.Tensor",
"torch.load"
]
] |
trichter/qopen
|
[
"998fe27ec2d98d46c76093bb70f477ab7bede1a6"
] |
[
"qopen/tests/test_rt.py"
] |
[
"# Copyright 2015-2017 Tom Eulenfeld, MIT license\n\"\"\"\nTests for rt module.\n\"\"\"\n\nimport matplotlib\nimport numpy as np\nimport unittest\nfrom pkg_resources import load_entry_point\nimport warnings\n\nfrom qopen.rt import G, rt3d_coda_reduced\nfrom qopen.tests.util import tempdir, quiet\n\nmatplotlib.use('agg')\n\n\nclass TestCase(unittest.TestCase):\n\n def test_Paasschens(self):\n \"\"\"Test 3 values of figure 2 of Paasschens (1997)\"\"\"\n # r, t, c, l, P, dP\n tests = [(2, 3, 1, 1, 0.04 / 4, 5e-4), # r=2.0l, (3, 0.04)\n # r=2.8l, (4, 0.03)\n (2.8 * 2, 4.0, 2, 2, 0.03 / (2.8 * 2) ** 2 / 2, 2e-5),\n (4, 1, 6, 1, 0.02 / 4 ** 2, 2e-4)] # r=4.0l, (6, 0.02)\n for r, t, c, l, P, dP in tests:\n # print(r, t, c, l, P, G(r, t, c, 1/l) / FS, dP)\n self.assertLess(abs(G(r, t, c, 1 / l) - P), dP)\n\n def test_preservation_of_total_energy_3d(self):\n \"\"\"Volume integral over Green's function should be 1\"\"\"\n c = 3000\n g0 = 1e-5\n for t in (1, 10, 100):\n r = np.linspace(0, 1.1 * c * t, 1000)\n G_ = G(r, t, c, g0)\n G_int = 4 * np.pi * np.sum(r ** 2 * G_) * (r[1] - r[0])\n # 2% error are OK for Paaschens solution\n self.assertLess(abs(G_int - 1), 0.02)\n\n def test_preservation_of_total_energy_2d(self):\n \"\"\"Area integral over Green's function should be 1\"\"\"\n c = 3000\n g0 = 1e-5\n for t in (1, 10, 100):\n r = np.linspace(0, 1.1 * c * t, 1000)\n G_ = G(r, t, c, g0, type='rt2d')\n G_int = 2 * np.pi * np.sum(r * G_) * (r[1] - r[0])\n self.assertLess(abs(G_int - 1), 0.005)\n\n def test_preservation_of_total_energy_1d(self):\n \"\"\"Integral over Green's function should be 1\"\"\"\n c = 3000\n g0 = 1e-5\n for t in (1, 10, 100):\n r = np.linspace(0, 1.1 * c * t, 1000)\n G_ = G(r, t, c, g0, type='rt1d')\n G_int = 2 * np.sum(G_) * (r[1] - r[0])\n self.assertLess(abs(G_int - 1), 0.005)\n\n def test_reduced_Sato(self):\n \"\"\"Test against exact solution in figure 8.4, page 256 of\n Sato, Fehler, Maeda, Second edition (2012) \"\"\"\n # left side of figure\n self.assertLess(abs(rt3d_coda_reduced(1.6, 2.1) - 0.02), 0.002)\n self.assertLess(abs(rt3d_coda_reduced(3.2, 3.5) - 0.002), 0.0002)\n # right side of figure\n self.assertLess(abs(rt3d_coda_reduced(1, 1.92) - 0.04), 0.004)\n self.assertLess(abs(rt3d_coda_reduced(2, 7.68) - 0.004), 0.0004)\n\n def cmd(self, cmd):\n self.script(cmd.split())\n\n def test_script(self):\n self.script = load_entry_point('qopen', 'console_scripts', 'qopen')\n with tempdir():\n with quiet():\n self.cmd('rt calc 1600 500 -t 5 -r 1000')\n self.cmd('rt calc 1600 500 -t 5 -r 1000 -a 5000')\n self.cmd('rt calc-direct 1600 500 -t 5')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.cmd('rt plot-t 1600 500 -r 1000')\n self.cmd('rt plot-t 1600 500 -r 1000 --no-direct')\n self.cmd('rt plot-r 1600 500 -t 0.5 --type rt2d')\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"matplotlib.use",
"numpy.sum",
"numpy.linspace"
]
] |
KaihuaTang/mmdetection-support-LVIS
|
[
"3d69795666790e789595344f15c123391bcef831"
] |
[
"mmdet/models/bbox_heads/bbox_head.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core import (auto_fp16, bbox_target, delta2bbox, force_fp32,\n multiclass_nms)\nfrom ..builder import build_loss\nfrom ..losses import accuracy\nfrom ..registry import HEADS\n\n\n@HEADS.register_module\nclass BBoxHead(nn.Module):\n \"\"\"Simplest RoI head, with only two fc layers for classification and\n regression respectively\"\"\"\n\n def __init__(self,\n with_avg_pool=False,\n with_cls=True,\n with_reg=True,\n roi_feat_size=7,\n in_channels=256,\n num_classes=81,\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2],\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n loss_bbox=dict(\n type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):\n super(BBoxHead, self).__init__()\n assert with_cls or with_reg\n self.with_avg_pool = with_avg_pool\n self.with_cls = with_cls\n self.with_reg = with_reg\n self.roi_feat_size = _pair(roi_feat_size)\n self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]\n self.in_channels = in_channels\n self.num_classes = num_classes\n self.target_means = target_means\n self.target_stds = target_stds\n self.reg_class_agnostic = reg_class_agnostic\n self.fp16_enabled = False\n\n self.loss_cls = build_loss(loss_cls)\n self.loss_bbox = build_loss(loss_bbox)\n\n in_channels = self.in_channels\n if self.with_avg_pool:\n self.avg_pool = nn.AvgPool2d(self.roi_feat_size)\n else:\n in_channels *= self.roi_feat_area\n if self.with_cls:\n self.fc_cls = nn.Linear(in_channels, num_classes)\n if self.with_reg:\n out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes\n self.fc_reg = nn.Linear(in_channels, out_dim_reg)\n self.debug_imgs = None\n\n def init_weights(self):\n if self.with_cls:\n nn.init.normal_(self.fc_cls.weight, 0, 0.01)\n #nn.init.constant_(self.fc_cls.bias, 0)\n if self.with_reg:\n nn.init.normal_(self.fc_reg.weight, 0, 0.001)\n nn.init.constant_(self.fc_reg.bias, 0)\n\n @auto_fp16()\n def forward(self, x):\n if self.with_avg_pool:\n x = self.avg_pool(x)\n x = x.view(x.size(0), -1)\n cls_score = self.fc_cls(x) if self.with_cls else None\n bbox_pred = self.fc_reg(x) if self.with_reg else None\n return cls_score, bbox_pred\n\n def get_target(self, sampling_results, gt_bboxes, gt_labels,\n rcnn_train_cfg):\n pos_proposals = [res.pos_bboxes for res in sampling_results]\n neg_proposals = [res.neg_bboxes for res in sampling_results]\n pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]\n pos_gt_labels = [res.pos_gt_labels for res in sampling_results]\n reg_classes = 1 if self.reg_class_agnostic else self.num_classes\n cls_reg_targets = bbox_target(\n pos_proposals,\n neg_proposals,\n pos_gt_bboxes,\n pos_gt_labels,\n rcnn_train_cfg,\n reg_classes,\n target_means=self.target_means,\n target_stds=self.target_stds)\n return cls_reg_targets\n\n @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n def loss(self,\n cls_score,\n bbox_pred,\n labels,\n label_weights,\n bbox_targets,\n bbox_weights,\n reduction_override=None):\n losses = dict()\n if cls_score is not None:\n avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n if cls_score.numel() > 0:\n losses['loss_cls'] = self.loss_cls(\n cls_score,\n labels,\n label_weights,\n avg_factor=avg_factor,\n reduction_override=reduction_override)\n losses['acc'] = accuracy(cls_score, labels)\n if bbox_pred is not None:\n pos_inds = labels > 0\n if pos_inds.any():\n if self.reg_class_agnostic:\n 
pos_bbox_pred = bbox_pred.view(bbox_pred.size(0),\n 4)[pos_inds]\n else:\n pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1,\n 4)[pos_inds,\n labels[pos_inds]]\n losses['loss_bbox'] = self.loss_bbox(\n pos_bbox_pred,\n bbox_targets[pos_inds],\n bbox_weights[pos_inds],\n avg_factor=bbox_targets.size(0),\n reduction_override=reduction_override)\n return losses\n\n @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n def get_det_bboxes(self,\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None):\n if isinstance(cls_score, list):\n cls_score = sum(cls_score) / float(len(cls_score))\n scores = F.softmax(cls_score, dim=1) if cls_score is not None else None\n\n if bbox_pred is not None:\n bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,\n self.target_stds, img_shape)\n else:\n bboxes = rois[:, 1:].clone()\n if img_shape is not None:\n bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)\n bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)\n\n if rescale:\n if isinstance(scale_factor, float):\n bboxes /= scale_factor\n else:\n scale_factor = torch.from_numpy(scale_factor).to(bboxes.device)\n bboxes = (bboxes.view(bboxes.size(0), -1, 4) /\n scale_factor).view(bboxes.size()[0], -1)\n\n if cfg is None:\n return bboxes, scores\n else:\n det_bboxes, det_labels = multiclass_nms(bboxes, scores,\n cfg.score_thr, cfg.nms,\n cfg.max_per_img)\n\n return det_bboxes, det_labels\n\n @force_fp32(apply_to=('bbox_preds', ))\n def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):\n \"\"\"Refine bboxes during training.\n\n Args:\n rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,\n and bs is the sampled RoIs per image. The first column is\n the image id and the next 4 columns are x1, y1, x2, y2.\n labels (Tensor): Shape (n*bs, ).\n bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).\n pos_is_gts (list[Tensor]): Flags indicating if each positive bbox\n is a gt bbox.\n img_metas (list[dict]): Meta info of each image.\n\n Returns:\n list[Tensor]: Refined bboxes of each image in a mini-batch.\n\n Example:\n >>> # xdoctest: +REQUIRES(module:kwarray)\n >>> import kwarray\n >>> import numpy as np\n >>> from mmdet.core.bbox.demodata import random_boxes\n >>> self = BBoxHead(reg_class_agnostic=True)\n >>> n_roi = 2\n >>> n_img = 4\n >>> scale = 512\n >>> rng = np.random.RandomState(0)\n >>> img_metas = [{'img_shape': (scale, scale)}\n ... for _ in range(n_img)]\n >>> # Create rois in the expected format\n >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)\n >>> img_ids = torch.randint(0, n_img, (n_roi,))\n >>> img_ids = img_ids.float()\n >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)\n >>> # Create other args\n >>> labels = torch.randint(0, 2, (n_roi,)).long()\n >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)\n >>> # For each image, pretend random positive boxes are gts\n >>> is_label_pos = (labels.numpy() > 0).astype(np.int)\n >>> lbl_per_img = kwarray.group_items(is_label_pos,\n ... img_ids.numpy())\n >>> pos_per_img = [sum(lbl_per_img.get(gid, []))\n ... 
for gid in range(n_img)]\n >>> pos_is_gts = [\n >>> torch.randint(0, 2, (npos,)).byte().sort(\n >>> descending=True)[0]\n >>> for npos in pos_per_img\n >>> ]\n >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,\n >>> pos_is_gts, img_metas)\n >>> print(bboxes_list)\n \"\"\"\n img_ids = rois[:, 0].long().unique(sorted=True)\n assert img_ids.numel() <= len(img_metas)\n\n bboxes_list = []\n for i in range(len(img_metas)):\n inds = torch.nonzero(rois[:, 0] == i).squeeze(dim=1)\n num_rois = inds.numel()\n\n bboxes_ = rois[inds, 1:]\n label_ = labels[inds]\n bbox_pred_ = bbox_preds[inds]\n img_meta_ = img_metas[i]\n pos_is_gts_ = pos_is_gts[i]\n\n bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,\n img_meta_)\n\n # filter gt bboxes\n pos_keep = 1 - pos_is_gts_\n keep_inds = pos_is_gts_.new_ones(num_rois)\n keep_inds[:len(pos_is_gts_)] = pos_keep\n\n bboxes_list.append(bboxes[keep_inds])\n\n return bboxes_list\n\n @force_fp32(apply_to=('bbox_pred', ))\n def regress_by_class(self, rois, label, bbox_pred, img_meta):\n \"\"\"Regress the bbox for the predicted class. Used in Cascade R-CNN.\n\n Args:\n rois (Tensor): shape (n, 4) or (n, 5)\n label (Tensor): shape (n, )\n bbox_pred (Tensor): shape (n, 4*(#class+1)) or (n, 4)\n img_meta (dict): Image meta info.\n\n Returns:\n Tensor: Regressed bboxes, the same shape as input rois.\n \"\"\"\n assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)\n\n if not self.reg_class_agnostic:\n label = label * 4\n inds = torch.stack((label, label + 1, label + 2, label + 3), 1)\n bbox_pred = torch.gather(bbox_pred, 1, inds)\n assert bbox_pred.size(1) == 4\n\n if rois.size(1) == 4:\n new_rois = delta2bbox(rois, bbox_pred, self.target_means,\n self.target_stds, img_meta['img_shape'])\n else:\n bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means,\n self.target_stds, img_meta['img_shape'])\n new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)\n\n return new_rois\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.cat",
"torch.nn.init.constant_",
"torch.gather",
"torch.from_numpy",
"torch.sum",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.init.normal_",
"torch.nonzero",
"torch.nn.modules.utils._pair",
"torch.stack"
]
] |
mozillakab/POSTAG-Kabyle
|
[
"177a299c8640f5cd19842ab56e7203509cae59e6"
] |
[
"PosTagAnalysis.py"
] |
[
"import csv\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ncorpora=\"corpus-kab.txt\"\ndef tagset(corpora):\n    tags=[]\n    for sentence in open(corpora,encoding='utf-8'):\n        tagged_sentence=sentence.replace('\\ufeff',\"\").replace('\\n',\"\").split()\n        for tagged_word in tagged_sentence:\n            tag=tagged_word.split(\"/\")[1]\n            if tag not in tags:\n                tags.append(tag)\n    return tags\n\ntags=tagset(corpora)\n\n\nheader = tags\ndata=[]\n\ndef initialize(tags):\n    words=[]\n    for i in tags:\n        words.append('')\n    return words\n\n\n\nfor sentence in open(corpora,encoding='utf-8'):\n    words=initialize(tags)\n\n    tagged_sentence=sentence.replace('\\ufeff',\"\").replace('\\n',\"\").split()\n    for tagged_word in tagged_sentence:\n        word=tagged_word.split(\"/\")[0]\n        words[tags.index(tagged_word.split(\"/\")[1])]=word\n    data.append(words)\n#print (data)\n\n\nwith open('postag.csv', 'w', encoding='UTF8', newline='\\n') as f:\n    writer = csv.writer(f,delimiter='\\t')\n\n    # write the header\n    writer.writerow(header)\n    for i in data:\n        writer.writerow(i)\n\n\ndf = pd.read_csv('postag.csv',delimiter='\\t')\n\nVerbs=['VAF', # future aorist\n       'VAI', # imperative aorist\n       'VAIT', # intensive aorist\n       'VII', # intensive imperative\n       'VP', # preterite\n       'VPA', # aorist participle\n       'VPAIN', # negative intensive aorist participle\n       'VPAIP', # positive intensive aorist participle\n       'VPN', # negative preterite\n       'VPPN', # negative preterite participle\n       'VPPP', # positive preterite participle\n       'VS' # subjunctive verb\n       ]\noccurences=[]\nfor i in Verbs:\n\n    occurences.append(df[i].count())\n\npatches, texts, autotexts = plt.pie(occurences,\n                                    labels=Verbs, autopct='%.0f%%',\n                                    shadow=False, radius=1)\nfor t in texts:\n    t.set_size('smaller')\nautotexts[0].set_color('y')\n\nplt.xlabel('Ifmiḍen n yimyagen s tmeẓri deg uḍris n ulmad')\n\nplt.show()\n\n\n## nouns and verbs\n\nVerbes=['VAF', # future aorist\n        'VAI', # imperative aorist\n        'VAIT', # intensive aorist\n        'VII', # intensive imperative\n        'VP', # preterite\n        'VPA', # aorist participle\n        'VPAIN', # negative intensive aorist participle\n        'VPAIP', # positive intensive aorist participle\n        'VPN', # negative preterite\n        'VPPN', # negative preterite participle\n        'VPPP', # positive preterite participle\n        'VS' # subjunctive verb\n        ]\n\nVerbs=['Imyagen','Ismawen']\n\noccurences=[]\nnb=0\nfor i in Verbes:\n    nb=nb+df[i].count()\n\n\noccurences.append(nb)\n\nNames=['NMC', # common noun\n       'NMP', # proper noun\n       'NCM', # cardinal numeral\n]\n\nnb=0\nfor i in Names:\n    nb=nb+df[i].count()\n\n\noccurences.append(nb)\n\n\npatches, texts, autotexts = plt.pie(occurences,\n                                    labels=Verbs, autopct='%.0f%%',\n                                    shadow=False, radius=1)\nfor t in texts:\n    t.set_size('smaller')\nautotexts[0].set_color('y')\n\nplt.xlabel('Ismawen d Imyagen')\n\nplt.show()\n\n"
] |
[
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pie",
"pandas.read_csv",
"matplotlib.pyplot.show"
]
] |
kyleabeauchamp/pymc
|
[
"6ce0094584f1fa00eed0b2ecee533c2fb7f190d6",
"6ce0094584f1fa00eed0b2ecee533c2fb7f190d6",
"6ce0094584f1fa00eed0b2ecee533c2fb7f190d6"
] |
[
"pymc/NumpyDeterministics.py",
"pymc/tests/test_graph.py",
"pymc/database/ram.py"
] |
[
"\"\"\"\npymc.NumpyDeterministics\n\"\"\"\n\n__docformat__ = 'reStructuredText'\nfrom . import PyMCObjects as pm\nimport numpy as np\nfrom numpy import sum, ones, zeros, ravel, shape, size, newaxis\nfrom .utils import find_element, get_signature\nimport inspect\n\nfrom . import six\nxrange = six.moves.xrange\n\n# accumulations\n_boolean_accumulation_deterministics = ['any', 'all']\n_accumulation_deterministics = ['sum'] # ['sum', 'prod']\n\n\n# transformations (broadcasted)\n_generic = ['abs', 'exp', 'log', 'sqrt', 'expm1', 'log1p']\n_trig = ['sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan']\n_hyp_trig = ['sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh']\n_transformation_deterministics = _generic + _trig + _hyp_trig\n_misc_funcs1 = ['arctan2', 'hypot']\n\n__all__ = _accumulation_deterministics + _boolean_accumulation_deterministics + \\\n _transformation_deterministics + _misc_funcs1\n\n\ndef deterministic_from_funcs(\n name, eval, jacobians={}, jacobian_formats={}, dtype=np.float, mv=False):\n \"\"\"\n Return a Stochastic subclass made from a particular distribution.\n\n :Parameters:\n name : string\n The name of the new class.\n jacobians : function\n The log-probability function.\n random : function\n The random function\n dtype : numpy dtype\n The dtype of values of instances.\n mv : boolean\n A flag indicating whether this class represents\n array-valued variables.\n\n \"\"\"\n\n (args, defaults) = get_signature(eval)\n parent_names = args[0:]\n try:\n parents_default = dict(zip(args[-len(defaults):], defaults))\n except TypeError: # No parents at all.\n parents_default = {}\n\n # Build docstring from distribution\n docstr = name[0] + ' = ' + name + '('.join(parent_names) + ')\\n\\n'\n docstr += 'Deterministic variable with ' + name + \\\n ' distribution.\\nParents are: ' + ', '.join(parent_names) + '.\\n\\n'\n docstr += 'Docstring of evaluatio function:\\n'\n docstr += eval.__doc__\n\n return new_deterministic_class(\n dtype, name, parent_names, parents_default, docstr, eval, jacobians, jacobian_formats)\n\n\ndef new_deterministic_class(*new_class_args):\n \"\"\"\n Returns a new class from a distribution.\n\n :Parameters:\n dtype : numpy dtype\n The dtype values of instances of this class.\n name : string\n Name of the new class.\n parent_names : list of strings\n The labels of the parents of this class.\n parents_default : list\n The default values of parents.\n docstr : string\n The docstring of this class.\n eval : function\n The function for this class.\n jacobians : dictionary of functions\n The dictionary of jacobian functions for the class\n jacobian_formats : dictionary of strings\n A dictionary indicating the format of each jacobian function\n \"\"\"\n\n (dtype, name, parent_names, parents_default, docstr,\n eval, jacobians, jacobian_formats) = new_class_args\n\n class new_class(pm.Deterministic):\n __doc__ = docstr\n\n def __init__(self, *args, **kwds):\n (dtype, name, parent_names, parents_default, docstr,\n eval, jacobians, jacobian_formats) = new_class_args\n parents = parents_default\n\n # Figure out what argument names are needed.\n arg_keys = ['parents', 'trace', 'doc', 'debug', 'plot', 'verbose']\n arg_vals = [parents, False, True, None, False, -1]\n\n arg_dict_out = dict(zip(arg_keys, arg_vals))\n args_needed = parent_names + arg_keys[2:]\n\n # Sort positional arguments\n for i in xrange(len(args)):\n try:\n k = args_needed.pop(0)\n if k in parent_names:\n parents[k] = args[i]\n else:\n arg_dict_out[k] = args[i]\n except:\n raise ValueError(\n 'Too many 
positional arguments provided. Arguments for class ' + self.__class__.__name__ + ' are: ' + str(\n all_args_needed))\n\n # Sort keyword arguments\n for k in args_needed:\n if k in parent_names:\n try:\n parents[k] = kwds.pop(k)\n except:\n if k in parents_default:\n parents[k] = parents_default[k]\n else:\n raise ValueError('No value given for parent ' + k)\n elif k in arg_dict_out.keys():\n try:\n arg_dict_out[k] = kwds.pop(k)\n except:\n pass\n\n # Remaining unrecognized arguments raise an error.\n if len(kwds) > 0:\n raise TypeError(\n 'Keywords ' + str(\n kwds.keys(\n )) + ' not recognized. Arguments recognized are ' + str(\n args_needed))\n\n # Call base class initialization method\n if arg_dict_out.pop('debug'):\n pass\n else:\n parent_strs = []\n for key in parents.keys():\n parent_strs.append(str(key))\n\n instance_name = name + '(' + ','.join(parent_strs) + ')'\n\n pm.Deterministic.__init__(\n self,\n name=instance_name,\n eval=eval,\n jacobians=jacobians,\n jacobian_formats=jacobian_formats,\n dtype=dtype,\n **arg_dict_out)\n\n new_class.__name__ = name\n new_class.parent_names = parent_names\n\n return new_class\n\n\n_sum_hist = {}\n\n\ndef sum_jacobian_a(a, axis):\n try:\n return _sum_hist[shape(a)]\n except KeyError:\n j = ones(shape(a))\n _sum_hist[shape(a)] = j\n return j\n\nsum_jacobians = {'a': sum_jacobian_a}\n\nabs_jacobians = {'x': lambda x: np.sign(x)}\nexp_jacobians = {'x': lambda x: np.exp(x)}\nlog_jacobians = {'x': lambda x: 1.0 / x}\nsqrt_jacobians = {'x': lambda x: .5 * x ** -.5}\nhypot_jacobians = {'x1': lambda x1, x2: (x1 ** 2 + x2 ** 2) ** -.5 * x1,\n 'x2': lambda x1, x2: (x1 ** 2 + x2 ** 2) ** -.5 * x2}\nexpm1_jacobians = exp_jacobians\nlog1p_jacobians = {'x': lambda x: 1.0 / (1.0 + x)}\n\nsin_jacobians = {'x': lambda x: np.cos(x)}\ncos_jacobians = {'x': lambda x: -np.sin(x)}\ntan_jacobians = {'x': lambda x: 1 + np.tan(x) ** 2}\n\narcsin_jacobians = {'x': lambda x: (1.0 - x ** 2) ** -.5}\narccos_jacobians = {'x': lambda x: -(1.0 - x ** 2) ** -.5}\narctan_jacobians = {'x': lambda x: 1.0 / (1.0 + x ** 2)}\narctan2_jacobians = {'x1': lambda x1, x2: x2 / (x2 ** 2 + x1 ** 2),\n 'x2': lambda x1, x2: -x1 / (x2 ** 2 + x1 ** 2)}\n# found in www.math.smith.edu/phyllo/Assets/pdf/findcenter.pdf p21\n\nsinh_jacobians = {'x': lambda x: np.cosh(x)}\ncosh_jacobians = {'x': lambda x: np.sinh(x)}\ntanh_jacobians = {'x': lambda x: 1.0 - np.tanh(x) ** 2}\n\narcsinh_jacobians = {'x': lambda x: (1 + x ** 2) ** -.5}\narccosh_jacobians = {'x': lambda x: (x + 1) ** -.5 * (x - 1.0) ** -.5}\narctanh_jacobians = {'x': lambda x: 1.0 / (1 - x ** 2)}\n\n\ndef wrap_function_accum(function):\n def wrapped_function(a, axis=None):\n return function(a, axis)\n wrapped_function.__doc__ = function.__doc__\n\n return wrapped_function\n\nfor function_name in _accumulation_deterministics:\n wrapped_function = wrap_function_accum(\n find_element(function_name,\n np,\n error_on_fail=True))\n\n jacobians = find_element(\n function_name +\n \"_jacobians\",\n locals(\n ),\n error_on_fail=True)\n\n locals(\n )[function_name] = deterministic_from_funcs(function_name,\n wrapped_function,\n jacobians,\n jacobian_formats={'a': 'accumulation_operation'})\n\n\nfor function_name in _boolean_accumulation_deterministics:\n wrapped_function = wrap_function_accum(\n find_element(function_name,\n np,\n error_on_fail=True))\n\n locals()[function_name] = deterministic_from_funcs(\n function_name, wrapped_function)\n\n\ndef wrapped_function_trans(function):\n def wrapped_function(x):\n return function(x)\n 
wrapped_function.__doc__ = function.__doc__\n\n return wrapped_function\n\nfor function_name in _transformation_deterministics:\n wrapped_function = wrapped_function_trans(\n find_element(function_name,\n np,\n error_on_fail=True))\n\n jacobians = find_element(\n function_name +\n \"_jacobians\",\n locals(\n ),\n error_on_fail=True)\n locals(\n )[function_name] = deterministic_from_funcs(function_name,\n wrapped_function,\n jacobians,\n jacobian_formats={'x': 'transformation_operation'})\n\n\ndef wrap_function_misc1(function):\n def wrapped_function(x1, x2):\n return function(x1, x2)\n wrapped_function.__doc__ = function.__doc__\n\n return wrapped_function\n\nfor function_name in _misc_funcs1:\n wrapped_function = wrap_function_misc1(\n find_element(function_name,\n np,\n error_on_fail=True))\n\n jacobians = find_element(\n function_name +\n \"_jacobians\",\n locals(\n ),\n error_on_fail=True)\n\n locals(\n )[function_name] = deterministic_from_funcs(\n function_name, wrapped_function, jacobians, jacobian_formats={\n 'x1': 'broadcast_operation',\n 'x2': 'broadcast_operation'})\n",
"import pymc as pm\nfrom numpy.testing import *\nimport numpy as np\nimport nose\nimport sys\n\nfrom pymc import six\nxrange = six.moves.xrange\n\nDIR = 'testresults'\n\n\ndef mymodel():\n mu = pm.Normal('mu', 0, 1)\n N = [pm.Normal('N_%i' % i, mu, 1) for i in xrange(3)]\n z1 = pm.Lambda('z1', lambda n=N: np.sum(n))\n z2 = pm.Lambda('z2', lambda n=N: np.sum(n))\n\n @pm.potential\n def y(z1=z1, z2=z2, mu=mu):\n return 0\n return mu, N, z1, z2, y\n\n\ndef powerset(seq):\n \"\"\"\n Returns all the subsets of this set. This is a generator.\n\n From http://blog.technomancy.org/2009/3/17/a-powerset-generator-in-python\n \"\"\"\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n yield [seq[0]] + item\n yield item\n\nclass test_graph(TestCase):\n\n @dec.skipif(sys.version_info.major==3)\n def test_graph(self):\n try:\n import pydot\n except ImportError:\n raise nose.SkipTest\n mu, N, z1, z2, y = mymodel()\n for mods in [[mu], [mu, N], [mu, N, z1, z2], [mu, N, z1, z2, y]]:\n for args in powerset([('collapse_deterministics', True), ('collapse_potentials', True), ('label_edges', False), ('legend', True), ('consts', True)]):\n M = pm.Model(mods)\n pm.graph.graph(M, path=DIR, **dict(args))\n \n @dec.skipif(sys.version_info.major==3)\n def test_moral(self):\n try:\n import pydot\n except ImportError:\n raise nose.SkipTest\n mu, N, z1, z2, y = mymodel()\n for mods in [[mu], [mu, N], [mu, N, z1, z2], [mu, N, z1, z2, y]]:\n M = pm.Model(mods)\n pm.graph.moral_graph(M, path=DIR)\n\n\nif __name__ == '__main__':\n C = nose.config.Config(verbosity=1)\n nose.runmodule(config=C)\n",
"\"\"\"\nRAM database module\n\nStore the trace in memory using NumPy arrays.\n\nImplementation Notes\n--------------------\nThis is the only backend using preallocated memory. All others simply\nappend values to a stack. It might be worthwhile to use a list instead\nof a NumPy array to 1. simplify this backend, 2. standardize the\n`Trace model` and 3. remove the need for a truncate method.\nWe would need to catch MemoryError exceptions though.\n\"\"\"\n\nimport pymc\nfrom numpy import zeros, shape, concatenate, ndarray, dtype\nfrom . import base\nimport warnings\nimport numpy as np\n\n__all__ = ['Trace', 'Database']\n\n\nclass Trace(base.Trace):\n\n \"\"\"RAM Trace\n\n Store the samples in memory. No data is written to disk.\n \"\"\"\n\n def __init__(self, name, getfunc=None, db=None, value=None):\n \"\"\"Create a Trace instance.\n\n :Parameters:\n name : string\n The trace object name. This name should uniquely identify\n the pymc variable.\n getfunc : function\n A function returning the value to tally.\n db : Database instance\n The database owning this Trace.\n value : list\n The list of trace arrays. This is used when loading the Trace from\n disk.\"\"\"\n if value is None:\n self._trace = {}\n self._index = {}\n else:\n self._trace = value\n self._index = dict(zip(value.keys(), list(map(len, value.values()))))\n\n base.Trace.__init__(self, name=name, getfunc=getfunc, db=db)\n\n def _initialize(self, chain, length):\n \"\"\"Create an array of zeros with shape (length, shape(obj)), where\n obj is the internal PyMC Stochastic or Deterministic.\n \"\"\"\n # If this db was loaded from the disk, it may not have its\n # tallied step methods' getfuncs yet.\n if self._getfunc is None:\n self._getfunc = self.db.model._funs_to_tally[self.name]\n\n # First, see if the object has an explicit dtype.\n value = np.array(self._getfunc())\n\n if value.dtype is object:\n self._trace[chain] = zeros(length, dtype=object)\n\n elif value.dtype is not None:\n self._trace[chain] = zeros((length,) + shape(value), value.dtype)\n\n # Otherwise, if it's an array, read off its value's dtype.\n elif isinstance(value, ndarray):\n self._trace[chain] = zeros((length,) + shape(value), value.dtype)\n\n # Otherwise, let numpy type its value. If the value is a scalar, the trace will be of the\n # corresponding type. Otherwise it'll be an object array.\n else:\n self._trace[chain] = zeros(\n (length,\n ) + shape(value),\n dtype=value.__class__)\n\n self._index[chain] = 0\n\n def tally(self, chain):\n \"\"\"Store the object's current value to a chain.\n\n :Parameters:\n chain : integer\n Chain index.\n \"\"\"\n\n value = self._getfunc()\n\n try:\n self._trace[chain][self._index[chain]] = value.copy()\n except AttributeError:\n self._trace[chain][self._index[chain]] = value\n self._index[chain] += 1\n\n def truncate(self, index, chain):\n \"\"\"\n Truncate the trace array to some index.\n\n :Parameters:\n index : int\n The index within the chain after which all values will be removed.\n chain : int\n The chain index (>=0).\n \"\"\"\n self._trace[chain] = self._trace[chain][:index]\n\n def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):\n \"\"\"Return the trace.\n\n :Stochastics:\n - burn (int): The number of transient steps to skip.\n - thin (int): Keep one in thin.\n - chain (int): The index of the chain to fetch. 
If None, return all chains.\n - slicing: A slice, overriding burn and thin assignement.\n \"\"\"\n if slicing is None:\n slicing = slice(burn, None, thin)\n if chain is not None:\n if chain < 0:\n chain = range(self.db.chains)[chain]\n return self._trace[chain][slicing]\n else:\n return concatenate(list(self._trace.values()))[slicing]\n\n def __getitem__(self, index):\n chain = self._chain\n if chain is None:\n return concatenate(list(self._trace.values()))[index]\n else:\n if chain < 0:\n chain = range(self.db.chains)[chain]\n return self._trace[chain][index]\n\n __call__ = gettrace\n\n def length(self, chain=-1):\n \"\"\"Return the length of the trace.\n\n :Parameters:\n chain : int or None\n The chain index. If None, returns the combined length of all chains.\n \"\"\"\n if chain is not None:\n if chain < 0:\n chain = range(self.db.chains)[chain]\n return self._trace[chain].shape[0]\n else:\n return sum([t.shape[0] for t in self._trace.values()])\n\n\nclass Database(base.Database):\n\n \"\"\"RAM database.\n\n Store the samples in memory. No data is written to disk.\n \"\"\"\n\n def __init__(self, dbname):\n \"\"\"Create a RAM Database instance.\"\"\"\n self.__name__ = 'ram'\n self.__Trace__ = Trace\n self.dbname = dbname\n self.trace_names = []\n # A list of sequences of names of the objects to tally.\n self._traces = {} # A dictionary of the Trace objects.\n self.chains = 0\n"
] |
[
[
"numpy.cosh",
"numpy.cos",
"numpy.sinh",
"numpy.sin",
"numpy.sign",
"numpy.tan",
"numpy.shape",
"numpy.tanh",
"numpy.exp"
],
[
"numpy.sum"
],
[
"numpy.shape",
"numpy.zeros"
]
] |
sethah/transfer_nlp
|
[
"bd094ab80f7f8ce6854256f094bdaea6ad683455"
] |
[
"src/text_utils.py"
] |
[
"import torch\n\nimport spacy\nimport re\nimport html\nimport json\nimport ftfy\nre1 = re.compile(r' +')\n\n\nclass AttentionIterator(object):\n \"\"\"\n An iterator that adds positional indices to a sequence iterator\n \"\"\"\n\n def __init__(self, iterator, pos_start_index):\n self.iterator = iterator\n # TODO: this leaks abstraction from elsewhere\n self.pos_start_index = pos_start_index\n self.dataset = self.iterator.dataset\n\n def __iter__(self):\n for batch in self.iterator:\n batch.text = batch.text.transpose(0, 1)\n batch_size, seq_len = batch.text.shape\n position_indices = torch.arange(self.pos_start_index, self.pos_start_index + seq_len,\n device=batch.text.device,\n dtype=torch.long).repeat(batch_size, 1)\n batch.text = torch.stack((batch.text, position_indices), dim=2)\n yield batch\n\n def __len__(self):\n return len(self.iterator)\n\n\nclass Batch(object):\n\n \"\"\"\n Object for holding a batch of data with mask during training.\n \"\"\"\n\n def __init__(self, src, tgt, pad=0):\n self.src = src\n self.src_y = tgt\n self.src_mask = self.make_std_mask(self.src, pad)\n self.ntokens = (self.src_y != pad).sum()\n\n @staticmethod\n def make_std_mask(tgt, pad):\n \"Create a mask to hide padding and future words.\"\n tgt_mask = (tgt != pad).unsqueeze(-2)\n tgt_mask = tgt_mask & Batch.subsequent_mask(tgt.size(-1)).type_as(tgt_mask)\n return tgt_mask\n\n @staticmethod\n def subsequent_mask(size):\n \"Mask out subsequent positions.\"\n attn_shape = (size, size)\n subsequent_mask = torch.triu(torch.ones(attn_shape), diagonal=1).type(torch.uint8)\n return subsequent_mask.unsqueeze(0) == 0\n\n\ndef get_pairs(word):\n \"\"\"\n Return set of symbol pairs in a word.\n word is represented as tuple of symbols (symbols being variable-length strings)\n \"\"\"\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs\n\n\nclass TextEncoder(object):\n \"\"\"\n mostly a wrapper for a public python bpe tokenizer\n \"\"\"\n\n def __init__(self, encoder_path, bpe_path):\n self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat'])\n self.encoder = json.load(open(encoder_path))\n self.decoder = {v:k for k,v in self.encoder.items()}\n merges = open(bpe_path, encoding='utf-8').read().split('\\n')[1:-1]\n merges = [tuple(merge.split()) for merge in merges]\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n self.cache = {}\n\n def bpe(self, token):\n word = tuple(token[:-1]) + ( token[-1] + '</w>',)\n if token in self.cache:\n return self.cache[token]\n pairs = get_pairs(word)\n\n if not pairs:\n return token+'</w>'\n\n while True:\n bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word)-1 and word[i+1] == second:\n new_word.append(first+second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n if word == '\\n </w>':\n word = '\\n</w>'\n self.cache[token] = word\n return word\n\n def encode(self, texts, verbose=True):\n texts_tokens = []\n if verbose:\n for text in tqdm(texts, ncols=80, leave=False):\n text = self.nlp(text_standardize(ftfy.fix_text(text)))\n text_tokens = []\n for 
token in text:\n text_tokens.extend([self.encoder.get(t, 0) for t in self.bpe(token.text.lower()).split(' ')])\n texts_tokens.append(text_tokens)\n else:\n for text in texts:\n text = self.nlp(text_standardize(ftfy.fix_text(text)))\n text_tokens = []\n for token in text:\n text_tokens.extend([self.encoder.get(t, 0) for t in self.bpe(token.text.lower()).split(' ')])\n texts_tokens.append(text_tokens)\n return texts_tokens\n\n\ndef text_standardize(text):\n \"\"\"\n fixes some issues the spacy tokenizer had on books corpus\n also does some whitespace standardization\n \"\"\"\n text = text.replace('—', '-')\n text = text.replace('–', '-')\n text = text.replace('―', '-')\n text = text.replace('…', '...')\n text = text.replace('´', \"'\")\n text = re.sub(r'''(-+|~+|!+|\"+|;+|\\?+|\\++|,+|\\)+|\\(+|\\\\+|\\/+|\\*+|\\[+|\\]+|}+|{+|\\|+|_+)''', r' \\1 ', text)\n text = re.sub(r'\\s*\\n\\s*', ' \\n ', text)\n text = re.sub(r'[^\\S\\n]+', ' ', text)\n return text.strip()\n\n\ndef tokenizer(text, nlp, encoder):\n text = text.replace(\"<unk>\", \"unk\")\n text = fixup(text)\n tokens = []\n for tok in nlp(text_standardize(ftfy.fix_text(text))):\n tokens.extend(encoder.bpe(tok.text.lower()).split(' '))\n tokens.append('_classify_')\n return tokens\n\n\ndef fixup(x):\n x = x.replace('#39;', \"'\").replace('amp;', '&').replace('#146;', \"'\").replace(\n 'nbsp;', ' ').replace('#36;', '$').replace('\\\\n', \"\\n\").replace('quot;', \"'\").replace(\n '<br />', \"\\n\").replace('\\\\\"', '\"').replace(' @.@ ','.').replace(\n ' @-@ ','-').replace('\\\\', ' \\\\ ')\n return re1.sub(' ', html.unescape(x))"
] |
[
[
"torch.stack",
"torch.ones",
"torch.arange"
]
] |
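The `Batch.subsequent_mask` helper in the row above builds the standard causal attention mask that hides future tokens during training. A minimal standalone sketch of the same construction (the size 4 is illustrative):

```python
import torch

def subsequent_mask(size):
    # Ones strictly above the diagonal mark the "future" positions.
    attn_shape = (size, size)
    mask = torch.triu(torch.ones(attn_shape), diagonal=1).type(torch.uint8)
    # Comparing with 0 flips them into a boolean keep-mask of shape (1, size, size).
    return mask.unsqueeze(0) == 0

print(subsequent_mask(4))
# tensor([[[ True, False, False, False],
#          [ True,  True, False, False],
#          [ True,  True,  True, False],
#          [ True,  True,  True,  True]]])
```

Row i of the result is True only at positions 0..i, which is exactly what `make_std_mask` ANDs with the padding mask.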
pangeo-data/storage-benchmarks
|
[
"36dac78ffc30b93a58cba901441aae9609d709d3"
] |
[
"benchmarks/gcp_kubernetes_read_zarr.py"
] |
[
"\"\"\"Dask IO performance.\n\nThese ASV classes are meant to test the IO performance of various Dask/Xarray\nbased calculations and operations against a variety of storage backends and\narchitectures.\n\nASV Parameters:\n backend (str): Storage backend that will be used. e.g. POSIX fs, FUSE,\n etc.\n\n dask_get_opt (obj): Dask processing option. See Dask docs on\n set_options.\n\n chunk_size (int): Dask chunk size across 'x' axis of\n dataset.\n\n n_workers (int): Number of Kubernetes Dask workers to spawn\n\n\"\"\"\n\nfrom . import target_zarr\nfrom . import benchmark_tools as bmt\nfrom . import getTestConfigValue\nfrom dask.distributed import Client\nfrom dask_kubernetes import KubeCluster\nimport itertools\nimport numpy as np\nimport timeit\nimport xarray as xr\nimport zarr\n\nRETRIES = 5\nDS_STORE = 'llc4320_zarr_1000'\nRUNS = getTestConfigValue('n_runs')\n\nclass llc4320_benchmarks():\n \"\"\"Zarr GCP tests on LLC4320 Datasets\n\n \"\"\"\n timer = timeit.default_timer\n timeout = 3600\n repeat = 1\n number = 1\n warmup_time = 0.0\n run_nums = np.arange(1, RUNS + 1)\n params = (['GCS'], [1], [60, 80, 100, 120, 140, 160], run_nums)\n #params = (['GCS'], [1], [60], run_nums)\n #params = getTestConfigValue(\"gcp_kubernetes_read_zarr.llc4320_benchmarks\")\n param_names = ['backend', 'z_chunksize', 'n_workers', 'run_num']\n\n @bmt.test_gcp\n def setup(self, backend, z_chunksize, n_workers, run_num):\n self.cluster = KubeCluster(n_workers=n_workers)\n self.client = Client(self.cluster)\n bmt.cluster_wait(self.client, n_workers)\n self.target = target_zarr.ZarrStore(backend=backend, dask=True)\n # Open Zarr DS\n self.ds_zarr = self.target.open_store(DS_STORE)\n self.ds_zarr_theta = self.ds_zarr.Theta\n\n @bmt.test_gcp\n def time_read(self, backend, z_chunksize, n_workers, run_num):\n self.ds_zarr_theta.max().load(retries=RETRIES) \n\n @bmt.test_gcp\n def teardown(self, backend, z_chunksize, n_workers, run_num):\n del self.ds_zarr_theta\n self.cluster.close()\n\nclass llc4320_ds_size():\n number = 1\n timeout = 300\n repeat = 1\n warmup_time = 0.0\n\n def track_megabytes(self):\n target = target_zarr.ZarrStore(backend='GCS', dask=True)\n llc_ds = target.open_store(DS_STORE)\n return llc_ds.nbytes / 2**20 \n"
] |
[
[
"numpy.arange"
]
] |
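The `llc4320_benchmarks` class above relies on ASV's grid parametrization: each benchmark method (`setup`, `time_read`, `teardown`) runs once per combination of the `params` tuple. A sketch of that expansion, assuming ASV's usual Cartesian-product semantics and substituting an illustrative `n_runs = 3` for `getTestConfigValue('n_runs')`:

```python
import itertools
import numpy as np

# Illustrative stand-in for getTestConfigValue('n_runs').
run_nums = np.arange(1, 3 + 1)
params = (['GCS'], [1], [60, 80, 100, 120, 140, 160], run_nums)

# ASV invokes setup/time_read/teardown once per element of this product:
for backend, z_chunksize, n_workers, run_num in itertools.product(*params):
    print(backend, z_chunksize, n_workers, run_num)  # 1 * 1 * 6 * 3 = 18 runs
```

Carrying `run_num` as an explicit parameter alongside `repeat = 1` and `number = 1` is presumably how the file makes each repeat show up as its own ASV result instead of being averaged.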
HughLDDMM/TreeLDDMMCVPR
|
[
"528a79d39617481b5dde53bccefa86e9eb7e489a"
] |
[
"Optimization/new_tree_structures.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 26 19:23:04 2021\n\"\"\"\nfrom constants import THRSHLD_ORTHANT, THRSHLD_LENGTH, N_INTERP, THRSHLD_CONSTRUCTION\n\nfrom registration import Hamiltonian_points\n\nfrom keops_utils import TestCuda\nimport torch\n\nuse_cuda,torchdeviceId,torchdtype,KeOpsdeviceId,KeOpsdtype,KernelMethod = TestCuda()\n\n\ndef Points_threshold_from_singular(pts, connections, n_interp=N_INTERP):\n \"\"\"\n Here connections input correspond to potentially too small segments. \n Needs to be taken into account in this function.\n \n Parameters\n ----------\n @param : pts : torch tensor\n n-points x d-dimension points.\n \n @param : connections : torch Long tensor\n m-connections x 2-dim tensor containing pair of connected points' indices.\n\n @param : n_interp : int\n The number of points in the branch.\n\n Returns\n -------\n @output : V : Torch Float Tensor\n Average euclidean distance between connected points in the tree.\n @output : L : Torch Long Tensor\n Standard deviation of the euclidean distance between connected points\n \"\"\"\n \n n_redundant = 0 #connections.shape[0] - connections[:,0].unique().shape[0]\n \n seen = []\n \n mask_ok = torch.ones(connections.shape[0])\n connections = connections.unique(dim=0)\n \n output = torch.unique(pts, dim=0)\n \n list_seg_too_small = []\n indices_avoided = []\n ind_con2avoid = []\n #Remove the segments too short \n for i, con in enumerate(connections):\n if (pts[con[0],:]-pts[con[1],:]).norm() <= THRSHLD_LENGTH:\n ind_con2avoid.append(i)\n list_seg_too_small.append(con)\n indices_avoided += con\n \n seg_too_small = len(list_seg_too_small)\n \n \"\"\"print(seg_too_small)\n print(\"List : \", list_seg_too_small)\n print(\"Connections : \")\n print(connections)\"\"\"\n \n #print(\"NUMBER OF SEGMENTS TOO SMALL : \", seg_too_small)\n \n n_V = n_interp*(connections.shape[0] - seg_too_small) #+ pts.shape[0] - output.shape[0]\n \n V = torch.zeros((n_V, pts.shape[1])).to(dtype=torchdtype, device=torchdeviceId)\n \n n_pts_current = pts.shape[0] - len(indices_avoided)\n \n list_all_con = []\n \n cpt = 0\n \n new_con = connections.clone()\n \n for i,pt in enumerate(pts):\n if i not in indices_avoided:\n V[cpt,:] = pt\n for k,con in enumerate(connections):\n if con[0]==i and con[0] not in indices_avoided:\n connections[k,0] = cpt\n if con[1]==i and con[1] not in indices_avoided:\n connections[k,1] = cpt\n cpt+=1\n \n if cpt != n_pts_current:\n print(\"Wrong count in Points_threshold_from_singular\")\n #V[:pts.shape[0],:] = pts\n \n #print(\"V shape : \", V.shape)\n \n for ind_test,con in enumerate(connections): #[c for c in connections]:\n \n if ind_test not in ind_con2avoid:\n start = V[con[0],:]\n end = V[con[1],:]\n\n if (end-start).norm() >= THRSHLD_LENGTH: \n \n #Compute the points as barycenters of the extremities\n for j in range(V.shape[1]):\n V[n_pts_current:n_pts_current+n_interp-2,j] = start[j]+(end[j]-start[j])*(torch.linspace(0, 1, steps = n_interp)[1:-1]).to(dtype=torchdtype, device=torchdeviceId)\n \n #Add the connections\n interp_connections = torch.zeros((n_interp-1,2)).to(dtype=torch.long)\n interp_connections[:,0] = torch.arange(0,n_interp-1)\n interp_connections[:,1] = torch.arange(1,n_interp)\n \n interp_connections += n_pts_current-1\n interp_connections[0,0] = con[0]\n interp_connections[-1,1] = con[1]\n \n list_all_con.append(interp_connections)\n \n for i in range(interp_connections.shape[0]):\n if interp_connections[i,0]== interp_connections[i,1] :\n print(\"GOT THE PROBLEM ! 
In the connections : \", interp_connections[i,0], interp_connections[i,1])\n print(con)\n if (V[interp_connections[i,0],:]==V[interp_connections[i,1],:]).all():\n print(\"GOT THE PROBLEM ! In the points : \", V[interp_connections[i,0],:], interp_connections[i,0], interp_connections[i,1])\n print(\"For segment : \", con, \" and pts : \", start, end)\n \n n_pts_current += n_interp-2\n\n L = torch.cat(list_all_con, 0).to(dtype=torch.long, device=torchdeviceId)\n \n return V.contiguous(), L.contiguous()\n \n \ndef Control_from_singular(pts, connections, n_leaves, n_interp=N_INTERP):\n \"\"\"\n Compute the control points from the active segments and the current topology. \n \n If we are close to a orthant's boundary, we just have to fill the positions \n of the inactivted segments with new control points. \n \n If we are at a boundary, the control points will have \n (n_interp \\times n_shrunk_seg) points overlapping.\n \n \n Parameters\n ----------\n @param : pts : torch tensor\n n-points x d-dimension points.\n \n @param : connections : torch Long tensor\n m-connections x 2-dim tensor containing pair of connected points' indices.\n\n @param : n_interp : int\n The number of points in the branch.\n\n Returns\n -------\n @output : V : Torch Float Tensor\n Average euclidean distance between connected points in the tree.\n @output : L : Torch Long Tensor\n Standard deviation of the euclidean distance between connected points\n \"\"\"\n \n n_branches = 2*n_leaves-3\n n_V = (n_interp-2)*n_branches #Remove redundant points, first point of each branch that is not the root\n \n V = torch.zeros((n_V, pts.shape[1])).to(dtype=torchdtype, device=torchdeviceId)\n \n #print(\"V SHAPE : \", V.shape)\n \n n_pts_current = 0 #pts.shape[0]\n \n list_all_con = []\n \n #V[:pts.shape[0],:] = pts\n\n #print(connections.shape, n_branches)\n\n for con in connections:\n \n start = pts[con[0],:]\n end = pts[con[1],:]\n if (end-start).norm() >= THRSHLD_LENGTH: \n for i in range(V.shape[1]):\n V[n_pts_current:n_pts_current+n_interp-2,i] = start[i]+(end[i]-start[i])*(torch.linspace(0, 1, steps = n_interp)[1:-1]).to(dtype=torchdtype, device=torchdeviceId)\n else:\n V[n_pts_current:n_pts_current+n_interp-2,:] = start\n \n n_pts_current += n_interp-2\n\n #print(V)\n\n return V.contiguous()\n \n \ndef Points_from_singular(pts, connections, n_interp=N_INTERP):\n \"\"\"\n \n Parameters\n ----------\n @param : pts : torch tensor\n n-points x d-dimension points.\n \n @param : connections : torch Long tensor\n m-connections x 2-dim tensor containing pair of connected points' indices.\n\n @param : n_interp : int\n The number of points in the branch.\n\n Returns\n -------\n @output : V : Torch Float Tensor\n Average euclidean distance between connected points in the tree.\n @output : L : Torch Long Tensor\n Standard deviation of the euclidean distance between connected points\n \"\"\"\n \n n_redundant = 0 #connections.shape[0] - connections[:,0].unique().shape[0]\n \n seen = []\n \n #Count the number of redundant points in the connections\n for i in connections[:,0]:\n if i in connections[:,1] or i in seen:\n n_redundant+=1\n seen.append(i)\n \n print(\"N redondants : \", n_redundant)\n \n n_V = n_interp*connections.shape[0] - n_redundant\n \n V = torch.zeros((n_V, pts.shape[1])).to(dtype=torchdtype, device=torchdeviceId)\n \n n_pts_current = pts.shape[0]\n \n list_all_con = []\n \n V[:pts.shape[0],:] = pts\n \n for j, con in enumerate(connections):\n \n start = pts[con[0],:]\n end = pts[con[1],:]\n \n if (start-end).norm(p=2) >= 
THRSHLD_LENGTH: #(start.data!=end.data).any(): #\n \n for i in range(V.shape[1]):\n V[n_pts_current:n_pts_current+n_interp-2,i] = start[i]+(end[i]-start[i])*(torch.linspace(0, 1, steps = n_interp)[1:-1]).to(dtype=torchdtype, device=torchdeviceId)\n \n interp_cons = torch.zeros((n_interp-1,2))\n interp_cons[:,0] = torch.arange(0,n_interp-1)\n interp_cons[:,1] = torch.arange(1,n_interp)\n \n interp_cons += n_pts_current-1\n interp_cons[0,0] = con[0]\n interp_cons[-1,-1] = con[1]\n \n list_all_con.append(interp_cons)\n \n n_pts_current += n_interp-2\n\n if list_all_con == []:\n for j, con in enumerate(connections):\n start = pts[con[0],:]\n end = pts[con[1],:]\n print(end-start)\n L = torch.cat(list_all_con, 0).to(dtype=torch.long, device=torchdeviceId)\n \n #print(V.shape)\n \n return V.contiguous(), L.contiguous()\n \n\ndef SelectAvailableTopo(pts, connections, ind_current_topology, dictionnary_topo_comparison,\n mask_topology_comparison, points_activation_mask, segments_activation_mask, \n threshold = THRSHLD_ORTHANT):\n \"\"\"\n \n This is not the best way to code it... In fact we need the active segments and not the points...\n \n \n Parameters\n ----------\n @param : pts : torch tensor\n n-points x d-dimension points.\n The singular points, segments extremities. \n \n @param : connections : torch Long tensor\n m-connections x 2-dim tensor containing pair of connected points' indices.\n Connections between the singular points\n\n @param : ind_current_topology : int\n XXX\n\n Returns\n -------\n @output : selected_topologies : Torch Boolean Tensor\n Mask of the selected topologies regarding the distance to the current orthant's borders.\n\n \"\"\"\n n_topo = segments_activation_mask.shape[1]\n\n if ind_current_topology == -1:\n return torch.tensor([k for k in range(n_topo)]).to(torch.long).to(device=torchdeviceId), torch.zeros((n_topo)).to(device=torchdeviceId)\n else:\n # get all columns with at most 2 values of difference with current topology\n #\n # WARNING : All the topologies of activated segments correspond to binary trees (so at \n # most 2*n_leaves - 3 segments), but to be able to compare we can either store all the \n # possible segments (with the merges) or we merge afterward.\n #\n \n segments_of_interest = mask_topology_comparison[:,ind_current_topology]\n\n compare = (mask_topology_comparison.to(dtype=torchdtype).add(-segments_of_interest.to(dtype=torchdtype).view(-1,1))).abs()\n\n #######################################\n current_topo = segments_activation_mask[:,ind_current_topology]\n compare = torch.zeros(segments_activation_mask.shape).to(device=torchdeviceId)\n \n compare_topos = torch.zeros(segments_activation_mask.shape).to(device=torchdeviceId)\n \n distances = torch.zeros((n_topo)).to(device=torchdeviceId)\n \n distances_non_null = torch.zeros((n_topo)).to(device=torchdeviceId)\n \n for i,test in enumerate(current_topo):\n \n if test: #this segment exists in the current topo\n \n con = connections[i,:]\n d2 = (pts[con[1],:]-pts[con[0],:]).norm(p=2).square()\n \n for j in range(n_topo):\n \n if not segments_activation_mask[i,j] and not points_activation_mask[con[1],j] and con[1] in connections[:,0]: # \n #then the segment has to shrink if we want to go to this topology\n \n compare_topos[i,j] = 1\n \n if d2.sqrt() >= threshold:\n compare[i,j] = 1\n distances_non_null[j] += d2\n \n distances[j] += d2\n \n distances = distances.sqrt()\n distances_non_null = distances_non_null.sqrt()\n #######################################\n\n #get the lengths of the segments\n V0, 
V1 = pts.index_select(0,connections[:,0]), pts.index_select(0,connections[:,1])\n u = (V1-V0)\n lengths = (u**2).sum(1)[:, None].sqrt()\n \n # Then check whether the active component of current topology is <= threshold\n lengths_of_interest = lengths.view(-1,1) * compare_topos\n \n #print(torch.where(compare.sum(dim=0)<=0)[0])\n #print(torch.where((lengths_of_interest-threshold<=0).all(0))[0])\n \n #print((lengths_of_interest-threshold).sum(dim=0))\n \n #Norm inf:\n #selected_topologies_projected = torch.where((lengths_of_interest-threshold<=0).all(0))[0] #first selection\n #selected_topologies_projected = torch.where(compare.sum(dim=0)<=0)[0]\n \n #Norm 2:\n #selected_topologies_projected = torch.where((lengths_of_interest.square().sum(dim=0).sqrt()-threshold).sum(dim=0)<=0)[0] # the overall distance should be smaller than the threshold\n\n #Length:\n selected_topologies_projected = torch.where((lengths_of_interest.sum(dim=0)-threshold)<=0)[0] # the overall distance should be smaller than the threshold\n\n contiguous_orthants = dictionnary_topo_comparison[int(ind_current_topology)]\n \n selected_topologies = [] \n \n for i in selected_topologies_projected:\n if True: # int(i) in contiguous_orthants: #The commented criterion corresponds to the case of a limited number of changes between the orthants\n selected_topologies.append(int(i))\n \n selected_topologies = torch.tensor(selected_topologies)\n \n selected_topologies.to(device=torchdeviceId)\n\n return selected_topologies, distances_non_null\n\n\ndef GoToBoundary(pts, connections, indices_available_topologies, mask_points, mask_segments):\n \"\"\"\n \n Given a set of possible topologies, finds the common border between the orthants and get to \n this border.\n \n \"\"\"\n n_pts = pts.shape[0]\n \n for i in range(n_pts):\n \n test = (mask_points[i,indices_available_topologies.to(dtype=torch.long)]==0).any()\n #test = (mask_points[i,ind_current_topo]==1 and mask_points[i,ind_new_topo]==0)\n \n if test: # i not in corresp_final.keys(): # \n parent = FindParent(i, connections, mask_points[:,indices_available_topologies.to(dtype=torch.long)], mask_segments)\n diff = (pts[parent,:]).data - (pts[i,:]).data\n (pts[i,:]).data += diff\n \n return \n \n \n \ndef GoToBoundaryWithMomenta(momenta, singular_points, singular_connections, indices_available_topologies, mask_points, mask_segments):\n \"\"\"\n \n Given a set of possible topologies, finds the common border between the orthants and get to \n this border.\n \n \"\"\"\n n_pts = singular_points.shape[0]\n \n for i in range(n_pts):\n \n #If a point is not activated in one topology at the boundary, must skrink the branch\n test = (mask_points[i,indices_available_topologies.to(dtype=torch.long)]==0).any()\n \n if test: \n parent = FindParent(i, singular_connections, mask_points[:,indices_available_topologies.to(dtype=torch.long)], mask_segments)\n diff = (singular_points[parent,:]).data - (singular_points[i,:]).data\n (singular_points[i,:]).data += diff\n \n \n return momenta, singular_points \n\n\n\ndef RearangeMomenta(momenta, singular_points, singular_connections, ind_previous_topo, ind_new_topo, indices_available_topologies, mask_points, mask_segments, n_interp, n_leaves):\n \"\"\"\n \n Given a set of possible topologies, finds the common border between the orthants and get to \n this border.\n \n \"\"\"\n n_pts = singular_points.shape[0]\n n_branches = 2*n_leaves-3\n n_ctrl_pts = (n_interp-2)*n_branches\n \n Current_topo = mask_points[:,ind_new_topo]\n Previous_topo = 
mask_points[:,ind_previous_topo]\n\n dict_parents = {}\n\n \n for i in range(n_pts):\n \n test = (mask_points[i,indices_available_topologies]==0).any()\n #test = (mask_points[i,ind_current_topo]==1 and mask_points[i,ind_new_topo]==0)\n \n if test: # and Current_topo[i]: # i not in corresp_final.keys(): # \n parent = FindParent(i, singular_connections, mask_points[:,indices_available_topologies], mask_segments)\n\n if parent not in dict_parents.keys():\n dict_parents[parent] = [i]\n else:\n dict_parents[parent].append(i)\n\n Current_segments = singular_connections[mask_segments[:,ind_new_topo]==1,:]\n Previous_segments = singular_connections[mask_segments[:,ind_previous_topo]==1,:]\n \n reorder_ind = torch.zeros((2*n_leaves-3,1),dtype=torch.long, device=torchdeviceId)\n \n print(Current_segments)\n print(Previous_segments)\n \n for i in range(n_pts)[1:]:\n \n test_common = (mask_points[i,indices_available_topologies]==1).all()\n \n if Current_topo[i] and test_common:\n #Then it is a shared point\n \n pos_previous = torch.where(Previous_segments[:,1]==i)[0]\n pos_current = torch.where(Current_segments[:,1]==i)[0]\n \n print(pos_current)\n print(pos_previous)\n \n reorder_ind[pos_current] = pos_previous\n \n if Current_topo[i] and not test_common:\n #Then this point is associated to a new branch that will grow\n \n if Previous_topo[i]==1:\n #Still shared with previous topo\n pos_previous = torch.where(Previous_segments[:,1]==i)[0]\n pos_current = torch.where(Current_segments[:,1]==i)[0]\n \n print(pos_current)\n print(pos_previous)\n \n print(\"TEST_WEIGHT MOMENTA\")\n momenta[pos_previous*(n_interp-2):(pos_previous+1)*(n_interp-2),:].data *= 000000.1\n \n reorder_ind[pos_current] = pos_previous\n\n for parent in dict_parents.keys():\n if i in dict_parents[parent]:\n dict_parents[parent].remove(i)\n break\n \n #We erased the shared points with the previous topo from dict_parent, now we find \n #the momenta associated to growing branches from the parents\n \n remaining_inds = [k for k in range( Current_segments.shape[0])]\n \n for ind in reorder_ind:\n if ind in remaining_inds:\n remaining_inds.remove(ind)\n \n for i in range(n_pts)[1:]:\n test_common = (mask_points[i,indices_available_topologies]==1).all()\n \n if Current_topo[i] and not test_common and Previous_topo[i]==0:\n \n print(i, Current_topo[i], test_common, Previous_topo[i])\n \n parent = FindParent(i, singular_connections, mask_points[:,indices_available_topologies], mask_segments)\n \n print(\"Paretn i : \", parent)\n \n dict_parents[parent] \n other_ind = -1\n for j,ind in enumerate(dict_parents[parent]):\n if Previous_topo[ind]==1:\n other_ind = int(dict_parents[parent].pop(j))\n print(other_ind)\n break\n \n if other_ind==-1:\n print(\"UH OH\")\n print(dict_parents[parent])\n print(Previous_topo[dict_parents[parent]])\n \n other_ind = remaining_inds.pop(0)\n print(other_ind)\n print(\"Parent other ind : \", FindParent(other_ind, singular_connections, mask_points[:,indices_available_topologies], mask_segments))\n \n \n print(i)\n print(\"check where : \", torch.where(Current_segments[:,1]==i))\n pos_current = torch.where(Current_segments[:,1]==i)[0]\n pos_previous = torch.where(Previous_segments[:,1]==other_ind)[0]\n \n reorder_ind[pos_current] = pos_previous\n \n print(pos_current)\n print(pos_previous)\n print(reorder_ind)\n\n ordered_momenta = momenta.clone()\n\n #Now we re-order the momenta so that we get the correct positions\n for i,previous_i in enumerate(reorder_ind):\n \n if i != previous_i:\n \n 
ordered_momenta[i*(n_interp-2):(i+1)*(n_interp-2),:].data = momenta[previous_i*(n_interp-2):(previous_i+1)*(n_interp-2),:].data\n \n momenta.data = ordered_momenta.data\n \n return momenta\n\n\ndef ComputeResidual(pts, connections, ind_current_topology, available_topologies, \n points_activation_mask, segments_activation_mask):\n \"\"\"\n For each other available, compute the length to go through if it were selected\n \"\"\"\n \n current_topo = segments_activation_mask[:,ind_current_topology]\n \n distances = torch.zeros((len(available_topologies))).to(device=torchdeviceId)\n \n for i,test in enumerate(current_topo):\n \n if test: #this segment exists in the current topo\n \n con = connections[i,:]\n d2 = (pts[con[1],:]-pts[con[0],:]).norm().square()\n \n for j,ind in enumerate(available_topologies):\n \n if not segments_activation_mask[i,ind] and not points_activation_mask[con[1],ind]:\n #then the segment has to shrink if we want to go to this topology\n \n distances[j] += d2\n \n distances.sqrt()\n \n return distances\n \n\n\ndef FindParent(child, connections, mask_points, mask_segments = None):\n\n found = False\n to_return = 0\n indices_lines = torch.where(connections[:,1]==child)\n \n #print(connections,child)\n \n #print(indices_lines[0])\n \n parent = connections[indices_lines[0],0]\n\n for i in indices_lines[0]:\n #print(i)\n ind = int(i)\n #print(\"First test : \", mask_points[connections[ind,0],:], \"for \", connections[ind,0])\n if (mask_points[connections[ind,0],:]==1).all():\n found = True\n to_return = int(connections[ind,0])\n break\n #return int(connections[ind,0])\n \n #None of the potential parents is active, must recursively find the parent\n #print(\"None of the potential parents is active, must recursively find the parent\")\n #print(\"Current child : \", child, \", indices_lines : \", indices_lines)\n if not found:\n for i in indices_lines[0]:\n #print(\"Parent : \", i)\n ind = int(i)\n ancestor = FindParent(connections[ind,0], connections, mask_points, mask_segments = mask_segments)\n #print(\"Second test : \", mask_points[ancestor,:], \"for \", ancestor)\n if (mask_points[ancestor,:]==1).all():\n found = True\n to_return = ancestor\n break\n #return ancestor\n\n if to_return==0:\n print(\"NO PARENT FOUND........ !!!!!!!!!!!!!! \\n\")\n return to_return\n\n\ndef MergeSegments(connections, activated_segments):\n \"\"\"\n \n Select the segment that can be used in the data attachment. \n For now, a segment [a, b] is inactivated if :\n - b is inactive.\n - a is inactive and b is the end of another segment. 
\n \n \"\"\"\n \n new_connections = connections.new(connections)\n new_activated_segments = torch.clone(activated_segments)\n\n active_connections = torch.masked_select(connections, activated_segments.view(-1,1)).view(-1,2)\n\n indices = active_connections.unique()\n\n for i in indices:\n\n m_start = ((connections[:,0]==i) * activated_segments ).to(dtype=torch.bool)\n m_end = ((connections[:,1]==i) * activated_segments ).to(dtype=torch.bool) \n\n #we deactivate if there is another segment ending with con[1]\n if torch.count_nonzero(m_start)==1 and torch.count_nonzero(m_end)==1: \n\n print(\"There will be a merge at : \", i)\n\n s1 = torch.where(m_start==1,m_start, False)\n s2 = torch.where(m_end==1, m_end, False)\n\n new_activated_segments[s1] = 0\n new_activated_segments[s2] = 0\n \n #new_seg = torch.tensor([connections[s2,0],connections[s1,1]]).to(dtype=torch.long, device=torchdeviceId)\n #activation = torch.tensor([1]).to(dtype=torch.bool, device=torchdeviceId)\n \n #new_connections = torch.cat((new_connections,new_seg.view(-1,2)), 0)\n #new_activated_segments = torch.cat((new_activated_segments,activation), 0)\n\n return new_activated_segments.contiguous(), new_connections.contiguous()\n\n\ndef ActivateSegments(pts, connections, activated_points):\n \"\"\"\n \n Select the segment that can be used in the data attachment. \n For now, a segment [a, b] is inactivated if :\n - b is inactive.\n - a is inactive and b is the end of another segment. \n \n \"\"\"\n \n activated_segments = torch.ones(connections.shape[0]).to(dtype=torch.bool, device=torchdeviceId)\n\n for i, con in enumerate(connections):\n if not activated_points[con[0]] and not activated_points[con[1]]: #both ends are inactive\n activated_segments[i] = 0\n \n elif not activated_points[con[0]]: \n \n if torch.count_nonzero(connections[:,1]==con[1])>1: #we deactivate if there is another segment ending with con[1]\n activated_segments[i] = 0\n \n #elif not activated_points[con[1]] or not activated_points[con[0]]: \n # activated_segments[i] = 0\n \n return activated_segments\n \n \ndef StoreIndBackward(dictionnary, connections, ind_current_topo, topo_to_check,\n points_activation_mask, segments_activation_mask, \n ind_start, ind_end, ref_start, ref_end):\n \"\"\"\n Stores for the backward the indices of interest. \n The idea is that every points acitve in the topologies should receive the gradient of \n - The corresponding created point in the tree that will be used in the data attachment term,\n - If it is not activated in the current topology, the gradient of the points sharing a segment's extremity. \n (Ex: point 6 is not acivated in the current topology but not in another one. 
The segment [2,4] exists\n in the current topology and [6,4] doesn't, then the gradient of 2 for the segment [2,4] will be also \n attributed to 6 in the other topology.\n \n \"\"\"\n \n #print(\"\\n\\n\\n\\n\\n\\n New topo : \", topo_to_check)\n \n \n for ind_con in range(connections.shape[0]):\n \n if segments_activation_mask[ind_con,topo_to_check]==1: \n \n a = int(connections[ind_con,0])\n b = int(connections[ind_con,1])\n \n ind_diff = ind_start\n #print(ind_start, ind_end, connections[ind_con,:], ind_con)\n \n if ind_start in connections[ind_con,:]:\n \n #Add a reference in the dictionnary\n if ind_start not in dictionnary.keys():\n dictionnary[ind_start] = []\n \n if ref_start not in dictionnary[ind_start]:\n dictionnary[ind_start].append(ref_start)\n \n \"\"\"if a != ind_start:\n ind_diff = a\n else:\n ind_diff = b\n \n #Then we found a point that is not activated in the current topology \n #and we need to attribute it a gradient\n if (points_activation_mask[ind_diff,ind_current_topo] == 0 and \n points_activation_mask[ind_diff,topo_to_check] == 1 and ind_diff == b):\n \n if ind_diff not in dictionnary.keys():\n dictionnary[ind_diff] = []\n \n if ref_start not in dictionnary[ind_diff]:\n dictionnary[ind_diff].append(ref_start)\n print(\"Adding from [{0}-{1}] the point {2} for the grad of {3}\".format(ind_start,ind_end,ref_start,ind_diff)) \n \n elif points_activation_mask[ind_diff,topo_to_check] == 0:\n print(\"\\n\\n\\n\\n\\n WARNING Here a point is selected in a connection, but not in the points \\n\\n\\n\\n\")\n \"\"\"\n if ind_end in connections[ind_con,:]:\n \n if ind_end not in dictionnary.keys():\n dictionnary[ind_end] = []\n \n if ref_end not in dictionnary[ind_end]:\n dictionnary[ind_end].append(ref_end)\n \n if segments_activation_mask[ind_con,ind_current_topo]==0:\n \n ind_diff = -1\n \n if a != ind_end:\n ind_diff = a\n else:\n ind_diff = b\n \n #Then we found a point that is not activated in the current topology \n #and we need to attribute it a gradient\n if (points_activation_mask[a,ind_current_topo] == 0 and \n points_activation_mask[a,topo_to_check] == 1 and ind_diff == a):\n if a not in dictionnary.keys():\n dictionnary[a] = []\n \n if ref_start not in dictionnary[a]:\n dictionnary[a].append(ref_start)\n \n return\n \n \n \n \nclass SingularToSegments(torch.autograd.Function):\n \"\"\"\n We can implement our own custom autograd Functions by subclassing\n torch.autograd.Function and implementing the forward and backward passes\n which operate on Tensors.\n \n In the forward pass we receive a Tensor containing the input and return\n a Tensor containing the output. self is a context object that can be used\n to stash information for backward computation. You can cache arbitrary\n objects for use in the backward pass using the ctx.save_for_backward method.\n \"\"\"\n def __init__(self):\n super(SingularToSegments, self).__init__()\n self.ind_selected_topo = -1\n \n def update_topo(self, ind):\n self.ind_selected_topo = ind\n\n def get_topo(self):\n return self.ind_selected_topo\n\n @staticmethod\n def forward(self, pts, connections, \n mask_topo, segments_activation_mask, \n ind_current_topo, list_previous = [], \n available_topologies = []):\n \"\"\"\n \n In this forward function the output could be : \n - the selected segments extremities\n - the whole points and connectivities... 
to be decided\n \n Parameters\n ----------\n @param : V : torch tensor\n n-points x d-dimension points.\n \n @param : F : torch Long tensor\n m-connections x 2-dim tensor containing pair of connected points' indices.\n\n @param : n_interp : int\n The number of points in the branch.\n\n Returns\n -------\n @output : V : Torch Float Tensor\n Average euclidean distance between connected points in the tree.\n @output : L : Torch Long Tensor\n Standard deviation of the euclidean distance between connected points\n \"\"\"\n\n n_seg = 0\n dim = pts.shape[1]\n list_seg = [] #List containing the unique segments \n\n if available_topologies == []:\n available_topologies = [ind_current_topo]\n \n n_topo = len(available_topologies)\n \n if ind_current_topo != -1:\n #select the correct activated segments\n activated_segments = segments_activation_mask[:,ind_current_topo] \n else:\n activated_segments = torch.ones(connections.shape[0]).to(dtype=torchdtype, device=torchdeviceId)\n\n correspondences_per_topo = [{} for i in range(n_topo)]\n\n for i, con in enumerate(connections):\n \n start = pts[con[0],:]\n end = pts[con[1],:]\n \n ind_start = int(con[0].detach().cpu())\n ind_end = int(con[1].detach().cpu())\n\n if activated_segments[i]:\n \n #Then create the segment and check if it already exists\n segment = torch.Tensor(2, pts.shape[1]).to(dtype=torchdtype, device=torchdeviceId)\n torch.cat([start,end], out=segment)\n \n \"\"\"l_bool = [(segment==s).all() for s in list_seg if s.shape==segment.shape]\n \n if any( l_bool ):\n ind_segment = l_bool.index(True)\n ref_start = 2*ind_segment\n ref_end = 2*ind_segment+1\n else:\"\"\"\n list_seg.append(segment)\n ref_start = 2*n_seg\n ref_end = 2*n_seg+1\n \n #for the gradient projection, stores the connected points\n for i,topo in enumerate(available_topologies):\n \n dict_correspondences = correspondences_per_topo[i]\n \n StoreIndBackward(dict_correspondences, connections, ind_current_topo, topo,\n mask_topo, segments_activation_mask, \n ind_start, ind_end, ref_start, ref_end)\n \n #if True:\n n_seg+=1\n\n print(\"N seg : \", n_seg)\n V = torch.zeros((2*n_seg, dim)).to(dtype=torchdtype, device=torchdeviceId)\n L = torch.zeros((n_seg, 2)).to(dtype=torch.long, device=torchdeviceId)\n\n for dict_correspondences in correspondences_per_topo:\n for k in dict_correspondences.keys():\n dict_correspondences[k] = torch.Tensor(dict_correspondences[k]).to(dtype=torch.long, device=torchdeviceId)\n \n for i, seg in enumerate(list_seg):\n \n V[2*i,:] = seg[:dim]\n V[2*i+1,:] = seg[dim:]\n \n L[i,0] = 2*i\n L[i,1] = 2*i+1\n\n self.all_correspondences = correspondences_per_topo\n self.ind_current_topo = ind_current_topo\n self.list_previous = list_previous\n self.available_topologies = available_topologies\n self.save_for_backward(pts)\n\n return V, L\n\n \n @staticmethod\n def backward(self, grad_points, grad_connections):\n \"\"\"\n In the backward pass we receive a Tensor containing the gradient of the loss\n with respect to the output, and we need to compute the gradient of the loss\n with respect to the input.\n \n Here we need in ctx : the association between input[i,:] and the different output[j,:]\n \"\"\"\n\n pts = self.saved_tensors[0] \n n_pts, dim = pts.shape[0], pts.shape[1]\n \n ind_current_topo = self.ind_current_topo \n all_correspondences = self.all_correspondences\n \n n_topo_available = len(self.available_topologies)\n available_topologies = torch.tensor(self.available_topologies).to(device=torchdeviceId)\n\n grad_input = 
torch.zeros(pts.shape).to(dtype=torchdtype, device=torchdeviceId)\n \n selected_gradients = torch.zeros((n_pts,dim,n_topo_available)).to(dtype=torchdtype, device=torchdeviceId)\n \n list_previous_topo = self.list_previous\n \n for ind_topo,corresp in enumerate(all_correspondences):\n #Retrieve the gradient with respect to the template's vertices\n for k in corresp.keys():\n selected_gradients[k,:,ind_topo] = grad_points.index_select(0,corresp[k]).sum(dim=0) #/len(corresp[k])\n\n print(\"topo available : \", available_topologies)\n\n if n_topo_available > 1:\n \n #### Project the gradient onto different orthants ###\n projection_matrix_norm = selected_gradients.norm(dim=[0,1])\n \n print(\"gradient norms : \", projection_matrix_norm)\n \n #Select the projection maximizing the norm of the projected gradient\n selected_topo = projection_matrix_norm.argmax()\n grad_input = selected_gradients[:,:,selected_topo]\n \n final_ind = available_topologies[selected_topo]\n \n else:\n final_ind = ind_current_topo\n print(grad_input.shape)\n grad_input = selected_gradients[:,:,0]\n \n if len(list_previous_topo)>1: #means that we stored two previous topologies\n list_previous_topo.pop(0)\n list_previous_topo.append(int(final_ind))\n\n return grad_input, None, None, None, None, None, None\n"
] |
[
[
"torch.linspace",
"torch.ones",
"torch.Tensor",
"torch.zeros",
"torch.cat",
"torch.clone",
"torch.tensor",
"torch.unique",
"torch.where",
"torch.arange",
"torch.count_nonzero"
]
] |
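`Points_from_singular` and its siblings in the row above interpolate each branch coordinate-by-coordinate with `torch.linspace`; the same interior-point computation can be written as a single broadcast. A vectorized sketch (the helper name is ours, not the repo's):

```python
import torch

def interior_points(start, end, n_interp):
    # Uniform subdivision of [0, 1]; drop both endpoints, matching the
    # file's torch.linspace(0, 1, steps=n_interp)[1:-1] slice.
    t = torch.linspace(0, 1, steps=n_interp)[1:-1].unsqueeze(1)  # (n_interp - 2, 1)
    return start + t * (end - start)  # broadcasts to (n_interp - 2, dim)

start = torch.tensor([0.0, 0.0])
end = torch.tensor([1.0, 2.0])
print(interior_points(start, end, 5))
# tensor([[0.2500, 0.5000],
#         [0.5000, 1.0000],
#         [0.7500, 1.5000]])
```

Dropping the first and last subdivision points keeps only barycenters strictly inside the segment, so the segment endpoints (the singular points) are stored once rather than duplicated per branch.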
AmirS2/sagemaker-python-sdk
|
[
"bb17c3b3de6a9af718279670c8177b5f3a19659c"
] |
[
"tests/integ/test_tuner.py"
] |
[
"# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nfrom __future__ import absolute_import\n\nimport gzip\nimport json\nimport os\nimport pickle\nimport sys\nimport time\n\nimport numpy as np\nimport pytest\nimport tests.integ\nfrom botocore.exceptions import ClientError\nfrom tests.integ import DATA_DIR, PYTHON_VERSION, TUNING_DEFAULT_TIMEOUT_MINUTES\nfrom tests.integ.record_set import prepare_record_set_from_local_files\nfrom tests.integ.timeout import timeout, timeout_and_delete_endpoint_by_name\nfrom tests.integ import vpc_test_utils\n\nfrom sagemaker import KMeans, LDA, RandomCutForest\nfrom sagemaker.amazon.amazon_estimator import registry\nfrom sagemaker.amazon.common import read_records\nfrom sagemaker.chainer import Chainer\nfrom sagemaker.estimator import Estimator\nfrom sagemaker.mxnet.estimator import MXNet\nfrom sagemaker.predictor import json_deserializer\nfrom sagemaker.pytorch import PyTorch\nfrom sagemaker.tensorflow import TensorFlow\nfrom sagemaker.tuner import (\n IntegerParameter,\n ContinuousParameter,\n CategoricalParameter,\n HyperparameterTuner,\n WarmStartConfig,\n WarmStartTypes,\n create_transfer_learning_tuner,\n create_identical_dataset_and_algorithm_tuner,\n)\nfrom sagemaker.utils import unique_name_from_base\n\nDATA_PATH = os.path.join(DATA_DIR, \"iris\", \"data\")\n\n\n@pytest.fixture(scope=\"module\")\ndef kmeans_train_set(sagemaker_session):\n data_path = os.path.join(DATA_DIR, \"one_p_mnist\", \"mnist.pkl.gz\")\n pickle_args = {} if sys.version_info.major == 2 else {\"encoding\": \"latin1\"}\n # Load the data into memory as numpy arrays\n with gzip.open(data_path, \"rb\") as f:\n train_set, _, _ = pickle.load(f, **pickle_args)\n\n return train_set\n\n\n@pytest.fixture(scope=\"module\")\ndef kmeans_estimator(sagemaker_session, cpu_instance_type):\n kmeans = KMeans(\n role=\"SageMakerRole\",\n train_instance_count=1,\n train_instance_type=cpu_instance_type,\n k=10,\n sagemaker_session=sagemaker_session,\n output_path=\"s3://{}/\".format(sagemaker_session.default_bucket()),\n )\n # set kmeans specific hp\n kmeans.init_method = \"random\"\n kmeans.max_iterators = 1\n kmeans.tol = 1\n kmeans.num_trials = 1\n kmeans.local_init_method = \"kmeans++\"\n kmeans.half_life_time_size = 1\n kmeans.epochs = 1\n\n return kmeans\n\n\n@pytest.fixture(scope=\"module\")\ndef hyperparameter_ranges():\n return {\n \"extra_center_factor\": IntegerParameter(1, 10),\n \"mini_batch_size\": IntegerParameter(10, 100),\n \"epochs\": IntegerParameter(1, 2),\n \"init_method\": CategoricalParameter([\"kmeans++\", \"random\"]),\n }\n\n\ndef _tune_and_deploy(\n kmeans_estimator,\n kmeans_train_set,\n sagemaker_session,\n cpu_instance_type,\n hyperparameter_ranges=None,\n job_name=None,\n warm_start_config=None,\n early_stopping_type=\"Off\",\n):\n tuner = _tune(\n kmeans_estimator,\n kmeans_train_set,\n hyperparameter_ranges=hyperparameter_ranges,\n warm_start_config=warm_start_config,\n job_name=job_name,\n early_stopping_type=early_stopping_type,\n )\n 
_deploy(kmeans_train_set, sagemaker_session, tuner, early_stopping_type, cpu_instance_type)\n\n\ndef _deploy(kmeans_train_set, sagemaker_session, tuner, early_stopping_type, cpu_instance_type):\n best_training_job = tuner.best_training_job()\n assert tuner.early_stopping_type == early_stopping_type\n with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):\n predictor = tuner.deploy(1, cpu_instance_type)\n\n result = predictor.predict(kmeans_train_set[0][:10])\n\n assert len(result) == 10\n for record in result:\n assert record.label[\"closest_cluster\"] is not None\n assert record.label[\"distance_to_cluster\"] is not None\n\n\ndef _tune(\n kmeans_estimator,\n kmeans_train_set,\n tuner=None,\n hyperparameter_ranges=None,\n job_name=None,\n warm_start_config=None,\n wait_till_terminal=True,\n max_jobs=2,\n max_parallel_jobs=2,\n early_stopping_type=\"Off\",\n):\n with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):\n\n if not tuner:\n tuner = HyperparameterTuner(\n estimator=kmeans_estimator,\n objective_metric_name=\"test:msd\",\n hyperparameter_ranges=hyperparameter_ranges,\n objective_type=\"Minimize\",\n max_jobs=max_jobs,\n max_parallel_jobs=max_parallel_jobs,\n warm_start_config=warm_start_config,\n early_stopping_type=early_stopping_type,\n )\n\n records = kmeans_estimator.record_set(kmeans_train_set[0][:100])\n test_record_set = kmeans_estimator.record_set(kmeans_train_set[0][:100], channel=\"test\")\n\n tuner.fit([records, test_record_set], job_name=job_name)\n print(\"Started hyperparameter tuning job with name:\" + tuner.latest_tuning_job.name)\n\n if wait_till_terminal:\n tuner.wait()\n\n return tuner\n\n\n@pytest.mark.canary_quick\ndef test_tuning_kmeans(\n sagemaker_session, kmeans_train_set, kmeans_estimator, hyperparameter_ranges, cpu_instance_type\n):\n job_name = unique_name_from_base(\"test-tune-kmeans\")\n _tune_and_deploy(\n kmeans_estimator,\n kmeans_train_set,\n sagemaker_session,\n cpu_instance_type,\n hyperparameter_ranges=hyperparameter_ranges,\n job_name=job_name,\n )\n\n\ndef test_tuning_kmeans_identical_dataset_algorithm_tuner_raw(\n sagemaker_session, kmeans_train_set, kmeans_estimator, hyperparameter_ranges\n):\n parent_tuning_job_name = unique_name_from_base(\"kmeans-identical\", max_length=32)\n child_tuning_job_name = unique_name_from_base(\"c-kmeans-identical\", max_length=32)\n _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=parent_tuning_job_name,\n hyperparameter_ranges=hyperparameter_ranges,\n max_parallel_jobs=1,\n max_jobs=1,\n )\n child_tuner = _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=child_tuning_job_name,\n hyperparameter_ranges=hyperparameter_ranges,\n warm_start_config=WarmStartConfig(\n warm_start_type=WarmStartTypes.IDENTICAL_DATA_AND_ALGORITHM,\n parents=[parent_tuning_job_name],\n ),\n max_parallel_jobs=1,\n max_jobs=1,\n )\n\n child_warm_start_config_response = WarmStartConfig.from_job_desc(\n sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(\n HyperParameterTuningJobName=child_tuning_job_name\n )[\"WarmStartConfig\"]\n )\n\n assert child_warm_start_config_response.type == child_tuner.warm_start_config.type\n assert child_warm_start_config_response.parents == child_tuner.warm_start_config.parents\n\n\ndef test_tuning_kmeans_identical_dataset_algorithm_tuner(\n sagemaker_session, kmeans_train_set, kmeans_estimator, hyperparameter_ranges\n):\n \"\"\"Tests Identical dataset and algorithm use case with one parent and child job launched with\n 
.identical_dataset_and_algorithm_tuner() \"\"\"\n\n parent_tuning_job_name = unique_name_from_base(\"km-iden1-parent\", max_length=32)\n child_tuning_job_name = unique_name_from_base(\"km-iden1-child\", max_length=32)\n\n parent_tuner = _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=parent_tuning_job_name,\n hyperparameter_ranges=hyperparameter_ranges,\n )\n\n child_tuner = parent_tuner.identical_dataset_and_algorithm_tuner()\n _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=child_tuning_job_name,\n tuner=child_tuner,\n max_parallel_jobs=1,\n max_jobs=1,\n )\n\n child_warm_start_config_response = WarmStartConfig.from_job_desc(\n sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(\n HyperParameterTuningJobName=child_tuning_job_name\n )[\"WarmStartConfig\"]\n )\n\n assert child_warm_start_config_response.type == child_tuner.warm_start_config.type\n assert child_warm_start_config_response.parents == child_tuner.warm_start_config.parents\n\n\ndef test_create_tuning_kmeans_identical_dataset_algorithm_tuner(\n sagemaker_session, kmeans_train_set, kmeans_estimator, hyperparameter_ranges\n):\n \"\"\"Tests Identical dataset and algorithm use case with one parent and child job launched with\n .create_identical_dataset_and_algorithm_tuner() \"\"\"\n\n parent_tuning_job_name = unique_name_from_base(\"km-iden2-parent\", max_length=32)\n child_tuning_job_name = unique_name_from_base(\"km-iden2-child\", max_length=32)\n\n parent_tuner = _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=parent_tuning_job_name,\n hyperparameter_ranges=hyperparameter_ranges,\n max_parallel_jobs=1,\n max_jobs=1,\n )\n\n child_tuner = create_identical_dataset_and_algorithm_tuner(\n parent=parent_tuner.latest_tuning_job.name, sagemaker_session=sagemaker_session\n )\n\n _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=child_tuning_job_name,\n tuner=child_tuner,\n max_parallel_jobs=1,\n max_jobs=1,\n )\n\n child_warm_start_config_response = WarmStartConfig.from_job_desc(\n sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(\n HyperParameterTuningJobName=child_tuning_job_name\n )[\"WarmStartConfig\"]\n )\n\n assert child_warm_start_config_response.type == child_tuner.warm_start_config.type\n assert child_warm_start_config_response.parents == child_tuner.warm_start_config.parents\n\n\ndef test_transfer_learning_tuner(\n sagemaker_session, kmeans_train_set, kmeans_estimator, hyperparameter_ranges\n):\n \"\"\"Tests Transfer learning use case with one parent and child job launched with\n .transfer_learning_tuner() \"\"\"\n\n parent_tuning_job_name = unique_name_from_base(\"km-tran1-parent\", max_length=32)\n child_tuning_job_name = unique_name_from_base(\"km-tran1-child\", max_length=32)\n\n parent_tuner = _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=parent_tuning_job_name,\n hyperparameter_ranges=hyperparameter_ranges,\n max_jobs=1,\n max_parallel_jobs=1,\n )\n\n child_tuner = parent_tuner.transfer_learning_tuner()\n _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=child_tuning_job_name,\n tuner=child_tuner,\n max_parallel_jobs=1,\n max_jobs=1,\n )\n\n child_warm_start_config_response = WarmStartConfig.from_job_desc(\n sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(\n HyperParameterTuningJobName=child_tuning_job_name\n )[\"WarmStartConfig\"]\n )\n\n assert child_warm_start_config_response.type == child_tuner.warm_start_config.type\n assert child_warm_start_config_response.parents == 
child_tuner.warm_start_config.parents\n\n\ndef test_create_transfer_learning_tuner(\n sagemaker_session, kmeans_train_set, kmeans_estimator, hyperparameter_ranges\n):\n \"\"\"Tests Transfer learning use case with two parents and child job launched with\n create_transfer_learning_tuner() \"\"\"\n parent_tuning_job_name_1 = unique_name_from_base(\"km-tran2-parent1\", max_length=32)\n parent_tuning_job_name_2 = unique_name_from_base(\"km-tran2-parent2\", max_length=32)\n child_tuning_job_name = unique_name_from_base(\"km-tran2-child\", max_length=32)\n\n parent_tuner_1 = _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=parent_tuning_job_name_1,\n hyperparameter_ranges=hyperparameter_ranges,\n max_parallel_jobs=1,\n max_jobs=1,\n )\n\n parent_tuner_2 = _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=parent_tuning_job_name_2,\n hyperparameter_ranges=hyperparameter_ranges,\n max_parallel_jobs=1,\n max_jobs=1,\n )\n\n child_tuner = create_transfer_learning_tuner(\n parent=parent_tuner_1.latest_tuning_job.name,\n sagemaker_session=sagemaker_session,\n estimator=kmeans_estimator,\n additional_parents={parent_tuner_2.latest_tuning_job.name},\n )\n\n _tune(kmeans_estimator, kmeans_train_set, job_name=child_tuning_job_name, tuner=child_tuner)\n\n child_warm_start_config_response = WarmStartConfig.from_job_desc(\n sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(\n HyperParameterTuningJobName=child_tuning_job_name\n )[\"WarmStartConfig\"]\n )\n\n assert child_warm_start_config_response.type == child_tuner.warm_start_config.type\n assert child_warm_start_config_response.parents == child_tuner.warm_start_config.parents\n\n\ndef test_tuning_kmeans_identical_dataset_algorithm_tuner_from_non_terminal_parent(\n sagemaker_session, kmeans_train_set, kmeans_estimator, hyperparameter_ranges\n):\n \"\"\"Tests Identical dataset and algorithm use case with one non terminal parent and child job launched with\n .identical_dataset_and_algorithm_tuner() \"\"\"\n parent_tuning_job_name = unique_name_from_base(\"km-non-term\", max_length=32)\n child_tuning_job_name = unique_name_from_base(\"km-non-term-child\", max_length=32)\n\n parent_tuner = _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=parent_tuning_job_name,\n hyperparameter_ranges=hyperparameter_ranges,\n wait_till_terminal=False,\n max_parallel_jobs=1,\n max_jobs=1,\n )\n\n child_tuner = parent_tuner.identical_dataset_and_algorithm_tuner()\n with pytest.raises(ClientError):\n _tune(\n kmeans_estimator,\n kmeans_train_set,\n job_name=child_tuning_job_name,\n tuner=child_tuner,\n max_parallel_jobs=1,\n max_jobs=1,\n )\n\n\n@pytest.mark.skipif(\n tests.integ.test_region() in tests.integ.NO_LDA_REGIONS,\n reason=\"LDA image is not supported in certain regions\",\n)\ndef test_tuning_lda(sagemaker_session, cpu_instance_type):\n with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):\n data_path = os.path.join(DATA_DIR, \"lda\")\n data_filename = \"nips-train_1.pbr\"\n\n with open(os.path.join(data_path, data_filename), \"rb\") as f:\n all_records = read_records(f)\n\n # all records must be same\n feature_num = int(all_records[0].features[\"values\"].float32_tensor.shape[0])\n\n lda = LDA(\n role=\"SageMakerRole\",\n train_instance_type=cpu_instance_type,\n num_topics=10,\n sagemaker_session=sagemaker_session,\n )\n\n record_set = prepare_record_set_from_local_files(\n data_path, lda.data_location, len(all_records), feature_num, sagemaker_session\n )\n test_record_set = prepare_record_set_from_local_files(\n data_path, 
lda.data_location, len(all_records), feature_num, sagemaker_session\n )\n test_record_set.channel = \"test\"\n\n # specify which hp you want to optimize over\n hyperparameter_ranges = {\n \"alpha0\": ContinuousParameter(1, 10),\n \"num_topics\": IntegerParameter(1, 2),\n }\n objective_metric_name = \"test:pwll\"\n\n tuner = HyperparameterTuner(\n estimator=lda,\n objective_metric_name=objective_metric_name,\n hyperparameter_ranges=hyperparameter_ranges,\n objective_type=\"Maximize\",\n max_jobs=2,\n max_parallel_jobs=2,\n early_stopping_type=\"Auto\",\n )\n\n tuning_job_name = unique_name_from_base(\"test-lda\", max_length=32)\n tuner.fit([record_set, test_record_set], mini_batch_size=1, job_name=tuning_job_name)\n\n latest_tuning_job_name = tuner.latest_tuning_job.name\n\n print(\"Started hyperparameter tuning job with name:\" + latest_tuning_job_name)\n\n time.sleep(15)\n tuner.wait()\n\n attached_tuner = HyperparameterTuner.attach(\n tuning_job_name, sagemaker_session=sagemaker_session\n )\n assert attached_tuner.early_stopping_type == \"Auto\"\n assert attached_tuner.estimator.alpha0 == 1.0\n assert attached_tuner.estimator.num_topics == 1\n\n best_training_job = attached_tuner.best_training_job()\n\n with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):\n predictor = tuner.deploy(1, cpu_instance_type)\n predict_input = np.random.rand(1, feature_num)\n result = predictor.predict(predict_input)\n\n assert len(result) == 1\n for record in result:\n assert record.label[\"topic_mixture\"] is not None\n\n\ndef test_stop_tuning_job(sagemaker_session, cpu_instance_type):\n feature_num = 14\n train_input = np.random.rand(1000, feature_num)\n\n rcf = RandomCutForest(\n role=\"SageMakerRole\",\n train_instance_count=1,\n train_instance_type=cpu_instance_type,\n num_trees=50,\n num_samples_per_tree=20,\n sagemaker_session=sagemaker_session,\n )\n\n records = rcf.record_set(train_input)\n records.distribution = \"FullyReplicated\"\n\n test_records = rcf.record_set(train_input, channel=\"test\")\n test_records.distribution = \"FullyReplicated\"\n\n hyperparameter_ranges = {\n \"num_trees\": IntegerParameter(50, 100),\n \"num_samples_per_tree\": IntegerParameter(1, 2),\n }\n\n objective_metric_name = \"test:f1\"\n tuner = HyperparameterTuner(\n estimator=rcf,\n objective_metric_name=objective_metric_name,\n hyperparameter_ranges=hyperparameter_ranges,\n objective_type=\"Maximize\",\n max_jobs=2,\n max_parallel_jobs=2,\n )\n\n tuning_job_name = unique_name_from_base(\"test-randomcutforest\", max_length=32)\n tuner.fit([records, test_records], tuning_job_name)\n\n time.sleep(15)\n\n latest_tuning_job_name = tuner.latest_tuning_job.name\n\n print(\"Attempting to stop {}\".format(latest_tuning_job_name))\n\n tuner.stop_tuning_job()\n\n desc = tuner.latest_tuning_job.sagemaker_session.sagemaker_client.describe_hyper_parameter_tuning_job(\n HyperParameterTuningJobName=latest_tuning_job_name\n )\n assert desc[\"HyperParameterTuningJobStatus\"] == \"Stopping\"\n\n\n@pytest.mark.canary_quick\ndef test_tuning_mxnet(sagemaker_session, mxnet_full_version, cpu_instance_type):\n with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):\n script_path = os.path.join(DATA_DIR, \"mxnet_mnist\", \"mnist.py\")\n data_path = os.path.join(DATA_DIR, \"mxnet_mnist\")\n\n estimator = MXNet(\n entry_point=script_path,\n role=\"SageMakerRole\",\n py_version=PYTHON_VERSION,\n train_instance_count=1,\n train_instance_type=cpu_instance_type,\n framework_version=mxnet_full_version,\n 
sagemaker_session=sagemaker_session,\n )\n\n hyperparameter_ranges = {\"learning-rate\": ContinuousParameter(0.01, 0.2)}\n objective_metric_name = \"Validation-accuracy\"\n metric_definitions = [\n {\"Name\": \"Validation-accuracy\", \"Regex\": \"Validation-accuracy=([0-9\\\\.]+)\"}\n ]\n tuner = HyperparameterTuner(\n estimator,\n objective_metric_name,\n hyperparameter_ranges,\n metric_definitions,\n max_jobs=4,\n max_parallel_jobs=2,\n )\n\n train_input = estimator.sagemaker_session.upload_data(\n path=os.path.join(data_path, \"train\"), key_prefix=\"integ-test-data/mxnet_mnist/train\"\n )\n test_input = estimator.sagemaker_session.upload_data(\n path=os.path.join(data_path, \"test\"), key_prefix=\"integ-test-data/mxnet_mnist/test\"\n )\n\n tuning_job_name = unique_name_from_base(\"tune-mxnet\", max_length=32)\n tuner.fit({\"train\": train_input, \"test\": test_input}, job_name=tuning_job_name)\n\n print(\"Started hyperparameter tuning job with name:\" + tuning_job_name)\n\n time.sleep(15)\n tuner.wait()\n\n best_training_job = tuner.best_training_job()\n with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):\n predictor = tuner.deploy(1, cpu_instance_type)\n data = np.zeros(shape=(1, 1, 28, 28))\n predictor.predict(data)\n\n\n@pytest.mark.canary_quick\ndef test_tuning_tf_script_mode(sagemaker_session, cpu_instance_type):\n resource_path = os.path.join(DATA_DIR, \"tensorflow_mnist\")\n script_path = os.path.join(resource_path, \"mnist.py\")\n\n estimator = TensorFlow(\n entry_point=script_path,\n role=\"SageMakerRole\",\n train_instance_count=1,\n train_instance_type=cpu_instance_type,\n script_mode=True,\n sagemaker_session=sagemaker_session,\n py_version=PYTHON_VERSION,\n framework_version=TensorFlow.LATEST_VERSION,\n )\n\n hyperparameter_ranges = {\"epochs\": IntegerParameter(1, 2)}\n objective_metric_name = \"accuracy\"\n metric_definitions = [{\"Name\": objective_metric_name, \"Regex\": \"accuracy = ([0-9\\\\.]+)\"}]\n\n tuner = HyperparameterTuner(\n estimator,\n objective_metric_name,\n hyperparameter_ranges,\n metric_definitions,\n max_jobs=2,\n max_parallel_jobs=2,\n )\n\n with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):\n inputs = estimator.sagemaker_session.upload_data(\n path=os.path.join(resource_path, \"data\"), key_prefix=\"scriptmode/mnist\"\n )\n\n tuning_job_name = unique_name_from_base(\"tune-tf-script-mode\", max_length=32)\n tuner.fit(inputs, job_name=tuning_job_name)\n\n print(\"Started hyperparameter tuning job with name: \" + tuning_job_name)\n\n time.sleep(15)\n tuner.wait()\n\n\n@pytest.mark.canary_quick\n@pytest.mark.skipif(PYTHON_VERSION != \"py2\", reason=\"TensorFlow image supports only python 2.\")\ndef test_tuning_tf(sagemaker_session, cpu_instance_type):\n with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):\n script_path = os.path.join(DATA_DIR, \"iris\", \"iris-dnn-classifier.py\")\n\n estimator = TensorFlow(\n entry_point=script_path,\n role=\"SageMakerRole\",\n training_steps=1,\n evaluation_steps=1,\n hyperparameters={\"input_tensor_name\": \"inputs\"},\n train_instance_count=1,\n train_instance_type=cpu_instance_type,\n sagemaker_session=sagemaker_session,\n )\n\n inputs = sagemaker_session.upload_data(path=DATA_PATH, key_prefix=\"integ-test-data/tf_iris\")\n hyperparameter_ranges = {\"learning_rate\": ContinuousParameter(0.05, 0.2)}\n\n objective_metric_name = \"loss\"\n metric_definitions = [{\"Name\": \"loss\", \"Regex\": \"loss = ([0-9\\\\.]+)\"}]\n\n tuner = HyperparameterTuner(\n estimator,\n 
objective_metric_name,\n hyperparameter_ranges,\n metric_definitions,\n objective_type=\"Minimize\",\n max_jobs=2,\n max_parallel_jobs=2,\n )\n\n tuning_job_name = unique_name_from_base(\"tune-tf\", max_length=32)\n tuner.fit(inputs, job_name=tuning_job_name)\n\n print(\"Started hyperparameter tuning job with name:\" + tuning_job_name)\n\n time.sleep(15)\n tuner.wait()\n\n best_training_job = tuner.best_training_job()\n with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):\n predictor = tuner.deploy(1, cpu_instance_type)\n\n features = [6.4, 3.2, 4.5, 1.5]\n dict_result = predictor.predict({\"inputs\": features})\n print(\"predict result: {}\".format(dict_result))\n list_result = predictor.predict(features)\n print(\"predict result: {}\".format(list_result))\n\n assert dict_result == list_result\n\n\n@pytest.mark.skipif(PYTHON_VERSION != \"py2\", reason=\"TensorFlow image supports only python 2.\")\ndef test_tuning_tf_vpc_multi(sagemaker_session, cpu_instance_type):\n \"\"\"Test Tensorflow multi-instance using the same VpcConfig for training and inference\"\"\"\n instance_type = cpu_instance_type\n instance_count = 2\n\n script_path = os.path.join(DATA_DIR, \"iris\", \"iris-dnn-classifier.py\")\n\n ec2_client = sagemaker_session.boto_session.client(\"ec2\")\n subnet_ids, security_group_id = vpc_test_utils.get_or_create_vpc_resources(ec2_client)\n vpc_test_utils.setup_security_group_for_encryption(ec2_client, security_group_id)\n\n estimator = TensorFlow(\n entry_point=script_path,\n role=\"SageMakerRole\",\n training_steps=1,\n evaluation_steps=1,\n hyperparameters={\"input_tensor_name\": \"inputs\"},\n train_instance_count=instance_count,\n train_instance_type=instance_type,\n sagemaker_session=sagemaker_session,\n base_job_name=\"test-vpc-tf\",\n subnets=subnet_ids,\n security_group_ids=[security_group_id],\n encrypt_inter_container_traffic=True,\n )\n\n inputs = sagemaker_session.upload_data(path=DATA_PATH, key_prefix=\"integ-test-data/tf_iris\")\n hyperparameter_ranges = {\"learning_rate\": ContinuousParameter(0.05, 0.2)}\n\n objective_metric_name = \"loss\"\n metric_definitions = [{\"Name\": \"loss\", \"Regex\": \"loss = ([0-9\\\\.]+)\"}]\n\n tuner = HyperparameterTuner(\n estimator,\n objective_metric_name,\n hyperparameter_ranges,\n metric_definitions,\n objective_type=\"Minimize\",\n max_jobs=2,\n max_parallel_jobs=2,\n )\n\n tuning_job_name = unique_name_from_base(\"tune-tf\", max_length=32)\n with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):\n tuner.fit(inputs, job_name=tuning_job_name)\n\n print(\"Started hyperparameter tuning job with name:\" + tuning_job_name)\n\n time.sleep(15)\n tuner.wait()\n\n\n@pytest.mark.canary_quick\ndef test_tuning_chainer(sagemaker_session, cpu_instance_type):\n with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):\n script_path = os.path.join(DATA_DIR, \"chainer_mnist\", \"mnist.py\")\n data_path = os.path.join(DATA_DIR, \"chainer_mnist\")\n\n estimator = Chainer(\n entry_point=script_path,\n role=\"SageMakerRole\",\n py_version=PYTHON_VERSION,\n train_instance_count=1,\n train_instance_type=cpu_instance_type,\n sagemaker_session=sagemaker_session,\n hyperparameters={\"epochs\": 1},\n )\n\n train_input = estimator.sagemaker_session.upload_data(\n path=os.path.join(data_path, \"train\"), key_prefix=\"integ-test-data/chainer_mnist/train\"\n )\n test_input = estimator.sagemaker_session.upload_data(\n path=os.path.join(data_path, \"test\"), key_prefix=\"integ-test-data/chainer_mnist/test\"\n )\n\n hyperparameter_ranges = 
{\"alpha\": ContinuousParameter(0.001, 0.005)}\n\n objective_metric_name = \"Validation-accuracy\"\n metric_definitions = [\n {\n \"Name\": \"Validation-accuracy\",\n \"Regex\": r\"\\[J1\\s+\\d\\.\\d+\\s+\\d\\.\\d+\\s+\\d\\.\\d+\\s+(\\d\\.\\d+)\",\n }\n ]\n\n tuner = HyperparameterTuner(\n estimator,\n objective_metric_name,\n hyperparameter_ranges,\n metric_definitions,\n max_jobs=2,\n max_parallel_jobs=2,\n )\n\n tuning_job_name = unique_name_from_base(\"chainer\", max_length=32)\n tuner.fit({\"train\": train_input, \"test\": test_input}, job_name=tuning_job_name)\n\n print(\"Started hyperparameter tuning job with name:\" + tuning_job_name)\n\n time.sleep(15)\n tuner.wait()\n\n best_training_job = tuner.best_training_job()\n with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):\n predictor = tuner.deploy(1, cpu_instance_type)\n\n batch_size = 100\n data = np.zeros((batch_size, 784), dtype=\"float32\")\n output = predictor.predict(data)\n assert len(output) == batch_size\n\n data = np.zeros((batch_size, 1, 28, 28), dtype=\"float32\")\n output = predictor.predict(data)\n assert len(output) == batch_size\n\n data = np.zeros((batch_size, 28, 28), dtype=\"float32\")\n output = predictor.predict(data)\n assert len(output) == batch_size\n\n\n@pytest.mark.canary_quick\n@pytest.mark.skip(\n reason=\"This test has always failed, but the failure was masked by a bug. \"\n \"This test should be fixed. Details in https://github.com/aws/sagemaker-python-sdk/pull/968\"\n)\ndef test_attach_tuning_pytorch(sagemaker_session, cpu_instance_type):\n mnist_dir = os.path.join(DATA_DIR, \"pytorch_mnist\")\n mnist_script = os.path.join(mnist_dir, \"mnist.py\")\n\n estimator = PyTorch(\n entry_point=mnist_script,\n role=\"SageMakerRole\",\n train_instance_count=1,\n py_version=PYTHON_VERSION,\n train_instance_type=cpu_instance_type,\n sagemaker_session=sagemaker_session,\n )\n\n with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):\n objective_metric_name = \"evaluation-accuracy\"\n metric_definitions = [\n {\"Name\": \"evaluation-accuracy\", \"Regex\": r\"Overall test accuracy: (\\d+)\"}\n ]\n hyperparameter_ranges = {\"batch-size\": IntegerParameter(50, 100)}\n\n tuner = HyperparameterTuner(\n estimator,\n objective_metric_name,\n hyperparameter_ranges,\n metric_definitions,\n max_jobs=2,\n max_parallel_jobs=2,\n early_stopping_type=\"Auto\",\n )\n\n training_data = estimator.sagemaker_session.upload_data(\n path=os.path.join(mnist_dir, \"training\"),\n key_prefix=\"integ-test-data/pytorch_mnist/training\",\n )\n\n tuning_job_name = unique_name_from_base(\"pytorch\", max_length=32)\n tuner.fit({\"training\": training_data}, job_name=tuning_job_name)\n\n print(\"Started hyperparameter tuning job with name:\" + tuning_job_name)\n\n time.sleep(15)\n tuner.wait()\n\n endpoint_name = tuning_job_name\n model_name = \"model-name-1\"\n attached_tuner = HyperparameterTuner.attach(\n tuning_job_name, sagemaker_session=sagemaker_session\n )\n assert attached_tuner.early_stopping_type == \"Auto\"\n\n with timeout_and_delete_endpoint_by_name(endpoint_name, sagemaker_session):\n predictor = attached_tuner.deploy(\n 1, cpu_instance_type, endpoint_name=endpoint_name, model_name=model_name\n )\n best_training_job = tuner.best_training_job()\n with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):\n predictor = attached_tuner.deploy(1, cpu_instance_type)\n data = np.zeros(shape=(1, 1, 28, 28), dtype=np.float32)\n predictor.predict(data)\n\n batch_size = 100\n data = 
np.random.rand(batch_size, 1, 28, 28).astype(np.float32)\n output = predictor.predict(data)\n\n assert output.shape == (batch_size, 10)\n _assert_model_name_match(sagemaker_session.sagemaker_client, endpoint_name, model_name)\n\n\n@pytest.mark.canary_quick\ndef test_tuning_byo_estimator(sagemaker_session, cpu_instance_type):\n \"\"\"Use Factorization Machines algorithm as an example here.\n\n First we need to prepare data for training. We take standard data set, convert it to the\n format that the algorithm can process and upload it to S3.\n Then we create the Estimator and set hyperparamets as required by the algorithm.\n Next, we can call fit() with path to the S3.\n Later the trained model is deployed and prediction is called against the endpoint.\n Default predictor is updated with json serializer and deserializer.\n \"\"\"\n image_name = registry(sagemaker_session.boto_session.region_name) + \"/factorization-machines:1\"\n training_data_path = os.path.join(DATA_DIR, \"dummy_tensor\")\n\n with timeout(minutes=TUNING_DEFAULT_TIMEOUT_MINUTES):\n data_path = os.path.join(DATA_DIR, \"one_p_mnist\", \"mnist.pkl.gz\")\n pickle_args = {} if sys.version_info.major == 2 else {\"encoding\": \"latin1\"}\n\n with gzip.open(data_path, \"rb\") as f:\n train_set, _, _ = pickle.load(f, **pickle_args)\n\n prefix = \"test_byo_estimator\"\n key = \"recordio-pb-data\"\n s3_train_data = sagemaker_session.upload_data(\n path=training_data_path, key_prefix=os.path.join(prefix, \"train\", key)\n )\n\n estimator = Estimator(\n image_name=image_name,\n role=\"SageMakerRole\",\n train_instance_count=1,\n train_instance_type=cpu_instance_type,\n sagemaker_session=sagemaker_session,\n )\n\n estimator.set_hyperparameters(\n num_factors=10, feature_dim=784, mini_batch_size=100, predictor_type=\"binary_classifier\"\n )\n\n hyperparameter_ranges = {\"mini_batch_size\": IntegerParameter(100, 200)}\n\n tuner = HyperparameterTuner(\n estimator=estimator,\n objective_metric_name=\"test:binary_classification_accuracy\",\n hyperparameter_ranges=hyperparameter_ranges,\n max_jobs=2,\n max_parallel_jobs=2,\n )\n\n tuner.fit(\n {\"train\": s3_train_data, \"test\": s3_train_data},\n include_cls_metadata=False,\n job_name=unique_name_from_base(\"byo\", 32),\n )\n\n print(\"Started hyperparameter tuning job with name:\" + tuner.latest_tuning_job.name)\n\n time.sleep(15)\n tuner.wait()\n\n best_training_job = tuner.best_training_job()\n with timeout_and_delete_endpoint_by_name(best_training_job, sagemaker_session):\n predictor = tuner.deploy(1, cpu_instance_type, endpoint_name=best_training_job)\n predictor.serializer = _fm_serializer\n predictor.content_type = \"application/json\"\n predictor.deserializer = json_deserializer\n\n result = predictor.predict(train_set[0][:10])\n\n assert len(result[\"predictions\"]) == 10\n for prediction in result[\"predictions\"]:\n assert prediction[\"score\"] is not None\n\n\n# Serializer for the Factorization Machines predictor (for BYO example)\ndef _fm_serializer(data):\n js = {\"instances\": []}\n for row in data:\n js[\"instances\"].append({\"features\": row.tolist()})\n return json.dumps(js)\n\n\ndef _assert_model_name_match(sagemaker_client, endpoint_config_name, model_name):\n endpoint_config_description = sagemaker_client.describe_endpoint_config(\n EndpointConfigName=endpoint_config_name\n )\n assert model_name == endpoint_config_description[\"ProductionVariants\"][0][\"ModelName\"]\n"
] |
[
[
"numpy.zeros",
"numpy.random.rand"
]
] |
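The test file in the row above repeats one pattern across frameworks: build an estimator, declare hyperparameter ranges, fit a HyperparameterTuner, then deploy the best training job. Below is a minimal sketch of that pattern, assuming the v1 SageMaker Python SDK the file itself uses; the image URI, role, S3 path, and instance type are placeholders, not real resources.

```python
# Minimal sketch of the tuning pattern the tests above repeat, assuming the
# v1 SageMaker Python SDK (same API as the file). Image URI, role, S3 input,
# and instance type are placeholders.
from sagemaker.estimator import Estimator
from sagemaker.tuner import HyperparameterTuner, ContinuousParameter

estimator = Estimator(
    image_name="<training-image-uri>",   # placeholder container image
    role="SageMakerRole",
    train_instance_count=1,
    train_instance_type="ml.m5.xlarge",
)
tuner = HyperparameterTuner(
    estimator,
    objective_metric_name="loss",
    hyperparameter_ranges={"learning_rate": ContinuousParameter(0.01, 0.2)},
    metric_definitions=[{"Name": "loss", "Regex": "loss = ([0-9\\.]+)"}],
    objective_type="Minimize",
    max_jobs=4,
    max_parallel_jobs=2,
)
tuner.fit({"train": "s3://<bucket>/train"})  # starts the tuning job
tuner.wait()                                 # block until all training jobs finish
predictor = tuner.deploy(1, "ml.m5.xlarge")  # deploys the best training job
```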
GT4SD/moses
|
[
"2fb13dc757f82484beaae19140be335affb60c4b"
] |
[
"moses/metrics/metrics.py"
] |
[
"import warnings\n\nimport numpy as np\nfrom scipy.spatial.distance import cosine\n\nfrom .utils import compute_fragments, average_agg_tanimoto, \\\n compute_scaffolds, fingerprints, \\\n get_mol, canonic_smiles, mol_passes_filters, \\\n logP, QED, SA, NP, weight\nfrom moses.utils import mapper\nfrom .utils_fcd import get_predictions, calculate_frechet_distance\nfrom multiprocessing import Pool\nfrom moses.utils import disable_rdkit_log, enable_rdkit_log\n\n\ndef get_all_metrics(test, gen, k=[1000, 10000], n_jobs=1, gpu=-1,\n batch_size=512, test_scaffolds=None,\n ptest=None, ptest_scaffolds=None):\n '''\n Computes all available metrics between test (scaffold test) and generated sets of SMILES.\n Parameters:\n test: list of test SMILES\n gen: list of generated SMILES\n k: list with values for unique@k.\n Will calculate number of unique molecules in the first k molecules.\n n_jobs: number of workers for parallel processing\n gpu: index of GPU for FCD metric and internal diversity, -1 means use CPU\n batch_size: batch size for FCD metric\n test_scaffolds: list of scaffold test SMILES\n Will compute only on the general test set if not specified\n ptest: dict with precalculated statistics of the test set\n ptest_scaffolds: dict with precalculated statistics of the scaffold test set\n \n \n Available metrics:\n * %valid\n * %unique@k\n * Frechet ChemNet Distance (FCD)\n * Fragment similarity (Frag)\n * Scaffold similarity (Scaf)\n * Similarity to nearest neighbour (SNN)\n * Internal diversity (IntDiv)\n * Internal diversity 2: using square root of mean squared Tanimoto similarity (IntDiv2)\n * %passes filters (Filters)\n * Distribution difference for logP, SA, QED, NP, weight\n '''\n disable_rdkit_log()\n metrics = {}\n if n_jobs != 1:\n pool = Pool(n_jobs)\n else:\n pool = 1\n metrics['valid'] = fraction_valid(gen, n_jobs=n_jobs)\n gen = remove_invalid(gen, canonize=True)\n if not isinstance(k, (list, tuple)):\n k = [k]\n for _k in k:\n metrics['unique@{}'.format(_k)] = fraction_unique(gen, _k, pool)\n\n if ptest is None:\n ptest = compute_intermediate_statistics(test, n_jobs=n_jobs, gpu=gpu, batch_size=batch_size)\n if test_scaffolds is not None and ptest_scaffolds is None:\n ptest_scaffolds = compute_intermediate_statistics(test_scaffolds, n_jobs=n_jobs,\n gpu=gpu, batch_size=batch_size)\n mols = mapper(pool)(get_mol, gen)\n kwargs = {'n_jobs': pool, 'gpu': gpu, 'batch_size': batch_size}\n metrics['FCD/Test'] = FCDMetric(**kwargs)(gen=gen, ptest=ptest['FCD'])\n metrics['SNN/Test'] = SNNMetric(**kwargs)(gen=mols, ptest=ptest['SNN'])\n metrics['Frag/Test'] = FragMetric(**kwargs)(gen=mols, ptest=ptest['Frag'])\n metrics['Scaf/Test'] = ScafMetric(**kwargs)(gen=mols, ptest=ptest['Scaf'])\n if ptest_scaffolds is not None:\n metrics['FCD/TestSF'] = FCDMetric(**kwargs)(gen=gen, ptest=ptest_scaffolds['FCD'])\n metrics['SNN/TestSF'] = SNNMetric(**kwargs)(gen=mols, ptest=ptest_scaffolds['SNN'])\n metrics['Frag/TestSF'] = FragMetric(**kwargs)(gen=mols, ptest=ptest_scaffolds['Frag'])\n metrics['Scaf/TestSF'] = ScafMetric(**kwargs)(gen=mols, ptest=ptest_scaffolds['Scaf'])\n\n metrics['IntDiv'] = internal_diversity(mols, pool, gpu=gpu)\n metrics['IntDiv2'] = internal_diversity(mols, pool, gpu=gpu, p=2)\n metrics['Filters'] = fraction_passes_filters(mols, pool)\n\n # Properties\n for name, func in [('logP', logP), ('SA', SA),\n ('QED', QED), ('NP', NP),\n ('weight', weight)]:\n metrics[name] = FrechetMetric(func, **kwargs)(gen=mols, ptest=ptest[name])\n enable_rdkit_log()\n if n_jobs != 1:\n 
pool.terminate()\n return metrics\n\n\n\ndef compute_intermediate_statistics(smiles, n_jobs=1, gpu=-1, batch_size=512):\n '''\n The function precomputes statistics such as mean and variance for FCD, etc.\n It is useful to compute the statistics for test and scaffold test sets to\n speedup metrics calculation.\n '''\n\n if n_jobs != 1:\n pool = Pool(n_jobs)\n else:\n pool = 1\n statistics = {}\n mols = mapper(pool)(get_mol, smiles)\n kwargs = {'n_jobs': n_jobs, 'gpu': gpu, 'batch_size': batch_size}\n statistics['FCD'] = FCDMetric(**kwargs).precalc(smiles)\n statistics['SNN'] = SNNMetric(**kwargs).precalc(mols)\n statistics['Frag'] = FragMetric(**kwargs).precalc(mols)\n statistics['Scaf'] = ScafMetric(**kwargs).precalc(mols)\n for name, func in [('logP', logP), ('SA', SA),\n ('QED', QED), ('NP', NP),\n ('weight', weight)]:\n statistics[name] = FrechetMetric(func, **kwargs).precalc(mols)\n if n_jobs != 1:\n pool.terminate()\n return statistics\n\n\ndef fraction_passes_filters(gen, n_jobs=1):\n '''\n Computes the fraction of molecules that pass filters:\n * MCF\n * PAINS\n * Only allowed atoms ('C','N','S','O','F','Cl','Br','H')\n * No charges\n '''\n passes = mapper(n_jobs)(mol_passes_filters, gen)\n return np.mean(passes)\n\n\ndef internal_diversity(gen, n_jobs=1, gpu=-1, fp_type='morgan', gen_fps=None, p=1):\n '''\n Computes internal diversity as:\n 1/|A|^2 sum_{x, y in AxA} (1-tanimoto(x, y))\n '''\n if gen_fps is None:\n gen_fps = fingerprints(gen, fp_type=fp_type, n_jobs=n_jobs)\n return 1 - (average_agg_tanimoto(gen_fps, gen_fps,\n agg='mean', gpu=gpu, p=p)).mean()\n\n\ndef fraction_unique(gen, k=None, n_jobs=1, check_validity=True):\n '''\n Computes a number of unique molecules\n :param gen: list of SMILES\n :param k: compute unique@k\n :param check_validity: raises ValueError if invalid molecules are present\n '''\n if k is not None:\n if len(gen) < k:\n warnings.warn(\n \"Can't compute unique@{}. 
gen contains only {} molecules\".format(\n k, len(gen)))\n gen = gen[:k]\n canonic = set(mapper(n_jobs)(canonic_smiles, gen))\n if None in canonic and check_validity:\n raise ValueError(\"Invalid molecule passed to unique@k\")\n return len(canonic) / len(gen)\n\n\ndef fraction_valid(gen, n_jobs=1):\n '''\n Computes a number of valid molecules\n :param gen: list of SMILES\n '''\n gen = mapper(n_jobs)(get_mol, gen)\n return 1 - gen.count(None) / len(gen)\n\n\ndef remove_invalid(gen, canonize=True, n_jobs=1):\n '''\n Removes invalid molecules from the dataset\n '''\n if canonize:\n mols = mapper(n_jobs)(get_mol, gen)\n return [gen_ for gen_, mol in zip(gen, mols) if mol is not None]\n else:\n return [x for x in mapper(n_jobs)(canonic_smiles, gen) if\n x is not None]\n\n\nclass Metric:\n def __init__(self, n_jobs=1, gpu=-1, batch_size=512, **kwargs):\n self.n_jobs = n_jobs\n self.gpu = gpu\n self.batch_size = batch_size\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __call__(self, test=None, gen=None, ptest=None, pgen=None):\n assert (test is None) != (ptest is None), \"specify test xor ptest\"\n assert (gen is None) != (pgen is None), \"specify gen xor pgen\"\n if ptest is None:\n ptest = self.precalc(test)\n if pgen is None:\n pgen = self.precalc(gen)\n return self.metric(ptest, pgen)\n\n def precalc(self, molecules):\n raise NotImplementedError\n\n def metric(self, ptest, pgen):\n raise NotImplementedError\n\n\nclass FCDMetric(Metric):\n '''\n Computes Frechet ChemNet Distance\n '''\n def precalc(self, smiles):\n if len(smiles) < 2:\n warnings.warn(\"Can't compute FCD for less than 2 molecules\")\n return np.nan\n\n chemnet_activations = get_predictions(smiles, gpu=self.gpu,\n batch_size=self.batch_size)\n mu = chemnet_activations.mean(0)\n sigma = np.cov(chemnet_activations.T)\n return {'mu': mu, 'sigma': sigma}\n\n def metric(self, ptest, pgen):\n return calculate_frechet_distance(ptest['mu'], ptest['sigma'],\n pgen['mu'], pgen['sigma'])\n\n\nclass SNNMetric(Metric):\n '''\n Computes average max similarities of gen SMILES to test SMILES\n '''\n def __init__(self, fp_type='morgan', **kwargs):\n self.fp_type = fp_type\n super().__init__(**kwargs)\n\n def precalc(self, mols):\n return {'fps': fingerprints(mols, n_jobs=self.n_jobs, fp_type=self.fp_type)}\n\n def metric(self, ptest, pgen):\n return average_agg_tanimoto(ptest['fps'], pgen['fps'], gpu=self.gpu)\n\n\ndef cos_distance(test_counts, gen_counts):\n '''\n Computes 1 - cosine similarity between\n dictionaries of form {name: count}.
Non-present\n elements are considered zero\n '''\n if len(test_counts) == 0 or len(gen_counts) == 0:\n return np.nan\n keys = np.unique(list(test_counts.keys()) + list(gen_counts.keys()))\n test_vec = np.array([test_counts.get(k, 0) for k in keys])\n gen_vec = np.array([gen_counts.get(k, 0) for k in keys])\n return 1 - cosine(test_vec, gen_vec)\n\n\nclass FragMetric(Metric):\n def precalc(self, mols):\n return {'frag': compute_fragments(mols, n_jobs=self.n_jobs)}\n\n def metric(self, ptest, pgen):\n return cos_distance(ptest['frag'], pgen['frag'])\n\n\nclass ScafMetric(Metric):\n def precalc(self, mols):\n return {'scaf': compute_scaffolds(mols, n_jobs=self.n_jobs)}\n\n def metric(self, ptest, pgen):\n return cos_distance(ptest['scaf'], pgen['scaf'])\n\n\nclass FrechetMetric(Metric):\n def __init__(self, func=None, **kwargs):\n self.func = func\n super().__init__(**kwargs)\n \n def precalc(self, mols):\n if self.func is not None:\n values = mapper(self.n_jobs)(self.func, mols)\n else:\n values = mols\n return {'mu': np.mean(values), 'var': np.var(values)}\n \n def metric(self, ptest, pgen):\n return calculate_frechet_distance(ptest['mu'], ptest['var'],\n pgen['mu'], pgen['var'])\n"
] |
[
[
"numpy.var",
"numpy.cov",
"numpy.mean",
"scipy.spatial.distance.cosine"
]
] |
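The metrics module in the row above splits every metric into a precalc/metric pair so test-set statistics can be computed once and reused across generated sets. A hedged usage sketch, assuming the module is importable as moses.metrics.metrics (per the row's file path); the SMILES lists are toy placeholders, and the FCD part additionally requires RDKit plus the ChemNet model moses ships.

```python
# Hedged usage sketch for the module above: precompute test-set statistics
# once with compute_intermediate_statistics(), then reuse them for every
# generated set passed to get_all_metrics(). SMILES lists are placeholders.
from moses.metrics.metrics import compute_intermediate_statistics, get_all_metrics

test_smiles = ["CCO", "c1ccccc1", "CC(=O)O"]
gen_smiles = ["CCO", "CCN", "c1ccccc1O"]

ptest = compute_intermediate_statistics(test_smiles, n_jobs=1, gpu=-1)
metrics = get_all_metrics(test_smiles, gen_smiles, k=[2], ptest=ptest)
print(metrics["valid"], metrics["unique@2"])
```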
tobinsouth/privacy-preserving-synthetic-mobility-data
|
[
"fd4d1851b47e3e7304761a894b460e8345fae5db"
] |
[
"MoveSim/code/gen_data.py"
] |
[
"from evaluations import *\r\nfrom utils import *\r\nfrom tqdm import tqdm\r\n\r\ndef distance(gps1,gps2):\r\n x1,y1 = gps1\r\n x2,y2 = gps2\r\n return np.sqrt((x1-x2)**2+(y1-y2)**2 )\r\n\r\n \r\n \r\n\r\ndef gen_matrix(data='geolife'):\r\n train_data = read_data_from_file('../data/%s/real.data'%data)\r\n gps = get_gps('../data/%s/gps'%data)\r\n max_locs = len(gps[0])\r\n\r\n print('Generating Matrices...')\r\n # Essentially, the transition matrix\r\n reg1 = np.zeros([max_locs,max_locs]) \r\n for i in range(len(train_data)):\r\n line = train_data[i]\r\n for j in range(len(line)-1):\r\n reg1[line[j],line[j+1]] +=1\r\n\r\n print('Generated Transition Matrix')\r\n # Now we create a distance matrix\r\n # reg2 = np.zeros([max_locs,max_locs])\r\n # for i in tqdm(range(max_locs)):\r\n # for j in range(max_locs):\r\n # if i!=j:\r\n # reg2[i,j] = distance((gps[0][i],gps[1][i]),(gps[0][j],gps[1][j]))\r\n from scipy.spatial.distance import cdist \r\n gps_np = np.vstack([gps[0], gps[1]]).T\r\n reg2 = cdist(gps_np,gps_np)\r\n print(gps_np.shape)\r\n print('Generated distance matrix')\r\n\r\n \r\n np.save('../data/%s/M1.npy'%data,reg1)\r\n np.save('../data/%s/M2.npy'%data,reg2)\r\n\r\n print('Matrix Generation Finished')\r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n \r\n"
] |
[
[
"scipy.spatial.distance.cdist"
]
] |
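gen_matrix() in the row above replaces a quadratic Python loop over GPS points with scipy's cdist. Here is a self-contained check that the two formulations agree; the random coordinates are placeholders.

```python
# Self-contained check of the vectorization used in gen_matrix() above:
# scipy's cdist reproduces the commented-out double loop over GPS points.
import numpy as np
from scipy.spatial.distance import cdist

gps = np.random.rand(5, 2)            # 5 points as (x, y) rows
loop = np.zeros((5, 5))
for i in range(5):
    for j in range(5):
        loop[i, j] = np.sqrt(((gps[i] - gps[j]) ** 2).sum())

assert np.allclose(loop, cdist(gps, gps))  # 'euclidean' is cdist's default metric
```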
utayao/MedicalZooPytorch
|
[
"34d0a735cec1bf554440423b52758d67334e64bf"
] |
[
"lib/visual3D_temp/BaseWriter.py"
] |
[
"import os\n\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom lib.utils import datestr\n\n\"\"\"\nUnder construction ......\n\"\"\"\ndict_class_names = {\"iseg2017\": [\"air\", \"csf\", \"gm\", \"wm\"],\n \"mrbrains4\": [\"air\", \"csf\", \"gm\", \"wm\"],\n \"mrbrains8\": [\"air\", \"csf\", \"gm\", \"wm\", \"class5\", \"class6\", \"class7\", \"class8\"],\n \"brats2018\": [\"c1\", \"c2\", \"c3\", \"c4\", \"c5\"],\n \"covid_seg\":[\"c1\", \"c2\", \"c3\"]}\n\n\n# TODO remove tensorboard x dependency make it work just with tensorboard\nclass TensorboardWriter():\n\n def __init__(self, args):\n name_model = args.model + \"_\" + args.dataset_name + \"_\" + datestr()\n self.writer = SummaryWriter(log_dir=args.tb_log_dir, comment=name_model)\n\n # self.step = 0\n # self.mode = ''\n self.csv_train, self.csv_val = self.create_stats_files(args.save)\n self.dataset_name = args.dataset_name\n self.label_names = dict_class_names[args.dataset_name]\n self.data = {\"train\": dict((label, 0.0) for label in self.label_names),\n \"val\": dict((label, 0.0) for label in self.label_names)}\n self.data['train']['loss'] = 0.0\n self.data['val']['loss'] = 0.0\n self.data['train']['count'] = 1.0\n self.data['val']['count'] = 1.0\n\n self.data['train']['dsc'] = 0.0\n self.data['val']['dsc'] = 0.0\n\n # self.tb_writer_ftns = {\n # 'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',\n # 'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'\n # }\n #\n # self.timer = datetime.now()\n\n def display_terminal(self, iter, epoch, mode='train', summary=False):\n \"\"\"\n\n :param iter: iteration or partial epoch\n :param epoch: epoch of training\n :param loss: any loss numpy\n :param mode: train or val ( for training and validation)\n :param summary: to print total statistics at the end of epoch\n \"\"\"\n if summary:\n\n info_print = \"\\n Epoch {:2d} : {} summary Loss : {:.4f} DSC : {:.4f} \".format(epoch, mode,\n self.data[mode]['loss'] /\n self.data[mode]['count'],\n self.data[mode]['dsc'] /\n self.data[mode]['count'])\n\n for i in range(len(self.label_names)):\n info_print += \" {} : {:.4f}\".format(self.label_names[i],\n self.data[mode][self.label_names[i]] / self.data[mode]['count'])\n\n print(info_print)\n else:\n\n info_print = \"partial epoch: {:.3f} Loss : {:.4f} DSC : {:.4f}\".format(iter, self.data[mode]['loss'] /\n self.data[mode]['count'],\n self.data[mode]['dsc'] /\n self.data[mode]['count'])\n\n for i in range(len(self.label_names)):\n info_print += \" {} : {:.4f}\".format(self.label_names[i],\n self.data[mode][self.label_names[i]] / self.data[mode]['count'])\n print(info_print)\n\n def create_stats_files(self, path):\n train_f = open(os.path.join(path, 'train.csv'), 'w')\n val_f = open(os.path.join(path, 'val.csv'), 'w')\n return train_f, val_f\n\n def reset(self, mode):\n self.data[mode]['dsc'] = 0.0\n self.data[mode]['loss'] = 0.0\n self.data[mode]['count'] = 1\n for i in range(len(self.label_names)):\n self.data[mode][self.label_names[i]] = 0.0\n\n def update_scores(self, iter, loss, channel_score, mode, writer_step):\n \"\"\"\n\n :param iter: iteration or partial epoch\n :param loss: any loss torch.tensor.item()\n :param channel_score: per channel score or dice coef\n :param mode: train or val ( for training and validation)\n :param writer_step: tensorboard writer step\n \"\"\"\n ## WARNING ASSUMING THAT CHANNELS IN SAME ORDER AS DICTIONARY ###########\n\n dice_coeff = np.mean(channel_score) * 100\n\n num_channels = len(channel_score)\n 
self.data[mode]['dsc'] += dice_coeff\n self.data[mode]['loss'] += loss\n self.data[mode]['count'] = iter + 1\n\n for i in range(num_channels):\n self.data[mode][self.label_names[i]] += channel_score[i]\n if self.writer is not None:\n self.writer.add_scalar(mode + '/' + self.label_names[i], channel_score[i], global_step=writer_step)\n\n def _write_end_of_epoch(self, epoch):\n\n self.writer.add_scalars('DSC/', {'train': self.data['train']['dsc'] / self.data['train']['count'],\n 'val': self.data['val']['dsc'] / self.data['val']['count'],\n }, epoch)\n self.writer.add_scalars('Loss/', {'train': self.data['train']['loss'] / self.data['train']['count'],\n 'val': self.data['val']['loss'] / self.data['val']['count'],\n }, epoch)\n for i in range(len(self.label_names)):\n self.writer.add_scalars(self.label_names[i],\n {'train': self.data['train'][self.label_names[i]] / self.data['train']['count'],\n 'val': self.data['val'][self.label_names[i]] / self.data['val']['count'],\n }, epoch)\n\n # TODO write labels accuracies in csv files\n\n train_csv_line = 'Epoch:{:2d} Loss:{:.4f} DSC:{:.4f}'.format(epoch,\n self.data['train']['loss'] / self.data['train'][\n 'count'],\n self.data['train']['dsc'] / self.data['train'][\n 'count'])\n val_csv_line = 'Epoch:{:2d} Loss:{:.4f} DSC:{:.4f}'.format(epoch,\n self.data['val']['loss'] / self.data['val'][\n 'count'],\n self.data['val']['dsc'] / self.data['val'][\n 'count'])\n self.csv_train.write(train_csv_line + '\\n')\n self.csv_val.write(val_csv_line + '\\n')\n"
] |
[
[
"numpy.mean",
"torch.utils.tensorboard.SummaryWriter"
]
] |
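A hedged sketch of how the TensorboardWriter in the row above would be driven from a training loop, assuming the repo's lib package is on the path (import taken from the row's file path); the args fields mirror what __init__ reads, and the loss and per-channel Dice values are placeholders.

```python
# Hedged sketch of driving the TensorboardWriter above from a training loop.
# The SimpleNamespace fields mirror what __init__ reads; numbers are placeholders.
import os
from types import SimpleNamespace
from lib.visual3D_temp.BaseWriter import TensorboardWriter

os.makedirs("./runs", exist_ok=True)         # create_stats_files() writes here
args = SimpleNamespace(model="UNET3D", dataset_name="iseg2017",
                       tb_log_dir="./runs", save="./runs")
writer = TensorboardWriter(args)

for it in range(10):
    channel_dice = [0.90, 0.80, 0.85, 0.70]  # one score per iseg2017 label
    writer.update_scores(it, loss=0.3, channel_score=channel_dice,
                         mode="train", writer_step=it)

writer.display_terminal(10, epoch=1, mode="train", summary=True)
writer._write_end_of_epoch(1)
writer.reset("train")
```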
zhengjw1992/focal-loss
|
[
"0b811228151005f7c90328771bcd0b5812efb47c"
] |
[
"focalloss.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nclass FocalLoss(nn.Module):\n def __init__(self, gamma=0, alpha=None, size_average=True):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.alpha = alpha\n # if isinstance(alpha,(float,int,long)): self.alpha = torch.Tensor([alpha,1-alpha])\n if isinstance(alpha,(float,int)): self.alpha = torch.Tensor([alpha,1-alpha])\n if isinstance(alpha,list): self.alpha = torch.Tensor(alpha)\n self.size_average = size_average\n\n def forward(self, input, target):\n if input.dim()>2:\n input = input.view(input.size(0),input.size(1),-1) # N,C,H,W => N,C,H*W\n input = input.transpose(1,2) # N,C,H*W => N,H*W,C\n input = input.contiguous().view(-1,input.size(2)) # N,H*W,C => N*H*W,C\n target = target.view(-1,1)\n\n logpt = F.log_softmax(input)\n logpt = logpt.gather(1,target)\n logpt = logpt.view(-1)\n pt = Variable(logpt.data.exp())\n\n if self.alpha is not None:\n if self.alpha.type()!=input.data.type():\n self.alpha = self.alpha.type_as(input.data)\n at = self.alpha.gather(0,target.data.view(-1))\n logpt = logpt * Variable(at)\n\n loss = -1 * (1-pt)**self.gamma * logpt\n if self.size_average: return loss.mean()\n else: return loss.sum()\n"
] |
[
[
"torch.Tensor",
"torch.nn.functional.log_softmax",
"torch.autograd.Variable"
]
] |
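The FocalLoss in the row above consumes raw class logits and integer targets, like nn.CrossEntropyLoss. A minimal usage sketch, assuming the file is importable as focalloss; note that a float alpha implies binary weighting [alpha, 1 - alpha], so the toy batch uses two classes.

```python
# Minimal usage sketch for the FocalLoss module above. Shapes and
# hyperparameters are placeholders; two classes because alpha is a float.
import torch
from focalloss import FocalLoss

criterion = FocalLoss(gamma=2, alpha=0.25)
logits = torch.randn(8, 2, requires_grad=True)   # batch of 8, 2 classes
targets = torch.randint(0, 2, (8,))              # integer class labels
loss = criterion(logits, targets)
loss.backward()
print(loss.item())
```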
chenynCV/SENet
|
[
"08b22d9961e3b2a6eb1b8cd25d33287d10eaddd5"
] |
[
"pre_train.py"
] |
[
"import tensorflow as tf\r\nimport numpy as np\r\nimport os\r\nfrom tensorpack import imgaug, dataset, ModelDesc, InputDesc\r\nfrom tensorpack.dataflow import (PrefetchDataZMQ, BatchData)\r\nfrom dataflow_input import MyDataFlow\r\nimport resnet_model\r\nfrom IPython import embed\r\n\r\nos.environ['CUDA_VISIBLE_DEVICES']= '0'\r\n\r\ninit_learning_rate = 0.1\r\nbatch_size = 128\r\nimage_size = 224\r\nimg_channels = 3\r\nclass_num = 365\r\n\r\nweight_decay = 1e-4\r\nmomentum = 0.9\r\n\r\ntotal_epochs = 30\r\niteration = 14089 // 1\r\n# 128 * 14089 ~ 1,803,460\r\ntest_iteration = 10\r\n\r\ndef center_loss(features, label, alfa, nrof_classes):\r\n \"\"\"Center loss based on the paper \"A Discriminative Feature Learning Approach for Deep Face Recognition\"\r\n (http://ydwen.github.io/papers/WenECCV16.pdf)\r\n \"\"\"\r\n nrof_features = features.get_shape()[1]\r\n centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,\r\n initializer=tf.constant_initializer(0), trainable=False)\r\n label = tf.reshape(label, [-1])\r\n centers_batch = tf.gather(centers, label)\r\n diff = (1 - alfa) * (centers_batch - features)\r\n centers = tf.scatter_sub(centers, label, diff)\r\n # centers = tf.nn.l2_normalize(centers, 1, 1e-10, name='centers_norm')\r\n loss = tf.reduce_mean(tf.square(features - centers_batch))\r\n return loss, centers\r\n\r\ndef focal_loss(onehot_labels, cls_preds,\r\n alpha=0.25, gamma=2.0, name=None, scope=None):\r\n \"\"\"Compute softmax focal loss between logits and onehot labels\r\n logits and onehot_labels must have same shape [batchsize, num_classes] and\r\n the same data type (float16, 32, 64)\r\n Args:\r\n onehot_labels: Each row labels[i] must be a valid probability distribution\r\n cls_preds: Unscaled log probabilities\r\n alpha: The hyperparameter for adjusting biased samples, default is 0.25\r\n gamma: The hyperparameter for penalizing the easy labeled samples\r\n name: A name for the operation (optional)\r\n Returns:\r\n A 1-D tensor of length batch_size of same type as logits with softmax focal loss\r\n \"\"\"\r\n with tf.name_scope(scope, 'focal_loss', [cls_preds, onehot_labels]) as sc:\r\n logits = tf.convert_to_tensor(cls_preds)\r\n onehot_labels = tf.convert_to_tensor(onehot_labels)\r\n\r\n precise_logits = tf.cast(logits, tf.float32) if (\r\n logits.dtype == tf.float16) else logits\r\n onehot_labels = tf.cast(onehot_labels, precise_logits.dtype)\r\n predictions = tf.nn.sigmoid(logits)\r\n predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1.-predictions)\r\n # add small value to avoid 0\r\n epsilon = 1e-8\r\n alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32))\r\n alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1-alpha_t)\r\n losses = tf.reduce_sum(-alpha_t * tf.pow(1. 
- predictions_pt, gamma) * tf.log(predictions_pt+epsilon),\r\n name=name, axis=1)\r\n return losses\r\n\r\ndef Evaluate(sess):\r\n test_acc = 0.0\r\n test_loss = 0.0\r\n\r\n for it in range(test_iteration):\r\n batch_data = next(scene_data_val)\r\n test_batch_x = batch_data['data']\r\n test_batch_y = batch_data['label']\r\n\r\n test_feed_dict = {\r\n x: test_batch_x,\r\n label: test_batch_y,\r\n learning_rate: epoch_learning_rate,\r\n training_flag: False\r\n }\r\n\r\n loss_, acc_ = sess.run([Total_loss, accuracy], feed_dict=test_feed_dict)\r\n\r\n test_loss += loss_\r\n test_acc += acc_\r\n\r\n test_loss /= test_iteration # average loss\r\n test_acc /= test_iteration # average accuracy\r\n\r\n summary = tf.Summary(value=[tf.Summary.Value(tag='test_loss', simple_value=test_loss),\r\n tf.Summary.Value(tag='test_accuracy', simple_value=test_acc)])\r\n\r\n return test_acc, test_loss, summary\r\n\r\ndef resnet_model_fn(inputs, training):\r\n \"\"\"Our model_fn for ResNet to be used with our Estimator.\"\"\"\r\n\r\n network = resnet_model.imagenet_resnet_v2(\r\n resnet_size=18, num_classes=class_num, mode='se', data_format=None)\r\n inputs= network(inputs=inputs, is_training=training)\r\n feat = tf.nn.l2_normalize(inputs, 1, 1e-10, name='feat')\r\n inputs = tf.layers.dense(inputs=inputs, units=class_num)\r\n # inputs = tf.layers.dense(inputs=feat, units=class_num)\r\n inputs = tf.identity(inputs, 'final_dense')\r\n\r\n return inputs, feat\r\n\r\n# image_size = 32, img_channels = 3, class_num = 10 in cifar10\r\nx = tf.placeholder(tf.float32, shape=[None, image_size, image_size, img_channels])\r\nlabel = tf.placeholder(tf.float32, shape=[None,])\r\none_hot_labels = tf.one_hot(indices=tf.cast(label, tf.int32), depth=class_num)\r\n\r\ntraining_flag = tf.placeholder(tf.bool)\r\nlearning_rate = tf.placeholder(tf.float32, name='learning_rate')\r\n\r\nlogits, feat = resnet_model_fn(x, training=training_flag)\r\n\r\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_labels, logits=logits))\r\nFocal_loss = tf.reduce_mean(focal_loss(one_hot_labels, logits, alpha=0.5))\r\nl2_loss = weight_decay * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])\r\nCenter_loss, Centers = center_loss(feat, tf.cast(label, dtype=tf.int32), 0.95, class_num)\r\nTotal_loss = cost + l2_loss\r\n\r\noptimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)\r\n# Batch norm requires update_ops to be added as a train_op dependency.\r\nupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\nwith tf.control_dependencies(update_ops):\r\n train_op = optimizer.minimize(Total_loss)\r\n\r\ncorrect_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_labels, 1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n\r\n# val_dir = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_images_20170908/'\r\n# annotations = '/data0/AIChallenger/ai_challenger_scene_validation_20170908/scene_validation_annotations_20170908.json'\r\n# # a DataFlow you implement to produce [tensor1, tensor2, ..] 
lists from whatever sources:\r\n# df = MyDataFlow(val_dir, annotations, is_training=False, batch_size=batch_size, img_size=image_size)\r\n# # start 3 processes to run the dataflow in parallel\r\n# df = PrefetchDataZMQ(df, nr_proc=10)\r\n# df.reset_state()\r\n# scene_data_val = df.get_data()\r\n\r\ntrain_dir = '/data0/AIChallenger/data_256'\r\nannotations = '/data0/AIChallenger/data_256.json'\r\n# a DataFlow you implement to produce [tensor1, tensor2, ..] lists from whatever sources:\r\ndf = MyDataFlow(train_dir, annotations, is_training=True, batch_size=batch_size, img_size=image_size)\r\n# start 3 processes to run the dataflow in parallel\r\ndf = PrefetchDataZMQ(df, nr_proc=10)\r\ndf.reset_state()\r\nscene_data = df.get_data()\r\n\r\nsaver = tf.train.Saver(tf.global_variables())\r\n\r\nwith tf.Session() as sess:\r\n ckpt = tf.train.get_checkpoint_state('./model_pretrain')\r\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\r\n print(\"loading checkpoint...\")\r\n saver.restore(sess, ckpt.model_checkpoint_path)\r\n else:\r\n sess.run(tf.global_variables_initializer())\r\n\r\n summary_writer = tf.summary.FileWriter('./logs_pretrain', sess.graph)\r\n \r\n _x = x[:, :, :, ::-1]\r\n tf.summary.image('x', _x, 4)\r\n \r\n summary_op = tf.summary.merge_all()\r\n\r\n epoch_learning_rate = init_learning_rate\r\n for epoch in range(1, total_epochs + 1):\r\n if epoch % 10 == 0 :\r\n epoch_learning_rate = epoch_learning_rate / 10\r\n\r\n train_acc = 0.0\r\n train_loss = 0.0\r\n\r\n for step in range(1, iteration + 1):\r\n batch_data = next(scene_data)\r\n batch_x = batch_data['data']\r\n batch_y = batch_data['label']\r\n\r\n train_feed_dict = {\r\n x: batch_x,\r\n label: batch_y,\r\n learning_rate: epoch_learning_rate,\r\n training_flag: True\r\n }\r\n\r\n _, batch_loss = sess.run([train_op, Total_loss], feed_dict=train_feed_dict)\r\n batch_acc = accuracy.eval(feed_dict=train_feed_dict)\r\n\r\n print(\"epoch: %d/%d, iter: %d/%d, batch_loss: %.4f, batch_acc: %.4f \\n\" % (\r\n epoch, total_epochs, step, iteration, batch_loss, batch_acc))\r\n\r\n train_loss += batch_loss\r\n train_acc += batch_acc\r\n\r\n if step % 30 == 0 :\r\n summary_str = sess.run(summary_op, feed_dict=train_feed_dict)\r\n summary_writer.add_summary(summary=summary_str, global_step=epoch)\r\n summary_writer.flush()\r\n\r\n\r\n train_loss /= iteration # average loss\r\n train_acc /= iteration # average accuracy\r\n\r\n train_summary = tf.Summary(value=[tf.Summary.Value(tag='train_loss', simple_value=train_loss),\r\n tf.Summary.Value(tag='train_accuracy', simple_value=train_acc)])\r\n\r\n # test_acc, test_loss, test_summary = Evaluate(sess)\r\n\r\n summary_writer.add_summary(summary=train_summary, global_step=epoch)\r\n # summary_writer.add_summary(summary=test_summary, global_step=epoch)\r\n summary_writer.flush()\r\n\r\n # line = \"epoch: %d/%d, train_loss: %.4f, train_acc: %.4f, test_loss: %.4f, test_acc: %.4f \\n\" % (\r\n # epoch, total_epochs, train_loss, train_acc, test_loss, test_acc)\r\n line = \"epoch: %d/%d, train_loss: %.4f, train_acc: %.4f \\n\" % (\r\n epoch, total_epochs, train_loss, train_acc) \r\n print(line)\r\n\r\n with open('./logs_pretrain/logs.txt', 'a') as f:\r\n f.write(line)\r\n\r\n saver.save(sess=sess, save_path='./model_pretrain/model.ckpt')\r\n"
] |
[
[
"tensorflow.convert_to_tensor",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.control_dependencies",
"tensorflow.cast",
"tensorflow.global_variables",
"tensorflow.equal",
"tensorflow.nn.l2_loss",
"tensorflow.get_collection",
"tensorflow.summary.image",
"tensorflow.layers.dense",
"tensorflow.gather",
"tensorflow.train.MomentumOptimizer",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"tensorflow.nn.l2_normalize",
"tensorflow.nn.sigmoid",
"tensorflow.pow",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.train.checkpoint_exists",
"tensorflow.train.get_checkpoint_state",
"tensorflow.summary.FileWriter",
"tensorflow.reshape",
"tensorflow.ones_like",
"tensorflow.constant_initializer",
"tensorflow.Summary.Value",
"tensorflow.log",
"tensorflow.scatter_sub"
]
] |
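focal_loss() in the row above is TF 1.x graph code, so the quickest way to sanity-check its formula (alpha-balanced, modulated by (1 - p_t)^gamma, summed per row) is a NumPy re-statement that runs without a session; inputs below are placeholders.

```python
# NumPy re-statement of the sigmoid focal loss computed by focal_loss()
# above, for sanity-checking the formula without building a TF1 graph.
import numpy as np

def sigmoid_focal_loss_np(onehot, logits, alpha=0.25, gamma=2.0, eps=1e-8):
    p = 1.0 / (1.0 + np.exp(-logits))                # sigmoid predictions
    p_t = np.where(onehot == 1, p, 1.0 - p)          # prob. of the true outcome
    a_t = np.where(onehot == 1, alpha, 1.0 - alpha)  # class-balance weight
    return np.sum(-a_t * (1.0 - p_t) ** gamma * np.log(p_t + eps), axis=1)

onehot = np.eye(3, dtype=np.float32)                 # 3 one-hot examples
logits = np.random.randn(3, 3).astype(np.float32)
print(sigmoid_focal_loss_np(onehot, logits))         # one loss per example
```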
yushun9897/OpenPNM
|
[
"031894a53650c3193d767b0460c8ea2e66799825"
] |
[
"openpnm/utils/misc.py"
] |
[
"import copy\nimport json\nimport inspect\nimport warnings\nimport functools\nimport numpy as _np\nimport scipy as _sp\nimport scipy.sparse\nimport time as _time\nfrom collections import OrderedDict\nfrom docrep import DocstringProcessor\n\n\nclass Docorator(DocstringProcessor):\n\n __instance__ = None\n\n def __new__(cls, *args, **kwargs):\n if Docorator.__instance__ is None:\n Docorator.__instance__ = DocstringProcessor()\n return Docorator.__instance__\n\n\nclass PrintableList(list):\n r\"\"\"\n Simple subclass of ``list`` that has nice printing. Only works flat lists.\n\n Example\n -------\n >>> from openpnm.utils import PrintableList\n >>> temp = ['item1', 'item2', 'item3']\n >>> print(PrintableList(temp))\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n 1 : item1\n 2 : item2\n 3 : item3\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n\n Each line contains the result of ``print(item)`` on each item in the list\n\n \"\"\"\n\n def __str__(self):\n horizontal_rule = \"―\" * 78\n lines = [horizontal_rule]\n self.sort()\n for i, item in enumerate(self):\n lines.append(\"{0:<5s} : {1}\".format(str(i + 1), item))\n lines.append(horizontal_rule)\n return \"\\n\".join(lines)\n\n def __repr__(self):\n self.sort()\n return super().__repr__()\n\n\nclass PrintableDict(OrderedDict):\n r\"\"\"\n Simple subclass of ``dict`` that has nicer printing.\n\n Example\n -------\n >>> from openpnm.utils import PrintableDict\n >>> from numpy import array as arr\n >>> d = {'item1': 1, 'item2': '1', 'item3': [1, 1], 'item4': arr([1, 1])}\n >>> print(PrintableDict(d))\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n key value\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n item1 1\n item2 1\n item3 [1, 1]\n item4 (2,)\n ――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――\n\n If the item is a Numpy array the value column will contain the items'\n shape, otherwise it will contain the result of ``print(item)``\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._value = \"value\"\n self._key = \"key\"\n super().__init__(*args, **kwargs)\n\n def __repr__(self):\n text = dict(self).__str__()\n return text\n\n def __str__(self):\n header = \"―\" * 78\n lines = [header, \"{0:<35s} {1}\".format(self._key, self._value), header]\n for item in list(self.keys()):\n if item.startswith('_'):\n continue\n if isinstance(self[item], _np.ndarray):\n lines.append(\"{0:<35s} {1}\".format(item, _np.shape(self[item])))\n else:\n lines.append(\"{0:<35s} {1}\".format(item, self[item]))\n lines.append(header)\n return \"\\n\".join(lines)\n\n\nclass SettingsDict(PrintableDict):\n r\"\"\"\n The SettingsDict implements the __missing__ magic method, which returns\n None instead of KeyError. 
This is useful for checking the value of a\n settings without first ensuring it exists.\n\n Examples\n --------\n >>> from openpnm.utils import SettingsDict\n >>> sd = SettingsDict()\n >>> sd['test'] = True\n >>> print(sd['test'])\n True\n >>> print(sd['not_a_valid_key'])\n None\n\n \"\"\"\n __doc__ = ''\n\n def __setitem__(self, key, value):\n try:\n json.dumps(value)\n except TypeError:\n raise Exception('Only serializable objects can be stored in settings')\n super().__setitem__(key, value)\n\n def __missing__(self, key):\n self[key] = None\n return self[key]\n\n def _update_settings_and_docs(self, dc):\n if isinstance(dc, type): # If dc is class then instantiate it\n dc = dc()\n self.__doc__ = dc.__doc__\n # if dc is a dataclass object. This step is only necessary to support\n # Python 3.6 which doesn't have the dataclasses module\n if hasattr(dc, '__dict__'):\n dc = copy.deepcopy(dc.__dict__)\n else:\n dc = copy.deepcopy(dc)\n for item in dc.keys():\n self[item] = dc[item]\n\n\nclass GenericSettings:\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for item in dir(self):\n if not item.startswith('__'):\n self.__dict__[item] = getattr(self, item)\n\n\nclass SubDict(dict):\n def __getitem__(self, key):\n for item in self.keys():\n if item.endswith('.' + key):\n key = item\n return super().__getitem__(key)\n\n\nclass NestedDict(dict):\n def __init__(self, mapping={}, delimiter=\"/\"):\n super().__init__()\n self.delimiter = delimiter\n self.update(mapping)\n self.unravel()\n\n def __setitem__(self, key, value):\n path = key.split(self.delimiter, 1)\n if len(path) > 1:\n if path[0] not in self.keys():\n self[path[0]] = NestedDict(delimiter=self.delimiter)\n self[path[0]][path[1]] = value\n else:\n super().__setitem__(key, value)\n\n def __missing__(self, key):\n self[key] = NestedDict(delimiter=self.delimiter)\n return self[key]\n\n def unravel(self):\n for item in self.keys():\n self[item] = self.pop(item)\n\n def to_dict(self, dct=None):\n if dct is None:\n dct = self\n plain_dict = dict()\n for key in dct.keys():\n value = dct[key]\n if hasattr(value, \"keys\"):\n plain_dict[key] = self.to_dict(value)\n else:\n plain_dict[key] = value\n return plain_dict\n\n def keys(self, dicts=True, values=True):\n k = list(super().keys())\n new_keys = []\n for item in k:\n if hasattr(self[item], \"keys\"):\n if dicts:\n new_keys.append(item)\n else:\n if values:\n new_keys.append(item)\n return new_keys\n\n def __str__(self):\n def print_level(self, p=\"\", indent=\"-\"):\n for item in self.keys():\n if hasattr(self[item], \"keys\"):\n p = print_level(self[item], p=p, indent=indent + indent[0])\n elif indent[-1] != \" \":\n indent = indent + \"\"\n p = indent + item + \"\\n\" + p\n return p\n\n p = print_level(self)\n return p\n\n\nclass HealthDict(PrintableDict):\n r\"\"\"\n This class adds a 'health' check to a standard dictionary. This check\n looks into the dict values, and considers empty lists as healthy and all\n else as unhealthy. 
If one or more entries is 'unhealthy' the health method\n returns False.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def _get_health(self):\n health = True\n for item in list(self.keys()):\n try:\n if len(self[item]) > 0:\n health = False\n except TypeError:\n if self[item]:\n health = False\n return health\n\n health = property(fget=_get_health)\n\n\ndef tic():\n r\"\"\"\n Homemade version of matlab tic and toc function, tic starts or resets\n the clock, toc reports the time since the last call of tic.\n\n See Also\n --------\n toc\n\n \"\"\"\n global _startTime_for_tictoc\n _startTime_for_tictoc = _time.time()\n\n\ndef toc(quiet=False):\n r\"\"\"\n Homemade version of matlab tic and toc function, tic starts or resets\n the clock, toc reports the time since the last call of tic.\n\n Parameters\n ----------\n quiet : Boolean\n If False (default) then a message is output to the console. If True\n the message is not displayed and the elapsed time is returned.\n\n See Also\n --------\n tic\n\n \"\"\"\n if \"_startTime_for_tictoc\" in globals():\n t = _time.time() - _startTime_for_tictoc\n if quiet is False:\n print(f\"Elapsed time in seconds: {t:0.2f}\")\n else:\n return t\n else:\n raise Exception(\"Start time not set, call tic first\")\n\n\ndef unique_list(input_list):\n r\"\"\"\n For a given list (of points) remove any duplicates\n\n \"\"\"\n output_list = []\n if len(input_list) > 0:\n dim = _np.shape(input_list)[1]\n for i in input_list:\n match = False\n for j in output_list:\n if dim == 3:\n if i[0] == j[0] and i[1] == j[1] and i[2] == j[2]:\n match = True\n elif dim == 2:\n if i[0] == j[0] and i[1] == j[1]:\n match = True\n elif dim == 1:\n if i[0] == j[0]:\n match = True\n if match is False:\n output_list.append(i)\n return output_list\n\n\ndef flat_list(input_list):\n r\"\"\"\n Given a list of nested lists of arbitrary depth, returns a single level or\n 'flat' list.\n\n \"\"\"\n x = input_list\n if isinstance(x, list):\n return [a for i in x for a in flat_list(i)]\n else:\n return [x]\n\n\ndef sanitize_dict(input_dict):\n r\"\"\"\n Given a nested dictionary, ensures that all nested dicts are normal\n Python dicts. 
This is necessary for pickling, or just converting\n an 'auto-vivifying' dict to something that acts normal.\n\n \"\"\"\n plain_dict = dict()\n for key in input_dict.keys():\n value = input_dict[key]\n if hasattr(value, \"keys\"):\n plain_dict[key] = sanitize_dict(value)\n else:\n plain_dict[key] = value\n return plain_dict\n\n\ndef methods_to_table(obj):\n r\"\"\"\n \"\"\"\n parent = obj.__class__.__mro__[1]\n temp = inspect.getmembers(parent, predicate=inspect.isroutine)\n parent_funcs = [i[0] for i in temp if not i[0].startswith(\"_\")]\n\n temp = inspect.getmembers(obj.__class__, predicate=inspect.isroutine)\n obj_funcs = [i[0] for i in temp if not i[0].startswith(\"_\")]\n funcs = set(obj_funcs).difference(set(parent_funcs))\n\n row = \"+\" + \"-\" * 22 + \"+\" + \"-\" * 49 + \"+\"\n fmt = \"{0:1s} {1:20s} {2:1s} {3:47s} {4:1s}\"\n lines = []\n lines.append(row)\n lines.append(fmt.format(\"|\", \"Method\", \"|\", \"Description\", \"|\"))\n lines.append(row.replace(\"-\", \"=\"))\n for i, item in enumerate(funcs):\n try:\n s = getattr(obj, item).__doc__.strip()\n end = s.find(\"\\n\")\n if end > 47:\n s = s[:44] + \"...\"\n lines.append(fmt.format(\"|\", item, \"|\", s[:end], \"|\"))\n lines.append(row)\n except AttributeError:\n pass\n return \"\\n\".join(lines)\n\n\ndef models_to_table(obj, params=True):\n r\"\"\"\n Converts a ModelsDict object to a ReST compatible table\n\n Parameters\n ----------\n obj : OpenPNM object\n Any object that has a ``models`` attribute\n\n params : boolean\n Indicates whether or not to include a list of parameter\n values in the table. Set to False for just a list of models, and\n True for a more verbose table with all parameter values.\n\n \"\"\"\n if not hasattr(obj, \"models\"):\n raise Exception(\"Received object does not have any models\")\n row = \"+\" + \"-\" * 4 + \"+\" + \"-\" * 22 + \"+\" + \"-\" * 18 + \"+\" + \"-\" * 26 + \"+\"\n fmt = \"{0:1s} {1:2s} {2:1s} {3:20s} {4:1s} {5:16s} {6:1s} {7:24s} {8:1s}\"\n lines = []\n lines.append(row)\n lines.append(\n fmt.format(\"|\", \"#\", \"|\", \"Property Name\", \"|\", \"Parameter\", \"|\", \"Value\", \"|\")\n )\n lines.append(row.replace(\"-\", \"=\"))\n for i, item in enumerate(obj.models.keys()):\n prop = item\n if len(prop) > 20:\n prop = item[:17] + \"...\"\n temp = obj.models[item].copy()\n model = str(temp.pop(\"model\")).split(\" \")[1]\n lines.append(\n fmt.format(\"|\", str(i + 1), \"|\", prop, \"|\", \"model:\", \"|\", model, \"|\")\n )\n lines.append(row)\n if params:\n for param in temp.keys():\n p1 = param\n if len(p1) > 16:\n p1 = p1[:14] + \"...\"\n p2 = str(temp[param])\n if len(p2) > 24:\n p2 = p2[:21] + \"...\"\n lines.append(fmt.format(\"|\", \"\", \"|\", \"\", \"|\", p1, \"|\", p2, \"|\"))\n lines.append(row)\n return \"\\n\".join(lines)\n\n\ndef catch_module_not_found(function):\n r\"\"\"\n A decorator that wraps the passed in function and catches\n ModuleNotFound exception.\n \"\"\"\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except ModuleNotFoundError:\n pass\n return wrapper\n\n\ndef ignore_warnings(warning=RuntimeWarning):\n r\"\"\"\n Decorator for catching warnings. Useful in pore-scale models where nans\n are inevitable, and numpy gets annoying by throwing lots of RuntimeWarnings.\n\n Parameters\n ----------\n warning : Python Warning object\n Python warning type that you want to temporarily ignore\n\n Examples\n --------\n >>> from openpnm.utils.misc import ignore_warnings\n >>> @ignore_warnings()\n ... 
def myfun(x):\n ... return 1/x\n\n >>> import numpy as np\n >>> x = np.arange(5)\n >>> myfun(x)\n array([ inf, 1. , 0.5 , 0.33333333, 0.25 ])\n\n \"\"\"\n\n def _ignore_warning(function):\n @functools.wraps(function)\n def __ignore_warning(*args, **kwargs):\n with warnings.catch_warnings(record=True):\n # Catch all warnings of this type\n warnings.simplefilter(\"always\", warning)\n # Execute the function\n result = function(*args, **kwargs)\n return result\n\n return __ignore_warning\n\n return _ignore_warning\n\n\ndef conduit_lengths(network, throats=None, mode=\"pore\"):\n r\"\"\"\n Return the respective lengths of the conduit components defined by the throat\n conns P1 - T - P2\n\n Notes\n -----\n mode = 'pore' - uses pore coordinates\n mode = 'centroid' uses pore and throat centroids\n\n \"\"\"\n if throats is None:\n throats = network.throats()\n Ps = network[\"throat.conns\"]\n pdia = network[\"pore.diameter\"]\n Lt = network[\"throat.length\"]\n\n if mode == \"centroid\":\n try:\n pcentroids = network[\"pore.centroid\"]\n tcentroids = network[\"throat.centroid\"]\n if _np.sum(_np.isnan(pcentroids)) + _np.sum(_np.isnan(tcentroids)) > 0:\n mode = \"pore\"\n else:\n plen1 = (\n _np.sqrt(_np.sum(_np.square(pcentroids[Ps[:, 0]] - tcentroids), 1))\n - Lt / 2\n )\n plen2 = (\n _np.sqrt(_np.sum(_np.square(pcentroids[Ps[:, 1]] - tcentroids), 1))\n - Lt / 2\n )\n except KeyError:\n mode = \"pore\"\n if mode == \"pore\":\n # Find half-lengths of each pore\n pcoords = network[\"pore.coords\"]\n # Find the pore-to-pore distance, minus the throat length\n lengths = (\n _np.sqrt(_np.sum(_np.square(pcoords[Ps[:, 0]] - pcoords[Ps[:, 1]]), 1)) - Lt\n )\n lengths[lengths < 0.0] = 2e-9\n # Calculate the fraction of that distance from the first pore\n try:\n fractions = pdia[Ps[:, 0]] / (pdia[Ps[:, 0]] + pdia[Ps[:, 1]])\n # Don't allow zero lengths\n # fractions[fractions == 0.0] = 0.5\n # fractions[fractions == 1.0] = 0.5\n except Exception:\n fractions = 0.5\n plen1 = lengths * fractions\n plen2 = lengths * (1 - fractions)\n\n return _np.vstack((plen1, Lt, plen2)).T[throats]\n\n\ndef is_symmetric(a, rtol=1e-10):\n r\"\"\"\n Is ``a`` a symmetric matrix?\n\n Parameters\n ----------\n a : ndarray, sparse matrix\n Object to check for being a symmetric matrix.\n\n rtol : float\n Relative tolerance with respect to the smallest entry in ``a`` that\n is used to determine if ``a`` is symmetric.\n\n Returns\n -------\n bool\n ``True`` if ``a`` is a symmetric matrix, ``False`` otherwise.\n\n \"\"\"\n if not isinstance(a, _np.ndarray) and not _sp.sparse.issparse(a):\n raise Exception(\"'a' must be either a sparse matrix or an ndarray.\")\n if a.shape[0] != a.shape[1]:\n raise Exception(\"'a' must be a square matrix.\")\n\n atol = _np.amin(_np.absolute(a.data)) * rtol\n if _sp.sparse.issparse(a):\n issym = False if ((a - a.T) > atol).nnz else True\n elif isinstance(a, _np.ndarray):\n issym = False if _np.any((a - a.T) > atol) else True\n\n return issym\n\n\ndef is_valid_propname(propname):\n r\"\"\"\n Check if ``propname`` is a valid OpenPNM propname, i.e. starts with\n 'pore.' 
or 'throat.'\n\n Parameters\n ----------\n propname : str\n Property name to check whether it's a valid OpenPNM propname.\n\n Returns\n -------\n bool\n Whether or not ``propname`` is a valid name\n\n \"\"\"\n if not isinstance(propname, str):\n return False\n temp = propname.split(\".\")\n if temp[0] not in [\"pore\", \"throat\"]:\n return False\n if len(temp) == 1:\n return False\n for field in temp:\n if len(field) == 0:\n return False\n return True\n\n\ndef nbr_to_str(nbr, t_precision):\n r\"\"\"\n Converts a scalar into a string in scientific (exponential) notation\n without the decimal point.\n\n Parameters\n ----------\n nbr : scalar\n The number to be converted into a scalar.\n\n t_precision : integer\n The time precision (number of decimal places). Default value is 12.\n\n \"\"\"\n from decimal import Decimal as dc\n n = int(-dc(str(round(nbr, t_precision))).as_tuple().exponent\n * (round(nbr, t_precision) != int(nbr)))\n nbr_str = (str(int(round(nbr, t_precision) * 10**n)) + (f'e-{n}') * (n != 0))\n return nbr_str\n"
] |
[
[
"numpy.square",
"numpy.absolute",
"scipy.sparse.issparse",
"numpy.isnan",
"numpy.shape",
"numpy.any",
"numpy.vstack"
]
] |
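A hedged usage sketch for a few of the helpers in the row above, assuming OpenPNM is installed so openpnm.utils.misc is importable (path taken from the row's file_path field).

```python
# Hedged usage sketch for NestedDict, is_symmetric, and tic/toc above.
import numpy as np
from openpnm.utils.misc import NestedDict, is_symmetric, tic, toc

# NestedDict splits delimited keys into nested dicts on assignment
d = NestedDict({"pore/diameter": 1.0, "pore/volume": 2.0, "throat/length": 0.5})
print(d.to_dict())   # {'pore': {'diameter': 1.0, 'volume': 2.0}, 'throat': {'length': 0.5}}

a = np.array([[1.0, 2.0], [2.0, 1.0]])
print(is_symmetric(a))   # True

tic()                                   # start the matlab-style timer
_ = np.linalg.inv(np.eye(200))
toc()                                   # prints the elapsed time in seconds
```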
martin-hunt/nglview
|
[
"9adbf974705e69b90ec1d99161ba4a64b665b80c"
] |
[
"nglview/tests/test_widget.py"
] |
[
"from __future__ import print_function\nimport os\nimport sys\nfrom itertools import chain\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\nfrom mock import patch, MagicMock\nimport gzip\nimport time\nimport unittest\nimport pytest\nfrom numpy.testing import assert_almost_equal as aa_eq\nimport numpy as np\nimport traitlets\nfrom ipykernel.comm import Comm\nimport ipywidgets\nfrom ipywidgets import Widget, IntText, BoundedFloatText, HBox, Layout, Button\nfrom traitlets import TraitError\nimport ipywidgets as widgets\nfrom traitlets import TraitError, link\nfrom IPython import display\nimport nglview as nv\nfrom nglview import NGLWidget\nfrom nglview import widget_utils\nfrom nglview.utils.py_utils import PY2, PY3, click, submit\nfrom nglview import js_utils\nfrom nglview.representation import RepresentationControl\nfrom nglview.utils.py_utils import encode_base64, decode_base64\nfrom nglview import interpolate\n\nfrom make_dummy_comm import * # to initialize\n\ntry:\n import simpletraj\n has_simpletraj = True\nexcept ImportError:\n has_simpletraj = False\n\ntry:\n import pytraj as pt\n has_pytraj = True\nexcept ImportError:\n has_pytraj = False\n\ntry:\n import mdtraj as md\n has_mdtraj = True\nexcept ImportError:\n has_mdtraj = False\ntry:\n import parmed as pmd\n has_parmed = True\nexcept ImportError:\n has_parmed = False\n\ntry:\n import rdkit\n from rdkit import Chem\n from rdkit.Chem import AllChem\n has_rdkit = True\nexcept ImportError:\n rdkit = AllChem = None\n has_rdkit = False\n\ntry:\n import MDAnalysis\n has_MDAnalysis = True\nexcept ImportError:\n has_MDAnalysis = False\n\ntry:\n import htmd\n has_HTMD = True\nexcept ImportError:\n has_HTMD = False\n\ntry:\n import ase\n has_ase = True\nexcept ImportError:\n has_ase = False\n\ntry:\n import Bio.PDB\n has_bio = True\nexcept ImportError:\n has_bio = False\n\n# local\nfrom utils import get_fn, repr_dict as REPR_DICT\n\n\ndef default_view():\n traj = pt.load(nv.datafiles.TRR, nv.datafiles.PDB)\n return nv.show_pytraj(traj)\n\n\n#-----------------------------------------------------------------------------\n# NGLView stuff\n#-----------------------------------------------------------------------------\n\nDEFAULT_REPR = [{\n 'params': {\n 'sele': 'polymer'\n },\n 'type': 'cartoon'\n}, {\n 'params': {\n 'sele': 'hetero OR mol'\n },\n 'type': 'ball+stick'\n}, {\n \"type\": \"ball+stick\",\n \"params\": {\n \"sele\": \"not protein and not nucleic\"\n }\n}]\n\n\ndef _assert_dict_list_equal(listdict0, listdict1):\n for (dict0, dict1) in zip(listdict0, listdict1):\n for (key0, key1) in zip(sorted(dict0.keys()), sorted(dict1.keys())):\n assert key0 == key1\n assert dict0.get(key0) == dict1.get(key1)\n\n\ndef test_API_promise_to_have():\n\n # for Jupyter notebook extension\n nv._jupyter_nbextension_paths()\n\n view = nv.demo()\n\n # Structure\n structure = nv.Structure()\n structure.get_structure_string\n assert hasattr(structure, 'id')\n assert hasattr(structure, 'ext')\n assert hasattr(structure, 'params')\n\n # Widget\n nv.NGLWidget._set_coordinates\n\n nv.NGLWidget.add_component\n nv.NGLWidget.add_trajectory\n nv.NGLWidget._coordinates_dict\n nv.NGLWidget.set_representations\n nv.NGLWidget.clear\n nv.NGLWidget.center\n\n # add component\n view.add_component('rcsb://1tsu.pdb')\n view.add_pdbid('1tsu')\n\n # display\n js_utils.clean_error_output()\n display.display(view.player.widget_repr)\n view.player._display()\n view._display_image()\n\n # show\n try:\n nv.show_pdbid('1tsu')\n except:\n pass\n 
nv.show_url('https://dummy.pdb')\n # other backends will be tested in other sections\n\n # constructor\n ngl_traj = nv.PyTrajTrajectory(pt.datafiles.load_ala3())\n nv.NGLWidget(ngl_traj, parameters=dict(background_color='black'))\n nv.NGLWidget(ngl_traj, representations=[dict(type='cartoon', params={})])\n\n view.parameters\n view.camera\n view.camera = 'perspective'\n view._request_stage_parameters()\n view._ngl_repr_dict = REPR_DICT\n view._handle_repr_dict_changed(dict(new=dict(c0={})))\n\n # dummy\n class DummWidget():\n value = ''\n\n view.player.picked_widget = DummWidget()\n\n view._update_background_color(change=dict(new='blue'))\n view.on_update_dragged_file(change=dict(new=2, old=1))\n view.on_update_dragged_file(change=dict(new=1, old=1))\n tab = view.player._display()\n\n view.player.widget_repr = view.player._make_widget_repr()\n view._handle_n_components_changed(change=dict(new=2, old=1))\n view._handle_n_components_changed(change=dict(new=1, old=1))\n view._handle_n_components_changed(change=dict(new=1, old=0))\n view.on_loaded(change=dict(new=True))\n view.on_loaded(change=dict(new=False))\n view._refresh_render()\n view.sync_view()\n\n view._first_time_loaded = False\n view._ipython_display_()\n view._first_time_loaded = True\n view._ipython_display_()\n view._init_gui = True\n view._ipython_display_()\n view._theme = 'dark'\n view._ipython_display_()\n\n view.display(gui=True)\n view.display(gui=False)\n view.display(gui=True, use_box=True)\n view._set_draggable(True)\n view._set_draggable(False)\n view._set_sync_frame()\n view._set_sync_camera()\n view._set_spin([0, 1, 0], 0.5)\n view._set_selection('.CA')\n view.color_by('atomindex')\n representations = [dict(type='cartoon', params=dict())]\n view.representations = representations\n repr_parameters = dict(opacity=0.3, params=dict())\n view.update_representation(parameters=repr_parameters)\n view._remove_representation()\n view.clear()\n view.add_representation('surface', selection='*', useWorker=True)\n view.add_representation('surface', selection='*', component=1)\n view.center()\n view._hold_image = True\n view._on_render_image(change=dict(new=u'xyz'))\n view._hold_image = False\n view._on_render_image(change=dict(new=u'xyz'))\n view.render_image()\n view.render_image(frame=2)\n view.download_image()\n\n msg = dict(type='request_frame', data=dict())\n view._ngl_handle_msg(view, msg=msg, buffers=[])\n msg = dict(type='repr_parameters', data=dict(name='hello'))\n view._ngl_handle_msg(view, msg=msg, buffers=[])\n view.loaded = True\n msg = dict(type='request_loaded', data=True)\n view._ngl_handle_msg(view, msg=msg, buffers=[])\n view.loaded = False\n msg = dict(type='request_loaded', data=True)\n view._ngl_handle_msg(view, msg=msg, buffers=[])\n msg = dict(type='all_reprs_info', data=REPR_DICT)\n view._ngl_handle_msg(view, msg=msg, buffers=[])\n msg = dict(type='stage_parameters', data=dict())\n view._ngl_handle_msg(view, msg=msg, buffers=[])\n # test negative frame (it will be set to self.count - 1)\n view.frame = -1\n msg = dict(type='request_frame', data=dict())\n\n view.loaded = True\n view.show_only([\n 0,\n ])\n view._js_console()\n view._get_full_params()\n # view._set_place_proxy(HBox())\n\n # iter\n for c in view:\n assert isinstance(c, nv.widget.ComponentViewer)\n\n\n@unittest.skipUnless(has_pytraj, 'skip if not having pytraj')\n@unittest.skipUnless(has_mdtraj, 'skip if not having mdtraj')\ndef test_add_trajectory():\n view = nv.NGLWidget()\n\n def update_coords(view=view):\n view.frame = 1000\n view.frame = 
0\n\n p_traj = pt.load(nv.datafiles.TRR, nv.datafiles.PDB)\n view.add_trajectory(p_traj)\n m_traj = md.load(nv.datafiles.XTC, top=nv.datafiles.PDB)\n view.add_trajectory(m_traj)\n # trigger updating coordinates\n update_coords()\n assert len(view._coordinates_dict.keys()) == 2\n if has_MDAnalysis:\n from MDAnalysis import Universe\n mda_traj = Universe(nv.datafiles.PDB, nv.datafiles.TRR)\n view.add_trajectory(mda_traj)\n update_coords()\n assert len(view._coordinates_dict.keys()) == 3\n if has_HTMD:\n from htmd import Molecule\n htmd_traj = Molecule(nv.datafiles.PDB)\n htmd_traj.filter('protein')\n view.add_trajectory(htmd_traj)\n update_coords()\n if has_MDAnalysis:\n assert len(view._coordinates_dict.keys()) == 4\n else:\n assert len(view._coordinates_dict.keys()) == 3\n\n\ndef test_API_promise_to_have_add_more_backend():\n @nv.register_backend('dummy')\n class MyLovelyClass(nv.Structure, nv.Trajectory):\n pass\n\n assert 'dummy' in nv.BACKENDS\n\n\ndef test_handling_n_components_changed():\n view = nv.NGLWidget()\n n_traj = nv.PyTrajTrajectory(pt.load(nv.datafiles.PDB))\n view.add_trajectory(n_traj)\n # fake updating n_components and _repr_dict from front-end\n view._ngl_repr_dict = REPR_DICT\n view.n_components = 1\n view.player.widget_repr = view.player._make_widget_repr()\n view.remove_component(n_traj.id)\n # fake updating n_components from front-end\n view._ngl_repr_dict = {'c0': {}}\n view.n_components = 0\n\n\ndef test_base_adaptor():\n # abstract base class\n def func_0():\n nv.Structure().get_structure_string()\n\n def func_1():\n nv.Trajectory().get_coordinates(1)\n\n def func_2():\n nv.Trajectory().n_frames\n\n pytest.raises(NotImplementedError, func_0)\n pytest.raises(NotImplementedError, func_1)\n pytest.raises(NotImplementedError, func_2)\n\n\ndef test_coordinates_dict():\n traj = pt.load(nv.datafiles.TRR, nv.datafiles.PDB)\n view = nv.show_pytraj(traj)\n view.frame = 1\n coords = view._coordinates_dict[0]\n aa_eq(coords, traj[1].xyz)\n\n # dummy\n view._send_binary = False\n view._coordinates_dict = {0: coords}\n # increase coverage for IndexError: make index=1000 (which is larger than n_frames)\n view._set_coordinates(1000)\n\n\ndef test_load_data():\n view = nv.show_pytraj(pt.datafiles.load_tz2())\n\n # load blob with ext\n blob = open(nv.datafiles.PDB).read()\n view._load_data(blob, ext='pdb')\n\n # raise if passing blob but does not provide ext\n with pytest.raises(ValueError):\n view._load_data(blob)\n\n # raise if passing dummy name\n with pytest.raises(NameError):\n view._load_data(hahahaha)\n\n # load PyTrajectory\n t0 = nv.PyTrajTrajectory(pt.datafiles.load_ala3())\n view._load_data(t0)\n\n # load current folder\n view._load_data(get_fn('tz2.pdb'))\n\n\ndef test_representations():\n view = nv.show_pytraj(pt.datafiles.load_tz2())\n view.representations = DEFAULT_REPR\n view.add_cartoon()\n representations_2 = DEFAULT_REPR[:]\n representations_2.append({'type': 'cartoon', 'params': {'sele': 'all'}})\n _assert_dict_list_equal(view.representations, representations_2)\n\n # Representations\n # make fake params\n try:\n view._ngl_repr_dict = {'c0': {'0': {'parameters': {}}}}\n except (KeyError, TraitError):\n # in real application, we are not allowed to assign values\n pass\n\n view._ngl_repr_dict = REPR_DICT\n representation_widget = RepresentationControl(view, 0, 0)\n representation_widget\n representation_widget._on_parameters_changed(change=dict(new=dict()))\n\n\ndef test_representation_control():\n view = nv.demo()\n repr_control = view._display_repr()\n\n 
repr_control.name = 'surface'\n repr_control.name = 'cartoon'\n repr_control.repr_index = 1\n repr_control.component_index = 1\n\n\ndef test_add_repr_shortcut():\n view = nv.show_pytraj(pt.datafiles.load_tz2())\n assert isinstance(view, nv.NGLWidget), 'must be instance of NGLWidget'\n\n # add\n view.add_cartoon(color='residueindex')\n view.add_rope(color='red')\n\n # update\n view.update_cartoon(opacity=0.4)\n view.update_rope(coor='blue')\n\n # remove\n view.remove_cartoon()\n view.remove_rope()\n\n\ndef test_color_scheme():\n view = nv.demo()\n scheme = nv.color._ColorScheme([\n ['red', '1-6'],\n ['yellow', '20-30'],\n \"hey\"\n ])\n view.clear()\n view.add_cartoon(color=scheme)\n\n\ndef test_add_new_shape():\n view = nv.NGLWidget()\n sphere = ('sphere', [0, 0, 9], [1, 0, 0], 1.5)\n arrow = ('arrow', [1, 2, 7], [30, 3, 3], [1, 0, 1], 1.0)\n view._add_shape([sphere, arrow], name='my_shape')\n\n # Shape\n view.shape.add_arrow([1, 2, 7], [30, 3, 3], [1, 0, 1], 1.0)\n\n\ndef test_add_buffer():\n view = nv.NGLWidget()\n view\n kwargs = {\n \"position\": [0, 0, 0, 1, 1, 1],\n \"color\": [1, 0, 0, 255, 0, 0],\n \"radius\": [1., 2.]\n }\n\n view.shape.add_buffer('sphere', **kwargs)\n\n\ndef test_add_new_shape():\n view = nv.demo()\n view\n view.stage.set_parameters(mousePreset='default')\n\n\ndef test_remote_call():\n # how to test JS?\n view = nv.show_pytraj(pt.datafiles.load_tz2())\n view._remote_call('centerView', target='stage')\n\n fn = 'notebooks/tz2.pdb'\n kwargs = {'defaultRepresentation': True}\n view._remote_call(\n 'loadFile', target='stage', args=[\n fn,\n ], kwargs=kwargs)\n\n\ndef test_download_image():\n \"\"\"just make sure it can be called\n \"\"\"\n view = nv.show_pytraj(pt.datafiles.load_tz2())\n view.download_image('myname.png', 2, False, False, True)\n\n\ndef test_show_structure_file():\n view = nv.show_structure_file(nv.datafiles.PDB)\n\n\ndef test_show_file():\n view = nv.show_file(nv.datafiles.PDB)\n\n\ndef test_show_text():\n text = open(nv.datafiles.PDB).read()\n nv.show_text(text)\n\n\n@unittest.skipUnless(has_ase, 'skip if not having ase')\ndef test_show_ase():\n from ase import Atom, Atoms\n dimer = Atoms([Atom('X', (0, 0, 0)), Atom('X', (0, 0, 1))])\n dimer.set_positions([(1, 2, 3), (4, 5, 6.2)])\n nv.show_ase(dimer)\n\n\n@unittest.skipUnless(has_bio, 'skip if not having biopython')\ndef test_show_biopython():\n from Bio.PDB import PDBParser\n parser = PDBParser()\n structure = parser.get_structure('protein', nv.datafiles.PDB)\n nv.show_biopython(structure)\n\n\n@unittest.skipUnless(has_simpletraj, 'skip if not having simpletraj')\ndef test_show_simpletraj():\n traj = nv.SimpletrajTrajectory(nv.datafiles.XTC, nv.datafiles.GRO)\n view = nv.show_simpletraj(traj)\n view\n view.frame = 3\n\n\n@unittest.skipUnless(has_mdtraj, 'skip if not having mdtraj')\ndef test_show_mdtraj():\n import mdtraj as md\n from mdtraj.testing import get_fn\n fn = nv.datafiles.PDB\n traj = md.load(fn)\n view = nv.show_mdtraj(traj)\n\n\n@unittest.skipUnless(has_HTMD, 'skip if not having HTMD')\ndef test_show_htmd():\n from htmd import Molecule\n fn = nv.datafiles.PDB\n traj = Molecule(fn)\n view = nv.show_htmd(traj)\n # trigger updating cooridnates\n view.frame = 100\n index = 0\n view.frame = index\n xyz_htmd = np.squeeze(traj.coords[:, :, index])\n aa_eq(view._coordinates_dict[0], xyz_htmd)\n\n\n@unittest.skipUnless(has_MDAnalysis, 'skip if not having MDAnalysis')\ndef test_show_MDAnalysis():\n from MDAnalysis import Universe\n tn, fn = nv.datafiles.PDB, nv.datafiles.PDB\n u = Universe(fn, 
tn)\n view = nv.show_mdanalysis(u)\n\n\n@unittest.skipUnless(has_parmed, 'skip if not having ParmEd')\ndef test_show_parmed():\n import parmed as pmd\n fn = nv.datafiles.PDB\n parm = pmd.load_file(fn)\n view = nv.show_parmed(parm)\n\n ngl_traj = nv.ParmEdTrajectory(parm)\n ngl_traj.only_save_1st_model = False\n ngl_traj.get_structure_string()\n\n\n@unittest.skipUnless(has_rdkit, 'must have rdkit')\ndef test_show_rdkit():\n rdkit_mol = Chem.AddHs(\n Chem.MolFromSmiles(\n 'COc1ccc2[C@H](O)[C@@H](COc2c1)N3CCC(O)(CC3)c4ccc(F)cc4'))\n AllChem.EmbedMultipleConfs(\n rdkit_mol, useExpTorsionAnglePrefs=True, useBasicKnowledge=True)\n view = nv.show_rdkit(rdkit_mol, parmed=False)\n assert not view._trajlist\n view = nv.show_rdkit(rdkit_mol, parmed=True)\n assert view._trajlist\n\n view = nv.RdkitStructure(rdkit_mol)\n\n\ndef test_encode_and_decode():\n xyz = np.arange(100).astype('f4')\n shape = xyz.shape\n\n b64_str = encode_base64(xyz)\n new_xyz = decode_base64(b64_str, dtype='f4', shape=shape)\n aa_eq(xyz, new_xyz)\n\n\n@unittest.skipUnless(has_MDAnalysis, 'skip if not having MDAnalysis')\ndef test_coordinates_meta():\n from mdtraj.testing import get_fn\n fn, tn = [\n get_fn('frame0.pdb'),\n ] * 2\n trajs = [pt.load(fn, tn), md.load(fn, top=tn), pmd.load_file(tn, fn)]\n\n N_FRAMES = trajs[0].n_frames\n\n from MDAnalysis import Universe\n u = Universe(tn, fn)\n trajs.append(Universe(tn, fn))\n\n views = [\n nv.show_pytraj(trajs[0]),\n nv.show_mdtraj(trajs[1]),\n nv.show_parmed(trajs[2])\n ]\n views.append(nv.show_mdanalysis(trajs[3]))\n\n for index, (view, traj) in enumerate(zip(views, trajs)):\n view.frame = 3\n\n assert view._trajlist[0].n_frames == N_FRAMES\n\n\ndef test_structure_file():\n for fn in [get_fn('tz2.pdb'), nv.datafiles.GRO]:\n content = open(fn, 'r').read()\n fs1 = nv.FileStructure(fn)\n assert content == fs1.get_structure_string()\n\n # gz\n fn = get_fn('tz2_2.pdb.gz')\n fs2 = nv.FileStructure(fn)\n content = gzip.open(fn).read()\n assert content == fs2.get_structure_string()\n\n\ndef test_camelize_parameters():\n view = nv.NGLWidget()\n view.parameters = dict(background_color='black')\n assert 'backgroundColor' in view._parameters\n\n\ndef test_component_for_duck_typing():\n view = NGLWidget()\n traj = pt.load(nv.datafiles.PDB)\n view.add_component(get_fn('tz2.pdb'))\n view.add_component(get_fn('tz2_2.pdb.gz'))\n view.add_trajectory(nv.PyTrajTrajectory(traj))\n view.component_0.add_representation('cartoon')\n\n c0 = view[0]\n c1 = view[1]\n assert hasattr(view, 'component_0')\n assert hasattr(view, 'component_1')\n assert hasattr(view, 'trajectory_0')\n assert hasattr(view.trajectory_0, 'n_frames')\n assert hasattr(view.trajectory_0, 'get_coordinates')\n assert hasattr(view.trajectory_0, 'get_structure_string')\n\n c0.show()\n c0.hide()\n\n view.remove_component(c0.id)\n assert not hasattr(view, 'component_2')\n\n # negative indexing\n assert view[-1]._index == c1._index\n\n\ndef test_trajectory_show_hide_sending_cooridnates():\n view = NGLWidget()\n\n traj0 = pt.datafiles.load_tz2()\n traj1 = pt.datafiles.load_trpcage()\n\n view.add_trajectory(nv.PyTrajTrajectory(traj0))\n view.add_trajectory(nv.PyTrajTrajectory(traj1))\n\n for traj in view._trajlist:\n assert traj.shown\n\n view.frame = 1\n\n def copy_coordinate_dict(view):\n # make copy to avoid memory free\n return dict((k, v.copy()) for k, v in view._coordinates_dict.items())\n\n coordinates_dict = copy_coordinate_dict(view)\n aa_eq(coordinates_dict[0], traj0[1].xyz)\n aa_eq(coordinates_dict[1], traj1[1].xyz)\n\n # hide 0\n 
view.hide([\n 0,\n ])\n assert not view._trajlist[0].shown\n assert view._trajlist[1].shown\n\n # update frame so view can update its coordinates\n view.frame = 2\n coordinates_dict = copy_coordinate_dict(view)\n assert coordinates_dict[0].shape[0] == 0\n aa_eq(coordinates_dict[1], traj1[2].xyz)\n\n # hide 0, 1\n view.hide([0, 1])\n assert not view._trajlist[0].shown\n assert not view._trajlist[1].shown\n view.frame = 3\n coordinates_dict = copy_coordinate_dict(view)\n assert coordinates_dict[0].shape[0] == 0\n assert coordinates_dict[1].shape[0] == 0\n\n # slicing, show only component 1\n view[1].show()\n view.frame = 0\n assert not view._trajlist[0].shown\n assert view._trajlist[1].shown\n coordinates_dict = copy_coordinate_dict(view)\n assert coordinates_dict[0].shape[0] == 0\n aa_eq(coordinates_dict[1], traj1[0].xyz)\n\n # show all\n view[1].show()\n view[0].show()\n view.show(indices='all')\n view.show(indices=[\n 0,\n ])\n view.show(indices=[0, 1])\n view.frame = 1\n assert view._trajlist[1].shown\n coordinates_dict = copy_coordinate_dict(view)\n aa_eq(coordinates_dict[0], traj0[1].xyz)\n aa_eq(coordinates_dict[1], traj1[1].xyz)\n\n # hide all\n view[1].hide()\n view[0].hide()\n view.frame = 2\n assert not view._trajlist[0].shown\n assert not view._trajlist[1].shown\n coordinates_dict = copy_coordinate_dict(view)\n assert coordinates_dict[0].shape[0] == 0\n assert coordinates_dict[1].shape[0] == 0\n\n\ndef test_existing_js_files():\n from glob import glob\n jsfiles = glob(os.path.join(os.path.dirname(nv.__file__), 'static', '*js'))\n mapfiles = glob(\n os.path.join(os.path.dirname(nv.__file__), 'static', '*map'))\n\n assert len(jsfiles) == 2\n assert len(mapfiles) == 1\n\n\ndef test_add_structure():\n view = nv.NGLWidget()\n with pytest.raises(ValueError):\n # raise if not is instance of nv.Structure\n view.add_structure(nv.datafiles.PDB)\n\n\ndef test_add_struture_then_trajectory():\n view = nv.show_structure_file(get_fn('tz2.pdb'))\n view.loaded = True\n traj = pt.datafiles.load_trpcage()\n view.add_trajectory(traj)\n view.frame = 3\n coords = view._coordinates_dict[1].copy()\n aa_eq(coords, traj[3].xyz)\n view.loaded = False\n view.add_trajectory(traj)\n\n\ndef test_loaded_attribute():\n traj = pt.datafiles.load_tz2()\n structure = nv.FileStructure(nv.datafiles.PDB)\n\n # False, empty constructor\n view = nv.NGLWidget()\n view.loaded = False\n view.add_structure(structure)\n view.add_trajectory(traj)\n view._ipython_display_()\n\n # False, constructor with a single Structure\n view = nv.NGLWidget(structure)\n view.loaded = False\n view.add_trajectory(traj)\n view._ipython_display_()\n\n # True\n view = nv.NGLWidget()\n view.loaded = True\n view.add_structure(structure)\n view.add_trajectory(traj)\n view._ipython_display_()\n\n # False then True, empty constructor\n view = nv.NGLWidget()\n view.loaded = False\n view.add_structure(structure)\n view.loaded = True\n view.add_trajectory(traj)\n view._ipython_display_()\n\n # False then True, constructor with a Trajectory\n view = nv.NGLWidget(nv.PyTrajTrajectory(traj))\n view.loaded = False\n view.add_structure(structure)\n view.loaded = True\n view.add_trajectory(traj)\n view._ipython_display_()\n\n\ndef test_player_simple():\n traj = pt.datafiles.load_tz2()\n view = nv.show_pytraj(traj)\n assert not view.player.sync_frame\n\n # dummy\n component_slider = ipywidgets.IntSlider()\n repr_slider = ipywidgets.IntSlider()\n\n # dummy test\n player = nv.player.TrajectoryPlayer(view)\n player.smooth()\n player.camera = 'perspective'\n 
player.camera = 'orthographic'\n player.frame\n player.frame = 10\n player.count\n player.sync_frame = False\n player.sync_frame = True\n player.parameters = dict(step=2)\n player._display()\n player._make_button_center()\n player._make_button_theme()\n player._make_button_reset_theme()\n w = player._make_widget_preference()\n w.children[0].value = 1.\n player.widget_preference = None\n w = player._make_widget_preference()\n w.children[0].value = 1.\n player._show_download_image()\n player._make_button_url('dummy_url', description='dummy_url')\n player._show_website()\n player._make_button_qtconsole()\n player._make_text_picked()\n player._refresh(component_slider, repr_slider)\n player._make_widget_repr()\n player._make_resize_notebook_slider()\n player._make_button_export_image()\n player._make_repr_playground()\n player._make_drag_widget()\n player._make_spin_box()\n player._make_widget_picked()\n player._make_export_image_widget()\n player._make_theme_box()\n player._make_general_box()\n player._update_padding()\n player.spin = True\n player._on_spin_changed(change=dict(new=True))\n player._on_spin_x_changed(change=dict(new=1))\n player._on_spin_y_changed(change=dict(new=1))\n player._on_spin_z_changed(change=dict(new=1))\n player._on_spin_speed_changed(change=dict(new=0.5))\n player._spin_x = 2\n player._spin_y = 2\n player._spin_z = 2\n player.spin = False\n player._on_spin_changed(change=dict(new=True))\n player._on_spin_x_changed(change=dict(new=1))\n player._on_spin_y_changed(change=dict(new=1))\n player._on_spin_z_changed(change=dict(new=1))\n player._on_spin_speed_changed(change=dict(new=0.5))\n player._real_time_update = True\n player._make_widget_repr()\n player.widget_component_slider\n player.widget_repr_slider\n player._create_all_tabs()\n player._create_all_widgets()\n player.widget_tab = None\n player._create_all_widgets()\n player._simplify_repr_control()\n\n player._real_time_update = True\n player.widget_repr_slider.value = 0\n player.widget_repr_slider.value = 1\n slider_notebook = player._make_resize_notebook_slider()\n slider_notebook.value = 300\n\n player.widget_repr_name.value = 'surface'\n player.widget_repr_name.value = 'cartoon'\n\n\ndef test_player_submit_text():\n \"\"\" test_player_click_button \"\"\"\n view = nv.demo(gui=True)\n submit(view.player._make_command_box())\n\n\ndef test_player_click_button():\n \"\"\" test_player_click_button \"\"\"\n view = nv.demo(gui=True)\n view._ipython_display_()\n view._ngl_repr_dict = REPR_DICT\n view.player._create_all_widgets()\n view.player.widget_export_image = view.player._make_button_export_image()\n button_iter = chain.from_iterable([\n view.player.widget_repr_control_buttons.children,\n view.player.widget_theme.children,\n view.player.widget_drag.children,\n [\n view.player._show_download_image(),\n view.player._make_button_url(\"\", \"\"),\n view.player._make_button_center(),\n view.player._make_button_qtconsole(),\n view.player.widget_export_image.children[0].children[0],\n view.player.widget_repr_add.children[0],\n ],\n view.player.widget_drag.children,\n [\n w for w in view.player.widget_preference.children\n if isinstance(w, Button)\n ],\n ])\n for button in button_iter:\n click(button)\n\n\ndef test_player_link_to_ipywidgets():\n traj = pt.datafiles.load_tz2()\n view = nv.show_pytraj(traj)\n\n int_text = IntText(2)\n float_text = BoundedFloatText(40, min=10)\n HBox([int_text, float_text])\n link((int_text, 'value'), (view.player, 'step'))\n link((float_text, 'value'), (view.player, 'delay'))\n\n assert 
view.player.step == 2\n assert view.player.delay == 40\n\n float_text.value = 100\n assert view.player.delay == 100\n\n float_text.value = 0.00\n # we set min=10\n assert view.player.delay == 10\n\n\ndef test_player_interpolation():\n view = default_view()\n\n view.player.interpolate = True\n assert view.player.iparams.get('type') == 'linear'\n assert view.player.iparams.get('step') == 1\n\n\ndef test_player_picked():\n view = nv.demo()\n s = dict(x=3)\n view.player.widget_picked = view.player._make_text_picked()\n view.picked = s\n assert view.player.widget_picked.value == '{\"x\": 3}'\n\n\ndef test_layout_BoxNGL():\n view = nv.demo()\n box = nv.widget_box.BoxNGL([view])\n box._ipython_display_()\n box.layout = Layout()\n box._gui_style = 'row'\n box._gui_style = 'column'\n box._gui_style = 'row'\n\n\ndef test_widget_utils():\n box = HBox()\n i0 = IntText()\n i0._ngl_name = 'i0'\n i1 = IntText()\n i1._ngl_name = 'i1'\n box.children = [i0, i1]\n\n assert i0 is widget_utils.get_widget_by_name(box, 'i0')\n assert i1 is widget_utils.get_widget_by_name(box, 'i1')\n\n box.children = [i1, i0]\n assert i0 is widget_utils.get_widget_by_name(box, 'i0')\n assert i1 is widget_utils.get_widget_by_name(box, 'i1')\n\n assert widget_utils.get_widget_by_name(box, 'i100') is None\n assert widget_utils.get_widget_by_name(None, 'i100') is None\n\n\ndef test_adaptor_raise():\n with pytest.raises(ValueError):\n nv.FileStructure('hellotheredda.pdb')\n\n\ndef test_theme():\n from nglview import theme\n theme.oceans16()\n theme.reset()\n theme._get_theme('oceans16.css')\n\n\ndef test_player_click_tab():\n view = nv.demo()\n gui = view.player._display()\n assert isinstance(gui, ipywidgets.Tab)\n\n for i, child in enumerate(gui.children):\n try:\n gui.selected_index = i\n assert isinstance(child, ipywidgets.Box)\n except TraitError:\n pass\n\n\ndef test_interpolate():\n # dummy test\n traj = pt.datafiles.load_tz2()\n ngl_traj = nv.PyTrajTrajectory(traj)\n interpolate.linear(0, 0.4, ngl_traj, step=1)\n\n\ndef dummy_test_to_increase_coverage():\n nv.__version__\n\n\ndef test_widget_box():\n # empty\n box = nv.widget_box.BoxNGL()\n try:\n box.layout = Layout()\n except traitlets.TraitError:\n pass\n box._update_size()\n view = nv.demo()\n box = nv.widget_box.BoxNGL([view])\n box._update_size()\n\n box._is_beautified = True\n box._beautify()\n box._is_beautified = False\n box._beautify()\n\n\ndef test_viewer_control():\n view = nv.demo()\n view\n\n mat = [11, 12, 13, 14, 21, 22, 23, 24, 31, 32, 33, 34, 41, 42, 43, 44]\n\n vector = [0, 1, 2]\n\n view.control.align(mat)\n view.control.rotate(mat)\n view.control.translate(vector)\n view.control.apply_matrix(mat)\n view.control.center(vector)\n view.control.orient(mat)\n view.control.zoom(0.3)\n view.control.rotate(mat)\n view.control.spin(vector, 0.1)\n\n\ndef test_ambermd():\n from nglview.sandbox import amber\n with patch(\"pytraj.load\") as mock_pytraj_load, \\\n patch(\"pytraj.superpose\") as mock_pytraj_superpose, \\\n patch(\"os.path.exists\") as mock_exists:\n ambermd = amber.AmberMD(\n top='hey.parm7', restart='hey.rst7', reference='hey.ref')\n view = ambermd.initialize()\n ambermd.event = MagicMock()\n ambermd.event.is_set = MagicMock()\n ambermd.event.is_set.return_value = False\n ambermd.update(timeout=2)\n time.sleep(5)\n\n assert mock_pytraj_load.called\n assert mock_pytraj_superpose.called\n\n ambermd.stop()\n\n\ndef test_queuing_messages():\n view = nv.NGLWidget()\n view.add_component(nv.datafiles.PDB)\n view.download_image()\n view\n assert [f._method_name 
for f in view._ngl_displayed_callbacks_before_loaded] == \\\n ['setUnSyncCamera', 'setSelector', 'setUnSyncFrame', 'setDelay',\n 'loadFile',\n '_downloadImage']\n assert [f._method_name for f in view._ngl_displayed_callbacks_after_loaded] == \\\n ['loadFile']\n\n # display 2nd time\n view.sync_view()\n assert [f._method_name for f in view._ngl_displayed_callbacks_after_loaded] == \\\n ['loadFile']\n\n\n@patch('nglview.NGLWidget._unset_serialization')\ndef test_write_html(mock_unset):\n traj0 = pt.datafiles.load_trpcage()\n traj1 = pt.datafiles.load_tz2()\n view = nv.NGLWidget()\n view.add_trajectory(traj0)\n view.add_trajectory(traj1)\n view\n fp = StringIO()\n nv.write_html(fp, [view], frame_range=(0, 3))\n mock_unset.assert_called_with()\n assert len(view._ngl_coordinate_resource[0]) == 3\n assert len(view._ngl_coordinate_resource[1]) == 3\n"
] |
[
[
"numpy.testing.assert_almost_equal",
"numpy.squeeze",
"numpy.arange"
]
] |
huixiancheng/SENet
|
[
"d9f2339c689d695d3ab304c601423c845a5dc543"
] |
[
"train.py"
] |
[
"#!/usr/bin/env python3\n# This file is covered by the LICENSE file in the root of this project.\n\nimport argparse\nimport shutil\nfrom shutil import copyfile\nimport yaml\nimport os\nfrom modules.trainer import Trainer\n\n\nif __name__ == '__main__':\n\n def seed_torch(seed=1024):\n import random\n import torch\n import os\n import numpy as np\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.\n# torch.backends.cudnn.deterministic = True\n print(\"We use the seed: {}\".format(seed))\n seed_torch()\n\n parser = argparse.ArgumentParser(\"./train.py\")\n parser.add_argument(\n '--dataset', '-d',\n type=str,\n required=True,\n default=None,\n help='Dataset to train with. No Default',\n )\n parser.add_argument(\n '--arch_cfg', '-ac',\n type=str,\n required=False,\n default='config/arch/senet-512.yml',\n help='Architecture yaml cfg file. See /config/arch for sample. No default!',\n )\n parser.add_argument(\n '--data_cfg', '-dc',\n type=str,\n required=False,\n default='config/labels/semantic-kitti.yaml',\n help='Classification yaml cfg file. See /config/labels for sample. No default!',\n )\n parser.add_argument(\n '--log', '-l',\n type=str,\n default='logs',\n help='Directory to put the log data. Default: ~/logs/date+time'\n )\n parser.add_argument(\n '--name', '-n',\n type=str,\n default=None,\n help='If you want to give an aditional discriptive name'\n )\n parser.add_argument(\n '--pretrained', '-p',\n type=str,\n required=False,\n default=None,\n help='Directory to get the pretrained model. If not passed, do from scratch!'\n )\n\n FLAGS, unparsed = parser.parse_known_args()\n FLAGS.log = FLAGS.log + '/' + FLAGS.name\n # print summary of what we will do\n print(\"----------\")\n print(\"INTERFACE:\")\n print(\"dataset\", FLAGS.dataset)\n print(\"arch_cfg\", FLAGS.arch_cfg)\n print(\"data_cfg\", FLAGS.data_cfg)\n print(\"log\", FLAGS.log)\n print(\"pretrained\", FLAGS.pretrained)\n print(\"----------\\n\")\n\n # open arch config file\n try:\n print(\"Opening arch config file %s\" % FLAGS.arch_cfg)\n ARCH = yaml.safe_load(open(FLAGS.arch_cfg, 'r'))\n except Exception as e:\n print(e)\n print(\"Error opening arch yaml file.\")\n quit()\n\n # open data config file\n try:\n print(\"Opening data config file %s\" % FLAGS.data_cfg)\n DATA = yaml.safe_load(open(FLAGS.data_cfg, 'r'))\n except Exception as e:\n print(e)\n print(\"Error opening data yaml file.\")\n quit()\n\n # create log folder\n try:\n if os.path.isdir(FLAGS.log):\n shutil.rmtree(FLAGS.log)\n os.makedirs(FLAGS.log)\n except Exception as e:\n print(e)\n print(\"Error creating log directory. Check permissions!\")\n quit()\n\n # does model folder exist?\n if FLAGS.pretrained is not None:\n if os.path.isdir(FLAGS.pretrained):\n print(\"model folder exists! Using model from %s\" % (FLAGS.pretrained))\n else:\n print(\"model folder doesnt exist! Start with random weights...\")\n else:\n print(\"No pretrained directory found.\")\n\n # copy all files to log folder (to remember what we did, and make inference\n # easier). Also, standardize name to be able to open it later\n try:\n print(\"Copying files to %s for further reference.\" % FLAGS.log)\n copyfile(FLAGS.arch_cfg, FLAGS.log + \"/arch_cfg.yaml\")\n copyfile(FLAGS.data_cfg, FLAGS.log + \"/data_cfg.yaml\")\n except Exception as e:\n print(e)\n print(\"Error copying files, check permissions. 
Exiting...\")\n quit()\n\n # create trainer and start the training\n trainer = Trainer(ARCH, DATA, FLAGS.dataset, FLAGS.log, FLAGS.pretrained)\n trainer.train()\n"
] |
[
[
"torch.manual_seed",
"torch.cuda.manual_seed",
"numpy.random.seed",
"torch.cuda.manual_seed_all"
]
] |
wenh06/cinc2020
|
[
"38105ed9dac6554e2dd51b94e5553fb8ba22dbe6"
] |
[
"references/encase/code/resNet.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function, absolute_import\n\nimport tflearn\nimport tflearn.data_utils as du\n\nimport numpy as np\nimport ReadData\nimport tensorflow as tf\nfrom tensorflow.contrib import learn\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib\nfrom sklearn.model_selection import StratifiedKFold\nimport MyEval\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef truncate_long(ts, my_len):\n if len(ts) >= my_len:\n return ts[:my_len]\n else:\n ts += [0] * (my_len - len(ts))\n return ts\n \ndef sample_long(ts, interv):\n ts1 = []\n for i in range(len(ts) // interv):\n ts1.append(ts[i * interv])\n return ts1\n\ndef read_data():\n long_pid, long_data, long_label = ReadData.ReadData( '../data1/long.csv' )\n \n \n mat1 = [truncate_long(ts, 9000) for ts in long_data]\n# mat2 = [truncate_long(ts, 6000) for ts in long_data]\n# mat3 = [truncate_long(ts, 3000) for ts in long_data]\n \n# mat4 = [sample_long(ts, 10) for ts in mat1]\n# mat5 = [sample_long(ts, 10) for ts in mat2]\n# mat6 = [sample_long(ts, 10) for ts in mat3]\n\n \n label_onehot = ReadData.Label2OneHot(long_label)\n \n# plt.plot(mat1[0])\n# plt.plot(mat4[0])\n\n all_feature = np.array(mat1, dtype=np.float32)\n all_label = np.array(label_onehot, dtype=np.float32)\n\n kf = StratifiedKFold(n_splits=5, shuffle=True)\n \n for train_index, test_index in kf.split(all_feature, long_label):\n train_data = all_feature[train_index]\n train_label = all_label[train_index]\n test_data = all_feature[test_index]\n test_label = all_label[test_index]\n break\n \n train_data = np.expand_dims(np.array(train_data, dtype=np.float32), axis=2)\n test_data = np.expand_dims(np.array(test_data, dtype=np.float32), axis=2)\n \n return train_data, train_label, test_data, test_label\n\n\ntf.reset_default_graph()\nX, Y, testX, testY = read_data()\nX = X.reshape([-1, 9000, 1])\ntestX = testX.reshape([-1, 9000, 1])\n\n# Building Residual Network\nnet = tflearn.input_data(shape=[None, 9000, 1])\nnet = tflearn.conv_1d(net, 64, 16, 2, activation='relu', bias=False)\n\n# Residual blocks\nnet = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True, is_first_block = True)\nprint(\"resn2\", net.get_shape())\n'''net = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)\nprint(\"resn4\", net.get_shape())\nnet = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)\nprint(\"resn6\", net.get_shape())\nnet = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)\nprint(\"resn8\", net.get_shape())'''\nnet = tflearn.residual_bottleneck(net, 2, 16, 64, downsample_strides = 2, downsample=True)\nprint(\"resn4\", net.get_shape())\nnet = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)\nprint(\"resn6\", net.get_shape())\nnet = tflearn.residual_bottleneck(net, 2, 16, 128, downsample_strides = 2, downsample=True)\nprint(\"resn8\", net.get_shape())\nnet = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)\nprint(\"resn10\", net.get_shape()) \nnet = tflearn.residual_bottleneck(net, 2, 16, 256, downsample_strides = 2, downsample=True)\nprint(\"resn12\", net.get_shape())\nnet = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)\nprint(\"resn14\", net.get_shape())\nnet = tflearn.residual_bottleneck(net, 2, 16, 512, downsample_strides = 2, downsample=True)\nprint(\"resn16\", 
net.get_shape())\nnet = tflearn.batch_normalization(net)\nnet = tflearn.activation(net, 'relu')\nnet = tflearn.global_avg_pool(net)\nprint(\"beforeDense\", net.get_shape())\n# Regression\nnet = tflearn.fully_connected(net, 2, activation='softmax')\nprint(\"dense\", net.get_shape())\nnet = tflearn.regression(net, optimizer='momentum',\n loss='categorical_crossentropy',\n learning_rate=0.1)\n# Training\nmodel = tflearn.DNN(net, checkpoint_path='model_resnet',\n max_checkpoints=10, tensorboard_verbose=0)\nmodel.fit(X, Y, n_epoch=10, validation_set=(testX, testY),\n show_metric=True, batch_size=300, run_id='resnet', snapshot_step=10,\n snapshot_epoch=False)\n\n#Predict\ny_predicted=[i for i in model.predict(testX)]\n#Calculate F1Score\nMyEval.F1Score3_num(y_predicted, testY)\n"
] |
[
[
"numpy.array",
"tensorflow.reset_default_graph",
"tensorflow.logging.set_verbosity",
"sklearn.model_selection.StratifiedKFold"
]
] |
EMBEDDIA/autoBOT
|
[
"d8ad6aef23f0ae4e47d6fffacbca73d6874123b1",
"d8ad6aef23f0ae4e47d6fffacbca73d6874123b1"
] |
[
"autoBOTLib/optimization/optimization_random.py",
"autoBOTLib/features/features_sentence_embeddings.py"
] |
[
"\"\"\"\nThis file contains code on random search across tfidf parameter space. Skrlj 2019\n\"\"\"\n\nimport logging\nlogging.basicConfig(format='%(asctime)s - %(message)s',\n datefmt='%d-%b-%y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\nfrom feature_constructors import Normalizer, build_dataframe, get_simple_features, pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import f1_score\nimport numpy as np\nimport time\n\n# import warnings filter\nfrom warnings import simplefilter\n# ignore all future warnings\nsimplefilter(action='ignore')\n\n\ndef random_search():\n params = np.random.uniform(low=0.00001, high=1, size=2)\n return params\n\n\ndef evaluate_learner(cvar, train_features, train_targets, dev_features,\n dev_targets):\n \"\"\"\n Learner evaluation method.\n\n :param cvar: Regularization level.\n :param train_features: Train feature space.\n :param train_targets: Train target space.\n :param dev_features: Development feature space.\n :param dev_targets: Development target space.\n :return f1,clf: The F1 score and the classifier.\n \"\"\"\n\n clf = LinearSVC(C=cvar).fit(train_features, train_targets)\n predictions = clf.predict(dev_features)\n\n try:\n f1 = f1_score(dev_targets, predictions)\n\n except:\n f1 = f1_score(dev_targets, predictions, average=\"micro\")\n\n return f1, clf\n\n\ndef evolve_representation_random(train_seq,\n dev_seq,\n train_targets,\n dev_targets,\n time_constraint=1):\n \"\"\"\n Random representation exploration.\n \n :param train_seq: training sequences.\n :param dev_seq: development sequences.\n :param train_targets: training targets.\n :param dev taragets: dev targets.\n :param time_constraint: time in hours (int)\n \"\"\"\n\n copt = 0\n initial_time = time.time()\n time_diff = 0\n total_iterations = 0\n top_vectorizer = None\n top_learner = None\n mnf = 0\n while time_diff <= time_constraint:\n total_iterations += 1\n pars = random_search()\n if total_iterations % 2 == 0:\n logging.info(\n \"Best: {}, running time: {}min, iterations: {}\".format(\n copt,\n np.round(time_diff, 1) * 60, total_iterations))\n time_diff = ((time.time() - initial_time) / 60) / 60\n train_seq1 = build_dataframe(train_seq)\n dev_seq1 = build_dataframe(dev_seq)\n vectorizer, feature_names, _ = get_simple_features(train_seq1,\n max_num_feat=2048)\n svm_c = pars[0]\n if vectorizer:\n train_bow = vectorizer.transform(train_seq1)\n num_features = train_bow.shape[1]\n dev_bow = vectorizer.transform(dev_seq1)\n f1_perf, clf = evaluate_learner(svm_c, train_bow, train_targets,\n dev_bow, dev_targets)\n f1_perf = np.round(f1_perf, 3)\n if f1_perf > copt:\n copt = f1_perf\n top_vectorizer = vectorizer\n mnf = int(pars[1] * 5000)\n top_learner = clf\n logging.info(\n \"Improved performance to {}! num features: {}, running time: {}min\"\n .format(f1_perf, num_features,\n np.round(time_diff, 0) * 60))\n\n logging.info(\n \"Finished optimization with best performance: {}\".format(copt))\n auml_pip = pipeline.Pipeline([('union', top_vectorizer),\n ('scale', Normalizer()),\n ('classifier', top_learner)])\n logging.info(\"Optimization finished!\")\n return (auml_pip, mnf)\n",
"### relation extractor\n\nimport logging\nlogging.basicConfig(format='%(asctime)s - %(message)s',\n datefmt='%d-%b-%y %H:%M:%S')\nlogging.getLogger().setLevel(logging.INFO)\n\nimport pandas as pd\nimport string\nimport numpy as np\nimport tqdm\nimport multiprocessing as mp\n\nfrom scipy import sparse\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\nfrom gensim.utils import simple_preprocess\n\n\nclass documentEmbedder:\n \"\"\"\n Core class describing sentence embedding methodology employed here. \n The class functions as a sklearn-like object.\n \"\"\"\n def __init__(self,\n max_features=10000,\n num_cpu=8,\n dm=1,\n pretrained_path=\"doc2vec.bin\",\n ndim=512):\n \"\"\"The standard sgn function.\n\n :param max_features: integer, number of latent dimensions\n :param num_cpu: integer, number of CPUs to be used\n :param dm: Whether to use the \"distributed memory\" model\n :param pretrained_path: The path where a pretrained model is located (if any)\n \"\"\"\n\n self.max_features = max_features\n self.dm = dm\n self.pretrained_path = pretrained_path\n self.vocabulary = {}\n self.ndim = ndim\n self.model = None\n if num_cpu == \"all\":\n self.num_cpu = mp.cpu_count()\n\n else:\n self.num_cpu = num_cpu\n\n \n def fit(self, text_vector, b=None, refit=False):\n \"\"\"\n Fit the model to a text vector.\n :param text_vector: a list of texts\n \"\"\"\n\n if self.model is None and not refit:\n\n documents = [\n TaggedDocument(simple_preprocess(doc), [i])\n for i, doc in enumerate(text_vector.values.tolist())\n ]\n self.model = Doc2Vec(vector_size=self.ndim,\n window=5,\n min_count=1,\n workers=self.num_cpu,\n dm=self.dm)\n self.model.build_vocab(documents)\n self.model.train(documents,\n total_examples=self.model.corpus_count,\n epochs=32)\n\n \n def transform(self, text_vector):\n \"\"\"\n Transform the data into suitable form.\n :param text_vector: The text vector to be transformed via a trained model\n \"\"\"\n\n set(string.printable)\n final_matrix = np.zeros((len(text_vector), self.ndim))\n for enx, doc in enumerate(tqdm.tqdm(text_vector)):\n if len(doc) > 1:\n try:\n vector = self.model.infer_vector(simple_preprocess(doc))\n final_matrix[enx] = vector\n except:\n ## invalid inference.\n pass\n\n logging.info(\"Generated embeddings ({}) of shape {}\".format(\n self.dm, final_matrix.shape))\n\n return sparse.csr_matrix(final_matrix)\n\n \n def get_feature_names(self):\n\n return [str(x) + \"_\" + str(self.dm) for x in list(range(self.ndim))]\n\n \n def fit_transform(self, text_vector, a2=None):\n \"\"\"\n A classifc fit-transform method.\n :param text_vector: a text vector used to build and transform a corpus.\n \"\"\"\n\n self.fit(text_vector)\n return self.transform(text_vector)\n\n\nif __name__ == \"__main__\":\n\n example_text = pd.read_csv(\"../../data/dontpatronize/train.tsv\",\n sep=\"\\t\")['text_a']\n\n rex = documentEmbedder(dm=1)\n rex.fit(example_text)\n\n m = rex.transform(example_text)\n\n print(\"+\" * 100)\n m = rex.fit_transform(example_text)\n print(m)\n"
] |
[
[
"numpy.round",
"numpy.random.uniform",
"sklearn.metrics.f1_score",
"sklearn.svm.LinearSVC"
],
[
"pandas.read_csv",
"scipy.sparse.csr_matrix"
]
] |
hanyas/trajopt
|
[
"1cad9010be45851ec12fe4156ae73d9261304cb9"
] |
[
"examples/rgps/mb_lqr.py"
] |
[
"import numpy as np\n\nimport gym\n\nfrom trajopt.rgps import LRGPS\n\nfrom matplotlib import rc\nimport matplotlib.pyplot as plt\n\nimport tikzplotlib\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nrc('lines', **{'linewidth': 1})\nrc('text', usetex=True)\n\n\ndef beautify(ax):\n ax.set_frame_on(True)\n ax.minorticks_on()\n\n ax.grid(True)\n ax.grid(linestyle='--')\n\n ax.tick_params(which='both', direction='in',\n bottom=True, labelbottom=True,\n top=True, labeltop=False,\n right=True, labelright=False,\n left=True, labelleft=True)\n\n ax.tick_params(which='major', length=6)\n ax.tick_params(which='minor', length=3)\n\n # ax.autoscale(tight=True)\n # ax.set_aspect('equal')\n\n if ax.get_legend():\n ax.legend(loc='best')\n\n return ax\n\n\n# lqr task\nenv = gym.make('LQR-TO-v1')\nenv.env._max_episode_steps = 75\nenv.env.sigma0 = 1e-2 * np.eye(2)\nenv.env.sigma = 1e-4 * np.eye(2)\n\nnp.random.seed(1337)\nenv.seed(1337)\n\nrgps = LRGPS(env, nb_steps=75,\n policy_kl_bound=0.25,\n param_nominal_kl_bound=75e1,\n param_regularizer_kl_bound=10,\n init_state=env.init(),\n init_action_sigma=100.)\nrgps.run(nb_iter=100, verbose=True)\n\nnp.random.seed(1337)\nenv.seed(1337)\n\ngps = LRGPS(env, nb_steps=75,\n policy_kl_bound=0.25,\n param_nominal_kl_bound=75e1,\n param_regularizer_kl_bound=10,\n init_state=env.init(),\n init_action_sigma=100.)\ngps.run(nb_iter=100, verbose=True,\n optimize_adversary=False)\n\n# compute attack on final standard controller\ngps.param, gps.eta = gps.reguarlized_parameter_optimization(gps.ctl)\nprint(\"Disturbance KL:\", gps.parameter_nominal_kldiv(gps.param).sum())\n\nfig = plt.figure(figsize=(6, 12))\nplt.suptitle(\"Standard vs Robust Ctl: Feedback Controller\")\nfor i in range(rgps.dm_state):\n plt.subplot(rgps.dm_state + rgps.dm_act, 1, i + 1)\n plt.plot(rgps.ctl.K[0, i, ...], color='r', marker='x', markersize=2)\n plt.plot(gps.ctl.K[0, i, ...], color='b', marker='o', markersize=2)\n\nfor i in range(rgps.dm_act):\n plt.subplot(rgps.dm_state + rgps.dm_act, 1, rgps.dm_state + i + 1)\n plt.plot(rgps.ctl.kff[i, ...], color='r', marker='x', markersize=2)\n plt.plot(gps.ctl.kff[i, ...], color='b', marker='o', markersize=2)\n\naxs = fig.get_axes()\naxs = [beautify(ax) for ax in axs]\nplt.show()\n\ntikzplotlib.save(\"linear_feedback_gains.tex\")\n\nstd_xdist, std_udist, _ = gps.cubature_forward_pass(gps.ctl, gps.nominal)\nrobust_xdist, robust_udist, _ = rgps.cubature_forward_pass(rgps.ctl, rgps.nominal)\n\ncost_nom_env_std_ctl = gps.cost.evaluate(std_xdist, std_udist)\ncost_nom_env_rbst_ctl = rgps.cost.evaluate(robust_xdist, robust_udist)\n\nprint(\"Expected Cost of Standard and Robust Control on Nominal Env\")\nprint(\"Std. Ctl.: \", cost_nom_env_std_ctl, \"Rbst. Ctl.\", cost_nom_env_rbst_ctl)\n\nstd_worst_xdist, std_worst_udist, _ = gps.cubature_forward_pass(gps.ctl, gps.param)\nrobust_worst_xdist, robust_worst_udist, _ = rgps.cubature_forward_pass(rgps.ctl, gps.param)\n\ncost_adv_env_std_ctl = gps.cost.evaluate(std_worst_xdist, std_worst_udist)\ncost_adv_env_rbst_ctl = rgps.cost.evaluate(robust_worst_xdist, robust_worst_udist)\n\nprint(\"Expected Cost of Standard and Robust Control on Adverserial Env\")\nprint(\"Std. Ctl.: \", cost_adv_env_std_ctl, \"Rbst. 
Ctl.\", cost_adv_env_rbst_ctl)\n\nfig = plt.figure()\nplt.suptitle('Standard and Robust Control Without Adversary')\nfor k in range(rgps.dm_state):\n plt.subplot(rgps.dm_state + rgps.dm_act, 1, k + 1)\n\n t = np.linspace(0, rgps.nb_steps, rgps.nb_steps + 1)\n\n plt.plot(t, std_xdist.mu[k, :], '-b')\n lb = std_xdist.mu[k, :] - 2. * np.sqrt(std_xdist.sigma[k, k, :])\n ub = std_xdist.mu[k, :] + 2. * np.sqrt(std_xdist.sigma[k, k, :])\n plt.fill_between(t, lb, ub, color='blue', alpha=0.1)\n\nfor k in range(rgps.dm_act):\n plt.subplot(rgps.dm_state + rgps.dm_act, 1, rgps.dm_state + k + 1)\n\n t = np.linspace(0, rgps.nb_steps - 1, rgps.nb_steps)\n\n plt.plot(t, std_udist.mu[k, :], '-b')\n lb = std_udist.mu[k, :] - 2. * np.sqrt(std_udist.sigma[k, k, :])\n ub = std_udist.mu[k, :] + 2. * np.sqrt(std_udist.sigma[k, k, :])\n plt.fill_between(t, lb, ub, color='blue', alpha=0.1)\n\nfor k in range(gps.dm_state):\n plt.subplot(gps.dm_state + gps.dm_act, 1, k + 1)\n\n t = np.linspace(0, rgps.nb_steps, rgps.nb_steps + 1)\n\n plt.plot(t, robust_xdist.mu[k, :], '-r')\n lb = robust_xdist.mu[k, :] - 2. * np.sqrt(robust_xdist.sigma[k, k, :])\n ub = robust_xdist.mu[k, :] + 2. * np.sqrt(robust_xdist.sigma[k, k, :])\n plt.fill_between(t, lb, ub, color='red', alpha=0.1)\n\nfor k in range(rgps.dm_act):\n plt.subplot(gps.dm_state + gps.dm_act, 1, gps.dm_state + k + 1)\n\n t = np.linspace(0, rgps.nb_steps - 1, rgps.nb_steps)\n\n plt.plot(t, robust_udist.mu[k, :], '-r')\n lb = robust_udist.mu[k, :] - 2. * np.sqrt(robust_udist.sigma[k, k, :])\n ub = robust_udist.mu[k, :] + 2. * np.sqrt(robust_udist.sigma[k, k, :])\n plt.fill_between(t, lb, ub, color='red', alpha=0.1)\n\naxs = fig.get_axes()\naxs = [beautify(ax) for ax in axs]\nplt.show()\n\n# tikzplotlib.save(\"linear_trajectories_nominal.tex\")\n\nfig = plt.figure()\nplt.suptitle('Standard and Robust Control With Adversary')\nfor k in range(gps.dm_state):\n plt.subplot(gps.dm_state + gps.dm_act, 1, k + 1)\n\n t = np.linspace(0, rgps.nb_steps, rgps.nb_steps + 1)\n\n plt.plot(t, std_worst_xdist.mu[k, :], '-g')\n lb = std_worst_xdist.mu[k, :] - 2. * np.sqrt(std_worst_xdist.sigma[k, k, :])\n ub = std_worst_xdist.mu[k, :] + 2. * np.sqrt(std_worst_xdist.sigma[k, k, :])\n plt.fill_between(t, lb, ub, color='green', alpha=0.1)\n\nfor k in range(rgps.dm_act):\n plt.subplot(gps.dm_state + gps.dm_act, 1, gps.dm_state + k + 1)\n\n t = np.linspace(0, rgps.nb_steps - 1, rgps.nb_steps)\n\n plt.plot(t, std_worst_udist.mu[k, :], '-g')\n lb = std_worst_udist.mu[k, :] - 2. * np.sqrt(std_worst_udist.sigma[k, k, :])\n ub = std_worst_udist.mu[k, :] + 2. * np.sqrt(std_worst_udist.sigma[k, k, :])\n plt.fill_between(t, lb, ub, color='green', alpha=0.1)\n\nfor k in range(gps.dm_state):\n plt.subplot(gps.dm_state + gps.dm_act, 1, k + 1)\n\n t = np.linspace(0, rgps.nb_steps, rgps.nb_steps + 1)\n\n plt.plot(t, robust_worst_xdist.mu[k, :], '-m')\n lb = robust_worst_xdist.mu[k, :] - 2. * np.sqrt(robust_worst_xdist.sigma[k, k, :])\n ub = robust_worst_xdist.mu[k, :] + 2. * np.sqrt(robust_worst_xdist.sigma[k, k, :])\n plt.fill_between(t, lb, ub, color='magenta', alpha=0.1)\n\nfor k in range(rgps.dm_act):\n plt.subplot(gps.dm_state + gps.dm_act, 1, gps.dm_state + k + 1)\n\n t = np.linspace(0, rgps.nb_steps - 1, rgps.nb_steps)\n\n plt.plot(t, robust_worst_udist.mu[k, :], '-m')\n lb = robust_worst_udist.mu[k, :] - 2. * np.sqrt(robust_worst_udist.sigma[k, k, :])\n ub = robust_worst_udist.mu[k, :] + 2. 
* np.sqrt(robust_worst_udist.sigma[k, k, :])\n plt.fill_between(t, lb, ub, color='magenta', alpha=0.1)\n\naxs = fig.get_axes()\naxs = [beautify(ax) for ax in axs]\nplt.show()\n\n# tikzplotlib.save(\"linear_trajectories_adversarial.tex\")\n\nfrom trajopt.rgps.objects import MatrixNormalParameters\ninterp = MatrixNormalParameters(rgps.dm_state, rgps.dm_act, rgps.nb_steps)\n\nalphas = np.linspace(0., 2., 21)\n\ncost_adv_env_std_ctl = []\ncost_adv_env_rbst_ctl = []\nkl_distance = []\n\nfor alpha in alphas:\n print('Alpha:', alpha)\n\n interp.mu, interp.sigma = gps.interp_gauss_kl(gps.nominal.mu, gps.nominal.sigma,\n gps.param.mu, gps.param.sigma, alpha)\n\n kl_distance.append(np.sum(gps.parameter_nominal_kldiv(interp)))\n\n std_worst_xdist, std_worst_udist, _ = gps.cubature_forward_pass(gps.ctl, interp)\n robust_worst_xdist, robust_worst_udist, _ = gps.cubature_forward_pass(rgps.ctl, interp)\n\n cost_adv_env_std_ctl.append(gps.cost.evaluate(std_worst_xdist, std_worst_udist))\n cost_adv_env_rbst_ctl.append(gps.cost.evaluate(robust_worst_xdist, robust_worst_udist))\n\n print(\"Expected Cost of Standard and Robust Control on Adverserial Env\")\n print(\"Std. Ctl.: \", cost_adv_env_std_ctl[-1], \"Rbst. Ctl.\", cost_adv_env_rbst_ctl[-1])\n\nfig = plt.figure()\nplt.plot(kl_distance, cost_adv_env_std_ctl, 'b', marker='o')\nplt.xscale('log')\nplt.yscale('log')\nplt.plot(kl_distance, cost_adv_env_rbst_ctl, 'r', marker='*')\nplt.xscale('log')\nplt.yscale('log')\n\naxs = fig.gca()\naxs = beautify(axs)\nplt.show()\n\n# tikzplotlib.save(\"linear_cost_over_distance.tex\")\n\nkl_over_time = rgps.parameter_nominal_kldiv(gps.param)\n\nfig = plt.figure()\nplt.plot(kl_over_time, 'k', marker='.')\nplt.yscale('log')\n\naxs = fig.gca()\naxs = beautify(axs)\nplt.show()\n\n# tikzplotlib.save(\"linear_kl_over_time.tex\")\n"
] |
[
[
"numpy.sqrt",
"numpy.random.seed",
"numpy.linspace",
"numpy.eye",
"matplotlib.pyplot.yscale",
"matplotlib.rc",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
btw08/dask
|
[
"6e8c1b76feb12337574f40032dbd3818626b8e28"
] |
[
"dask/array/wrap.py"
] |
[
"from __future__ import absolute_import, division, print_function\n\nfrom functools import partial\nfrom itertools import product\n\nimport numpy as np\n\ntry:\n from cytoolz import curry\nexcept ImportError:\n from toolz import curry\n\nfrom ..base import tokenize\nfrom ..utils import funcname\nfrom .core import Array, normalize_chunks\nfrom .utils import meta_from_array\n\n\ndef _parse_wrap_args(func, args, kwargs, shape):\n if isinstance(shape, np.ndarray):\n shape = shape.tolist()\n\n if not isinstance(shape, (tuple, list)):\n shape = (shape,)\n\n chunks = kwargs.pop('chunks', 'auto')\n\n dtype = kwargs.pop('dtype', None)\n if dtype is None:\n dtype = func(shape, *args, **kwargs).dtype\n dtype = np.dtype(dtype)\n\n chunks = normalize_chunks(chunks, shape, dtype=dtype)\n name = kwargs.pop('name', None)\n\n name = name or funcname(func) + '-' + tokenize(func, shape, chunks, dtype, args, kwargs)\n\n return {'shape': shape, 'dtype': dtype, 'kwargs': kwargs,\n 'chunks': chunks, 'name': name}\n\n\ndef wrap_func_shape_as_first_arg(func, *args, **kwargs):\n \"\"\"\n Transform np creation function into blocked version\n \"\"\"\n if 'shape' not in kwargs:\n shape, args = args[0], args[1:]\n else:\n shape = kwargs.pop('shape')\n\n if isinstance(shape, Array):\n raise TypeError('Dask array input not supported. '\n 'Please use tuple, list, or a 1D numpy array instead.')\n\n parsed = _parse_wrap_args(func, args, kwargs, shape)\n shape = parsed['shape']\n dtype = parsed['dtype']\n chunks = parsed['chunks']\n name = parsed['name']\n kwargs = parsed['kwargs']\n\n keys = product([name], *[range(len(bd)) for bd in chunks])\n shapes = product(*chunks)\n func = partial(func, dtype=dtype, **kwargs)\n vals = ((func,) + (s,) + args for s in shapes)\n\n dsk = dict(zip(keys, vals))\n return Array(dsk, name, chunks, dtype=dtype)\n\n\ndef wrap_func_like(func, *args, **kwargs):\n \"\"\"\n Transform np creation function into blocked version\n \"\"\"\n x = args[0]\n meta = meta_from_array(x, x.ndim)\n shape = kwargs.get('shape', x.shape)\n\n parsed = _parse_wrap_args(func, args, kwargs, shape)\n shape = parsed['shape']\n dtype = parsed['dtype']\n chunks = parsed['chunks']\n name = parsed['name']\n kwargs = parsed['kwargs']\n\n keys = product([name], *[range(len(bd)) for bd in chunks])\n shapes = product(*chunks)\n shapes = list(shapes)\n kw = [kwargs for _ in shapes]\n for i, s in enumerate(list(shapes)):\n kw[i]['shape'] = s\n vals = ((partial(func, dtype=dtype, **k),) + args for (k, s) in zip(kw, shapes))\n\n dsk = dict(zip(keys, vals))\n\n return Array(dsk, name, chunks, meta=meta.astype(dtype))\n\n\ndef wrap_func_like_safe(func, func_like, *args, **kwargs):\n \"\"\"\n Safe implementation for wrap_func_like(), attempts to use func_like(),\n if the shape keyword argument, falls back to func().\n \"\"\"\n try:\n return func_like(*args, **kwargs)\n except TypeError:\n return func(*args, **kwargs)\n\n\n@curry\ndef wrap(wrap_func, func, **kwargs):\n func_like = kwargs.pop('func_like', None)\n if func_like is None:\n f = partial(wrap_func, func, **kwargs)\n else:\n f = partial(wrap_func, func_like, **kwargs)\n template = \"\"\"\n Blocked variant of %(name)s\n\n Follows the signature of %(name)s exactly except that it also requires a\n keyword argument chunks=(...)\n\n Original signature follows below.\n \"\"\"\n if func.__doc__ is not None:\n f.__doc__ = template % {'name': func.__name__} + func.__doc__\n f.__name__ = 'blocked_' + func.__name__\n return f\n\n\nw = wrap(wrap_func_shape_as_first_arg)\n\nones = w(np.ones, 
dtype='f8')\nzeros = w(np.zeros, dtype='f8')\nempty = w(np.empty, dtype='f8')\nfull = w(np.full)\n\n\nw_like = wrap(wrap_func_like_safe)\n\n\nones_like = w_like(np.ones, func_like=np.ones_like)\nzeros_like = w_like(np.zeros, func_like=np.zeros_like)\nempty_like = w_like(np.empty, func_like=np.empty_like)\nfull_like = w_like(np.full, func_like=np.full_like)\n"
] |
[
[
"numpy.dtype"
]
] |