Dataset columns:
  repo_name : string (length 6 to 130)
  hexsha    : list
  file_path : list
  code      : list
  apis      : list
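Each record below pairs a repository with its file paths, file contents, and the API calls detected in each file. A minimal sketch of iterating such records, assuming the dump is stored as JSON lines (the file name is a placeholder):

    import json

    with open("code_apis.jsonl") as f:                    # hypothetical dump file
        for line in f:
            record = json.loads(line)
            repo = record["repo_name"]                    # e.g. "ahzz1207/ALBERT-TINYBERT"
            files = zip(record["file_path"], record["code"], record["apis"])
            for path, source, api_calls in files:
                print(repo, path, len(source), api_calls[:3])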
ahzz1207/ALBERT-TINYBERT
[ "d7065a8c7316fd46bc6855ed238c45533e926c0e" ]
[ "utils/tf_utils.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common TF utilities.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport math\n\nimport six\nimport tensorflow as tf\n\ndef gather_indexes(sequence_tensor, positions):\n \"\"\"Gathers the vectors at the specific positions.\n Args:\n sequence_tensor: Sequence output of `BertModel` layer of shape\n (`batch_size`, `seq_length`, num_hidden) where num_hidden is number of\n hidden units of `BertModel` layer.\n positions: Positions ids of tokens in sequence to mask for pretraining of\n with dimension (batch_size, max_predictions_per_seq) where\n `max_predictions_per_seq` is maximum number of tokens to mask out and\n predict per each sequence.\n Returns:\n Masked out sequence tensor of shape (batch_size * max_predictions_per_seq,\n num_hidden).\n \"\"\"\n sequence_shape = get_shape_list(\n sequence_tensor, name='sequence_output_tensor')\n batch_size = sequence_shape[0]\n seq_length = sequence_shape[1]\n width = sequence_shape[2]\n\n flat_offsets = tf.keras.backend.reshape(\n tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n flat_positions = tf.keras.backend.reshape(positions + flat_offsets, [-1])\n flat_sequence_tensor = tf.keras.backend.reshape(\n sequence_tensor, [batch_size * seq_length, width])\n output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n\n return output_tensor\n\n\ndef gelu(x):\n \"\"\"Gaussian Error Linear Unit.\n This is a smoother version of the RELU.\n Original paper: https://arxiv.org/abs/1606.08415\n Args:\n x: float Tensor to perform activation.\n Returns:\n `x` with the GELU activation applied.\n \"\"\"\n cdf = 0.5 * (1.0 + tf.tanh(\n (math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))))\n return x * cdf\n\n\ndef swish(features):\n \"\"\"Computes the Swish activation function.\n The tf.nn.swish operation uses a custom gradient to reduce memory usage.\n Since saving custom gradients in SavedModel is currently not supported, and\n one would not be able to use an exported TF-Hub module for fine-tuning, we\n provide this wrapper that can allow to select whether to use the native\n TensorFlow swish operation, or whether to use a customized operation that\n has uses default TensorFlow gradient computation.\n Args:\n features: A `Tensor` representing preactivation values.\n Returns:\n The activation value.\n \"\"\"\n features = tf.convert_to_tensor(features)\n return features * tf.nn.sigmoid(features)\n\n\ndef pack_inputs(inputs, print_=False):\n \"\"\"Pack a list of `inputs` tensors to a tuple.\n\n Args:\n inputs: a list of tensors.\n\n Returns:\n a tuple of tensors. 
if any input is None, replace it with a special constant\n tensor.\n \"\"\"\n inputs = tf.nest.flatten(inputs)\n outputs = []\n for x in inputs:\n if x is None:\n outputs.append(tf.constant(0, shape=[], dtype=tf.int32))\n if print_:\n print(\"None\")\n else:\n if print_:\n print(x.name)\n outputs.append(x)\n return tuple(outputs)\n\n\ndef unpack_inputs(inputs):\n \"\"\"unpack a tuple of `inputs` tensors to a tuple.\n\n Args:\n inputs: a list of tensors.\n\n Returns:\n a tuple of tensors. if any input is a special constant tensor, replace it\n with None.\n \"\"\"\n inputs = tf.nest.flatten(inputs)\n outputs = []\n for x in inputs:\n if is_special_none_tensor(x):\n outputs.append(None)\n else:\n outputs.append(x)\n x = tuple(outputs)\n\n # To trick the very pointless 'unbalanced-tuple-unpacking' pylint check\n # from triggering.\n if len(x) == 1:\n return x[0]\n return tuple(outputs)\n\n\ndef is_special_none_tensor(tensor):\n \"\"\"Checks if a tensor is a special None Tensor.\"\"\"\n return tensor.shape.ndims == 0 and tensor.dtype == tf.int32\n\n\n# TODO(hongkuny): consider moving custom string-map lookup to keras api.\ndef get_activation(identifier):\n \"\"\"Maps a identifier to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n It checks string first and if it is one of customized activation not in TF,\n the corresponding activation will be returned. For non-customized activation\n names and callable identifiers, always fallback to tf.keras.activations.get.\n\n Args:\n identifier: String name of the activation function or callable.\n\n Returns:\n A Python function corresponding to the activation function.\n \"\"\"\n if isinstance(identifier, six.string_types):\n name_to_fn = {\n \"gelu\": gelu,\n \"custom_swish\": swish,\n }\n identifier = str(identifier).lower()\n if identifier in name_to_fn:\n return tf.keras.activations.get(name_to_fn[identifier])\n return tf.keras.activations.get(identifier)\n\n\ndef get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape\n\n\ndef assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n raise ValueError(\n \"For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not \"\n \"equal to the expected tensor rank `%s`\" %\n (name, actual_rank, str(tensor.shape), str(expected_rank)))\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.nn.sigmoid", "tensorflow.constant", "tensorflow.range", "tensorflow.shape", "tensorflow.pow", "tensorflow.nest.flatten", "tensorflow.gather", "tensorflow.keras.backend.reshape", "tensorflow.keras.activations.get" ] ]
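A minimal usage sketch for the helpers defined in utils/tf_utils.py above; the shapes and values are illustrative, and TensorFlow 2.x is assumed:

    import tensorflow as tf
    from utils.tf_utils import get_shape_list, gather_indexes, get_activation

    sequence = tf.random.uniform([2, 8, 16])             # (batch, seq_len, hidden)
    positions = tf.constant([[0, 3], [1, 5]])            # (batch, max_predictions)

    print(get_shape_list(sequence, expected_rank=3))     # [2, 8, 16]
    print(gather_indexes(sequence, positions).shape)     # (4, 16)
    print(get_activation("gelu")(tf.constant([-1.0, 0.0, 1.0])))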
HeosSacer/pyautomonkey
[ "f0863aab0e6464f50659c9ff183558739f56a772" ]
[ "pyautomonkey/image_tools.py" ]
[ "from desktopmagic.screengrab_win32 import getScreenAsImage\nimport win32gui, win32con\nfrom PIL import Image\nfrom time import sleep\nfrom timeit import default_timer as timer\nimport numpy as np\nimport cv2\n\n\ndef retrieve_image(window_name=None):\n \"\"\"\n brings specified window to foreground and returns an image of the\n whole screen.\n \"\"\"\n start = timer()\n\n toplist, winlist = [], []\n\n def enum_cb(hwnd, results):\n winlist.append((hwnd, win32gui.GetWindowText(hwnd)))\n\n win32gui.EnumWindows(enum_cb, toplist)\n\n if window_name:\n window_name = window_name.lower()\n window = [(hwnd, title) for hwnd, title in winlist if window_name in title.lower()]\n\n # just grab the hwnd for first window matching the window name\n try:\n window = window[0]\n hwnd = window[0]\n except IndexError:\n print(\"\\nERROR: Specified window with name %s not found!\\n\" % window_name)\n raise\n\n win32gui.ShowWindow(hwnd,win32con.SW_MAXIMIZE)\n win32gui.SetForegroundWindow(hwnd)\n bbox = win32gui.GetWindowRect(hwnd)\n img = getScreenAsImage()\n\n end = timer()\n\n img = np.array(img.convert('RGB'))\n img = img.astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n return img\n\n\ndef load_template(template_file_name):\n \"\"\"\n loads a template from file\n \"\"\"\n if type(template_file_name) is not list:\n template = Image.open(template_file_name).convert('RGB')\n template = np.array(template)\n # Convert RGB to BGR\n template = template.astype(np.uint8)\n template = cv2.cvtColor(template, cv2.COLOR_RGB2GRAY)\n return template\n elif type(template_file_name) is list:\n raise AttributeError(\"lists of templates are work in progress\")\n\n\ndef find_template(template, im=None, window_name=None):\n \"\"\"\n finds template on screen or specified window_name and returns xy-coordinates\n \"\"\"\n # if no image was specified, retreive an image\n im_loaded = False\n im = retrieve_image(window_name)\n # find template\n res = cv2.matchTemplate(im, template, cv2.TM_CCOEFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n\n # calculate position of found picture center\n height, width = template.shape\n x_corner, y_corner = max_loc\n xy = (int(x_corner + (width/2)), int(y_corner + (height/2)))\n\n # return location and matching probability\n return xy, max_val\n" ]
[ [ "numpy.array" ] ]
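A hypothetical usage sketch for pyautomonkey/image_tools.py above (Windows only; requires desktopmagic, pywin32, Pillow, NumPy and OpenCV). The template path, window name and the 0.8 confidence threshold are illustrative assumptions:

    from pyautomonkey.image_tools import load_template, find_template

    template = load_template("ok_button.png")            # grayscale template image
    xy, confidence = find_template(template, window_name="notepad")
    if confidence > 0.8:
        print("Template matched near", xy)
    else:
        print("Best match score only %.2f" % confidence)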
Martinjoh1/Ticket-Maker
[ "71fb2004d28c6bbd4d071df8b1fc785f866bb1d1" ]
[ "ticket_scanner.py" ]
[ "import csv\nimport os\nimport os.path\nimport pandas as pd\n\n\ndef create_scanned_csv():\n csvData = ['ID']\n\n with open('scanned_tickets.csv', 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n\n csvFile.close()\n\ndef add_to_csv(id):\n with open('scanned_tickets.csv', 'a') as csvAdd:\n writer = csv.writer(csvAdd)\n row = [id]\n writer.writerow(row)\n csvAdd.close()\n\ndef check_valid():\n id = raw_input(\"Ready to scan.\"+\"\\n\")\n new_id = \"\"\n for i in range(len(id)-1):\n new_id += id[i]\n new_id += \".0\"\n new_id = float(new_id)\n data = pd.read_csv(\"scanned_tickets.csv\")\n scanned_list = list(data[\"ID\"])\n if new_id not in scanned_list:\n pass\n else:\n print(\"Ticket already scanned.\")\n return False\n # data = pd.read_csv(\"scanned_tickets.csv\")\n # scanned_list = list(data[\"ID\"])\n data = pd.read_csv(\"ticket.csv\")\n bar_list = list(data[\"ID\"])\n if new_id not in bar_list:\n print(\"Invalid Ticket\")\n else:\n print(\"Thanks, enjoy the show.\")\n return new_id\n\n\ndef main():\n\n if not os.path.isfile('scanned_tickets.csv'):\n create_scanned_csv()\n id = check_valid()\n if id:\n add_to_csv(id)\n\n\nmain()\n" ]
[ [ "pandas.read_csv" ] ]
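Note that ticket_scanner.py above is a Python 2 script (it calls raw_input); it expects a ticket.csv with an ID column in the working directory and maintains scanned_tickets.csv alongside it. Illustrative invocation:

    python2 ticket_scanner.py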
xeb/transformers
[ "43fda01d35739622cad1dd190b170c052c5959c8" ]
[ "examples/run_language_modeling.py" ]
[ "# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).\nGPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned\nusing a masked language modeling (MLM) loss.\n\"\"\"\n\n\nimport argparse\nimport glob\nimport logging\nimport os\nimport pickle\nimport random\nimport re\nimport shutil\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nimport torch\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n MODEL_WITH_LM_HEAD_MAPPING,\n WEIGHTS_NAME,\n AdamW,\n AutoConfig,\n AutoModelWithLMHead,\n AutoTokenizer,\n PreTrainedModel,\n PreTrainedTokenizer,\n get_linear_schedule_with_warmup,\n)\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\n\nMODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\n\nclass TextDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):\n assert os.path.isfile(file_path)\n\n block_size = block_size - (tokenizer.max_len - tokenizer.max_len_single_sentence)\n\n directory, filename = os.path.split(file_path)\n cached_features_file = os.path.join(\n directory, args.model_type + \"_cached_lm_\" + str(block_size) + \"_\" + filename\n )\n\n if os.path.exists(cached_features_file) and not args.overwrite_cache:\n logger.info(\"Loading features from cached file %s\", cached_features_file)\n with open(cached_features_file, \"rb\") as handle:\n self.examples = pickle.load(handle)\n else:\n logger.info(\"Creating features from dataset file at %s\", directory)\n\n self.examples = []\n with open(file_path, encoding=\"utf-8\") as f:\n text = f.read()\n\n tokenized_text = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))\n\n for i in range(0, len(tokenized_text) - block_size + 1, block_size): # Truncate in block of block_size\n self.examples.append(tokenizer.build_inputs_with_special_tokens(tokenized_text[i : i + block_size]))\n # Note that we are loosing the last truncated example here for the sake of simplicity (no padding)\n # If your dataset is small, first you should loook for a bigger one :-) and second you\n # can change this behavior by adding (model specific) padding.\n\n logger.info(\"Saving features into cached file %s\", cached_features_file)\n with open(cached_features_file, \"wb\") as handle:\n pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def __len__(self):\n return 
len(self.examples)\n\n def __getitem__(self, item):\n return torch.tensor(self.examples[item], dtype=torch.long)\n\n\nclass LineByLineTextDataset(Dataset):\n def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path: str, block_size=512):\n assert os.path.isfile(file_path)\n # Here, we do not cache the features, operating under the assumption\n # that we will soon use fast multithreaded tokenizers from the\n # `tokenizers` repo everywhere =)\n logger.info(\"Creating features from dataset file at %s\", file_path)\n\n with open(file_path, encoding=\"utf-8\") as f:\n lines = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]\n\n self.examples = tokenizer.batch_encode_plus(lines, add_special_tokens=True, max_length=block_size)[\"input_ids\"]\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n return torch.tensor(self.examples[i], dtype=torch.long)\n\n\ndef load_and_cache_examples(args, tokenizer, evaluate=False):\n file_path = args.eval_data_file if evaluate else args.train_data_file\n if args.line_by_line:\n return LineByLineTextDataset(tokenizer, args, file_path=file_path, block_size=args.block_size)\n else:\n return TextDataset(tokenizer, args, file_path=file_path, block_size=args.block_size)\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef _sorted_checkpoints(args, checkpoint_prefix=\"checkpoint\", use_mtime=False) -> List[str]:\n ordering_and_checkpoint_path = []\n\n glob_checkpoints = glob.glob(os.path.join(args.output_dir, \"{}-*\".format(checkpoint_prefix)))\n\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match(\".*{}-([0-9]+)\".format(checkpoint_prefix), path)\n if regex_match and regex_match.groups():\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n return checkpoints_sorted\n\n\ndef _rotate_checkpoints(args, checkpoint_prefix=\"checkpoint\", use_mtime=False) -> None:\n if not args.save_total_limit:\n return\n if args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)\n if len(checkpoints_sorted) <= args.save_total_limit:\n return\n\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\n shutil.rmtree(checkpoint)\n\n\ndef mask_tokens(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. \"\"\"\n\n if tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. 
Remove the --mlm flag if you want to use this tokenizer.\"\n )\n\n labels = inputs.clone()\n # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n probability_matrix = torch.full(labels.shape, args.mlm_probability)\n special_tokens_mask = [\n tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)\n if tokenizer._pad_token is not None:\n padding_mask = labels.eq(tokenizer.pad_token_id)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n\n\ndef train(args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer) -> Tuple[int, float]:\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n\n def collate(examples: List[torch.Tensor]):\n if tokenizer._pad_token is None:\n return pad_sequence(examples, batch_first=True)\n return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)\n\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate\n )\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if (\n args.model_name_or_path\n and os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\"))\n and os.path.isfile(os.path.join(args.model_name_or_path, \"scheduler.pt\"))\n ):\n # Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n 
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if args.model_name_or_path and os.path.exists(args.model_name_or_path):\n try:\n # set global_step to gobal_step of last saved checkpoint from model path\n checkpoint_suffix = args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0]\n global_step = int(checkpoint_suffix)\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n except ValueError:\n logger.info(\" Starting fine-tuning.\")\n\n tr_loss, logging_loss = 0.0, 0.0\n\n model_to_resize = model.module if hasattr(model, \"module\") else model # Take care of distributed/parallel training\n model_to_resize.resize_token_embeddings(len(tokenizer))\n\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n set_seed(args) # Added here for reproducibility\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)\n inputs = inputs.to(args.device)\n labels = labels.to(args.device)\n model.train()\n outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)\n loss = outputs[0] # model outputs are always tuple in transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on 
multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer)\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n checkpoint_prefix = \"checkpoint\"\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"{}-{}\".format(checkpoint_prefix, global_step))\n os.makedirs(output_dir, exist_ok=True)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n _rotate_checkpoints(args, checkpoint_prefix)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix=\"\") -> Dict:\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_output_dir = args.output_dir\n\n eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)\n\n if args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir, exist_ok=True)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n\n def collate(examples: List[torch.Tensor]):\n if tokenizer._pad_token is None:\n return pad_sequence(examples, batch_first=True)\n return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate\n )\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch 
size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n inputs, labels = mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch)\n inputs = inputs.to(args.device)\n labels = labels.to(args.device)\n\n with torch.no_grad():\n outputs = model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels)\n lm_loss = outputs[0]\n eval_loss += lm_loss.mean().item()\n nb_eval_steps += 1\n\n eval_loss = eval_loss / nb_eval_steps\n perplexity = torch.exp(torch.tensor(eval_loss))\n\n result = {\"perplexity\": perplexity}\n\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n return result\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--train_data_file\", default=None, type=str, required=True, help=\"The input training data file (a text file).\"\n )\n parser.add_argument(\n \"--output_dir\",\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\n \"--model_type\", type=str, required=True, help=\"The model architecture to be trained or fine-tuned.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--eval_data_file\",\n default=None,\n type=str,\n help=\"An optional input evaluation data file to evaluate the perplexity on (a text file).\",\n )\n parser.add_argument(\n \"--line_by_line\",\n action=\"store_true\",\n help=\"Whether distinct lines of text in the dataset are to be handled as distinct sequences.\",\n )\n parser.add_argument(\n \"--should_continue\", action=\"store_true\", help=\"Whether to continue from latest checkpoint in output_dir\"\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n help=\"The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.\",\n )\n\n parser.add_argument(\n \"--mlm\", action=\"store_true\", help=\"Train with masked-language modeling loss instead of language modeling.\"\n )\n parser.add_argument(\n \"--mlm_probability\", type=float, default=0.15, help=\"Ratio of tokens to mask for masked language modeling loss\"\n )\n\n parser.add_argument(\n \"--config_name\",\n default=None,\n type=str,\n help=\"Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.\",\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=None,\n type=str,\n help=\"Optional pretrained tokenizer name or path if not the same as model_name_or_path. 
If both are None, initialize a new tokenizer.\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=None,\n type=str,\n help=\"Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)\",\n )\n parser.add_argument(\n \"--block_size\",\n default=-1,\n type=int,\n help=\"Optional input sequence length after tokenization.\"\n \"The training dataset will be truncated in block of this size for training.\"\n \"Default to the model max input length for single sentence inputs (take into account special tokens).\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\"\n )\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=4, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=4, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=1.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--save_total_limit\",\n type=int,\n default=None,\n help=\"Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default\",\n )\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if args.model_type in [\"bert\", \"roberta\", \"distilbert\", \"camembert\"] and not args.mlm:\n raise ValueError(\n \"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm \"\n \"flag (masked language modeling).\"\n )\n if args.eval_data_file is None and args.do_eval:\n raise ValueError(\n \"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file \"\n \"or remove the --do_eval argument.\"\n )\n if args.should_continue:\n sorted_checkpoints = _sorted_checkpoints(args)\n if len(sorted_checkpoints) == 0:\n raise ValueError(\"Used --should_continue but no checkpoint was found in --output_dir.\")\n else:\n args.model_name_or_path = sorted_checkpoints[-1]\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n and not args.should_continue\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab\n\n if args.config_name:\n config = AutoConfig.from_pretrained(args.config_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n config = AutoConfig.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n # When we release a pip version exposing CONFIG_MAPPING,\n # we can do `config = CONFIG_MAPPING[args.model_type]()`.\n raise ValueError(\n \"You are instantiating a new config instance from scratch. This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --config_name\"\n )\n\n if args.tokenizer_name:\n tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)\n elif args.model_name_or_path:\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)\n else:\n raise ValueError(\n \"You are instantiating a new tokenizer from scratch. 
This is not supported, but you can do it from another script, save it,\"\n \"and load it from here, using --tokenizer_name\"\n )\n\n if args.block_size <= 0:\n args.block_size = tokenizer.max_len\n # Our input block size will be the max possible for the model\n else:\n args.block_size = min(args.block_size, tokenizer.max_len)\n\n if args.model_name_or_path:\n model = AutoModelWithLMHead.from_pretrained(\n args.model_name_or_path,\n from_tf=bool(\".ckpt\" in args.model_name_or_path),\n config=config,\n cache_dir=args.cache_dir,\n )\n else:\n logger.info(\"Training new model from scratch\")\n model = AutoModelWithLMHead.from_config(config)\n\n model.to(args.device)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache\n\n train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)\n\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n global_step, tr_loss = train(args, train_dataset, model, tokenizer)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir, exist_ok=True)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(args.output_dir)\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = AutoModelWithLMHead.from_pretrained(args.output_dir)\n tokenizer = AutoTokenizer.from_pretrained(args.output_dir)\n model.to(args.device)\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n\n model = AutoModelWithLMHead.from_pretrained(checkpoint)\n model.to(args.device)\n result = evaluate(args, model, tokenizer, prefix=prefix)\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.utils.rnn.pad_sequence", "torch.utils.data.DataLoader", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.distributed.get_rank", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.distributed.barrier", "torch.tensor", "torch.full", "torch.bernoulli", "torch.cuda.device_count", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel", "numpy.random.seed", "torch.cuda.set_device", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.nn.DataParallel" ] ]
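An illustrative command line for examples/run_language_modeling.py above, using only flags defined in its argument parser; the model name and file paths are placeholders, and --mlm should be added for BERT/RoBERTa-style models:

    python examples/run_language_modeling.py \
        --output_dir=out \
        --model_type=gpt2 \
        --model_name_or_path=gpt2 \
        --do_train --train_data_file=train.txt \
        --do_eval --eval_data_file=eval.txt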
chouxianyu/TFA
[ "8802aeb66fbc3a5081fa3dfb2830ca517718afe1" ]
[ "datasets/prepare_voc_few_shot.py" ]
[ "\"\"\"\n基于VOC,生成多组(29组)不同seed的few-shot dataset\n\n代码逻辑如下:\n遍历训练集(07、12的train和val合起来)的标注。对于每个class,知道哪些图片包含(至少有1个)属于该class的object\n对于某个seed、某个class的多个k-shot:3-shot dataset包括1-shot dataset、5-shot dataset包括3-shot dataset,以此类推……\n对于某个seed、某个class、某个k-shot(以5-shot为例):\n 基于上个shot(3-shot)选取的图片(m张图片,最多3张,可以少于3张,最少1张;n个object,最少3个,最多不限量)。Note:这里有个bug,详见代码(可搜索TODO)\n 先再随机(random seed为当前seed)选取diff_shot张(5-3=2张)图片(因为每张图片至少有1个属于该class的object,所以最多使用diff_shot=2张图片就能得到至少3+2=5个object)。\n 遍历这diff_shot=2张图片(不一定全部遍历完),先将当前所遍历到的图片纳入该class的few-shot dataset,然后判断目前所纳入的图片中属于该class的object的数量是否大于等于diff_shot,是的话则不再遍历后续图片(最多遍历diff_shot张图片)\n\"\"\"\n\nimport argparse\nimport copy\nimport os\nimport random\nimport xml.etree.ElementTree as ET\n\nimport numpy as np\nfrom fsdet.utils.file_io import PathManager\n\n# VOC的20个class\nVOC_CLASSES = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',\n 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',\n 'tvmonitor'] # fmt: skip\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n # 生成多少组few-shot dataset,range(1,30)即生成1到29\n parser.add_argument(\n \"--seeds\", type=int, nargs=\"+\", default=[1, 30], help=\"Range of seeds\"\n )\n args = parser.parse_args()\n return args\n\n\ndef generate_seeds(args):\n #### VOC2007、VOC2012中train set和val set的图片的id\n data = []\n for year in [2007, 2012]:\n data_file = \"datasets/VOC{}/ImageSets/Main/trainval.txt\".format(year) # 该文件中保存了train set和val set中图片的id\n with PathManager.open(data_file) as f: # 打开文件\n fileids = np.loadtxt(f, dtype=np.str).tolist() # 读取图片的id保存在list中\n data.extend(fileids)\n\n\n #### 每个class对应的标注文件路径(如果某图片中有属于class c的object,则该图片的标注文件的路径就会存入该class c的list)\n data_per_cat = {c: [] for c in VOC_CLASSES}\n for fileid in data: # 遍历所有图片的id\n # 读取对应标注(xml)文件\n year = \"2012\" if \"_\" in fileid else \"2007\" # 年份(VOC2007的图片id例如“009946”,VOC2012的图片id例如“2011_000971”)\n dirname = os.path.join(\"datasets\", \"VOC{}\".format(year)) # 文件夹路径\n anno_file = os.path.join(dirname, \"Annotations\", fileid + \".xml\") # 标注文件路径\n tree = ET.parse(anno_file) # 读取xml文件\n # 该图片中有哪些class的object\n clses = []\n for obj in tree.findall(\"object\"):\n cls = obj.find(\"name\").text\n clses.append(cls)\n # 该图片中有属于该class的object,则将该图片的标注文件路径存入该class对应的list\n for cls in set(clses):\n data_per_cat[cls].append(anno_file)\n\n\n #### 生成所有seed情况下所有class的所有k-shot dataset\n result = {cls: {} for cls in data_per_cat.keys()} # 双层dict,外层dict的key为cls、内层dict的key为k(-shot)、内层dict的value为对应所有图片的文件路径(list)\n shots = [1, 2, 3, 5, 10] # k(-shots):1, 2, 3, 5, 10\n for i in range(args.seeds[0], args.seeds[1]): # 遍历每个seed\n random.seed(i) # 设置seed\n # 生成当前seed情况下所有class的所有k-shot dataset\n for c in data_per_cat.keys(): # 遍历每个class\n c_data = [] # 该class对应的所有(不超过max_k个)图片(文件路径)\n ## IMPORTANT: 生成当前seed情况下当前class的所有k-shot dataset\n for j, shot in enumerate(shots): # 遍历每个k(-shot)\n ## 先采样一组(diff_shot个)图片(但不一定都纳入few-shot dataset)。因为每张图片中至少有1个属于该class的object,所以这组图片至少包含diff_shot个属于该class的object\n diff_shot = shots[j] - shots[j - 1] if j != 0 else 1 # diff_shot: 当前k-shot比上个k-shot多多少\n shots_c = random.sample(data_per_cat[c], diff_shot) # 采样diff_shot张图片(标注文件),\n \n ## IMPORTANT:遍历这diff_shot张图片并先将当前所遍历到的图片纳入该class的few-shot dataset,然后判断目前所纳入的图片中属于该class的object的数量大于等于diff_shot(如果是,则停止遍历)\n num_objs = 0 # 当目前新纳入的图片中属于该class的object的数量大于等于diff_shot,就肯定够k个object了,不再遍历后续图片\n for s in shots_c: # 遍历每张图片\n # TODO:这行代码应该是错了,s是标注文件路径,c_data是图片文件路径的list,怎么可能相等呢?这行代码本意应该是避免重复采样相同图片或避免重复采样相同标注\n # 
TODO:如果本意是避免重复采样相同图片/标注,那如果新采样的所有 图片/标注 在之前都已经被采样过,那按这个逻辑走下去,当前的k-shot dataset中object数量会不足k个,所以这是个bug。\n # TODO:更好的方式是像`prepare_coco_few_shot.py`那样,先预采样k个object,无论是否基于上个shot生成当前shot的dataset。\n if s not in c_data:\n tree = ET.parse(s) # 读取该图片的标注文件\n file = tree.find(\"filename\").text # 如\"009392.jpg\"\n year = tree.find(\"folder\").text # 如\"VOC2007\"\n name = \"datasets/{}/JPEGImages/{}\".format(year, file) # 该图片的文件路径\n c_data.append(name) # 将该图片纳入该class的few-shot dataset\n for obj in tree.findall(\"object\"): # 遍历该图片中的所有object,统计属于该class的object的数量\n if obj.find(\"name\").text == c:\n num_objs += 1\n if num_objs >= diff_shot: # IMPORTANT: 当目前新纳入的图片中属于该class的object的数量大于等于diff_shot,就肯定够k个object了,不再遍历后续图片\n break\n result[c][shot] = copy.deepcopy(c_data) # 某class某k-shot对应的所有图片文件的路径。双层dict,外层dict的key为cls、内层dict的key为k(-shot)、内层dict的value为对应所有图片的文件路径(list)\n \n # 保存当前seed情况下所有class的所有k-shot dataset\n save_path = \"datasets/vocsplit/seed{}\".format(i)\n os.makedirs(save_path, exist_ok=True) # 创建该seed对应的文件夹\n for c in result.keys(): # 遍历所有class\n for shot in result[c].keys(): # 遍历所有k(-shot)\n filename = \"box_{}shot_{}_train.txt\".format(shot, c) # 文件路径\n # 当前seed情况下该class的该k-shot dataset(图片数量不固定)\n with open(os.path.join(save_path, filename), \"w\") as fp:\n fp.write(\"\\n\".join(result[c][shot]) + \"\\n\") # 每行为1个图片的文件路径\n print('Generated : ', save_path)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n generate_seeds(args)\n" ]
[ [ "numpy.loadtxt" ] ]
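datasets/prepare_voc_few_shot.py above expects the VOC2007 and VOC2012 trainval data under datasets/ and writes the few-shot splits to datasets/vocsplit/seed{N}. Illustrative invocation generating seeds 1 through 29 (the script's default range):

    python datasets/prepare_voc_few_shot.py --seeds 1 30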
raunak274/greyatom-python-for-data-science
[ "180158a75a67d124d4b3737d0ce1ca5e972c36bc" ]
[ "Wrangling-With-Pandas/code.py" ]
[ "# --------------\n# Importing header files\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.stats import mode \r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n#Reading file\r\nbank_data = pd.read_csv(path)\r\n\r\n#Code starts here\r\nbank = pd.DataFrame(bank_data)\r\n\r\ncategorical_var = bank.select_dtypes(include = 'object')\r\nprint(categorical_var)\r\n\r\nnumerical_var = bank.select_dtypes(include = 'number')\r\nprint(numerical_var)\r\n\r\nbanks = bank.drop(columns='Loan_ID')\r\nprint(banks.isnull().sum())\r\n\r\nbank_mode = banks.mode()\r\n\r\nbanks.fillna(bank_mode,inplace=True)\r\nprint(banks.isnull().sum())\r\n\r\navg_loan_amount = pd.pivot_table(banks,index=['Gender','Married','Self_Employed'],values=[\"LoanAmount\"],aggfunc=np.mean)\r\nprint(avg_loan_amount['LoanAmount'][1],2)\r\n\r\nloan_approved_se = banks.loc[(banks[\"Self_Employed\"]==\"Yes\") & (banks[\"Loan_Status\"]==\"Y\"), [\"Loan_Status\"]].count()\r\nprint(loan_approved_se)\r\n\r\nloan_approved_nse = banks.loc[(banks[\"Self_Employed\"]==\"No\") & (banks[\"Loan_Status\"]==\"Y\"), [\"Loan_Status\"]].count()\r\nprint(loan_approved_nse)\r\n\r\npercentage_se = (loan_approved_se * 100 / 614)\r\n#percentage_se=percentage_se[0]\r\nprint(\"%.2f\"%percentage_se)\r\n\r\npercentage_nse = (loan_approved_nse * 100 / 614)\r\n#percentage_nse=percentage_nse[0]\r\nprint (\"%.2f\"%percentage_nse)\r\n\r\nloan_term = banks['Loan_Amount_Term'].apply(lambda x: x/12 )\r\nbig_loan_term=len(loan_term[loan_term>=25])\r\nprint(big_loan_term)\r\n\r\nloan_groupby = banks.groupby(['Loan_Status'])\r\nloan_groupby = loan_groupby['ApplicantIncome','Credit_History']\r\nmean_values = loan_groupby.mean()\r\nprint(\"%.2f\"%mean_values.iloc[1,0],2)\n\n\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.pivot_table" ] ]
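The exercise script above calls pd.read_csv(path) without defining path, which is presumably supplied by the exercise platform. To run it standalone, define the variable first; the file name below is a placeholder for a CSV containing the columns the script uses (Loan_ID, Gender, Married, Self_Employed, LoanAmount, Loan_Status, Loan_Amount_Term, ApplicantIncome, Credit_History):

    path = "bank_loan_data.csv"   # placeholder path, assumed to exist locally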
ElementAI/SRKORNIA
[ "a0be8884bc79d9fdbc8e9145606a8b634c392d94" ]
[ "code/trainval.py" ]
[ "import os\nimport argparse\nimport pandas as pd\nimport pprint\nimport torch \n\nimport exp_configs\nfrom src.backbones import get_backbone\nfrom src import datasets\nfrom src import models\n\nfrom haven import haven_utils as hu\nfrom haven import haven_chk as hc\nfrom haven import haven_jobs as hj\n\n\ndef trainval(exp_dict, savedir_base, n_workers, test_only, reset=False):\n # bookkeeping\n # ---------------\n\n # get experiment directory\n exp_id = hu.hash_dict(exp_dict)\n savedir = os.path.join(savedir_base, exp_id)\n\n if reset:\n # delete and backup experiment\n hc.delete_experiment(savedir, backup_flag=True)\n \n # create folder and save the experiment dictionary\n os.makedirs(savedir, exist_ok=True)\n hu.save_json(os.path.join(savedir, 'exp_dict.json'), exp_dict)\n pprint.pprint(exp_dict)\n print('Experiment saved in %s' % savedir)\n\n # Dataset\n # -----------\n\n # train loader\n train_loader = datasets.get_loader(\"train\", exp_dict, n_workers, test_only=test_only)\n\n # val loader\n val_loader = datasets.get_loader(\"test\", exp_dict, n_workers, test_only=test_only)\n\n # Model\n # -----------\n model = models.get_model(exp_dict)\n\n # Checkpoint\n # -----------\n model_path = os.path.join(savedir, 'model.pth')\n score_list_path = os.path.join(savedir, 'score_list.pkl')\n\n if os.path.exists(score_list_path):\n # resume experiment\n model.set_state_dict(hu.torch_load(model_path))\n score_list = hu.load_pkl(score_list_path)\n s_epoch = score_list[-1]['epoch'] + 1\n else:\n # restart experiment\n score_list = []\n s_epoch = 0\n\n # Train & Val\n # ------------\n print('Starting experiment at epoch %d' % (s_epoch))\n\n for e in range(s_epoch, 10):\n score_dict = {}\n\n # Train the model\n score_dict.update(model.train_on_loader(train_loader))\n\n # Validate the model\n score_dict.update(model.test_on_loader(val_loader))\n\n # Get metrics\n # score_dict['train_loss'] = train_dict['train_loss']\n # score_dict['val_acc'] = val_dict['val_acc']\n score_dict['epoch'] = e\n\n # Add to score_list and save checkpoint\n score_list += [score_dict]\n\n # Report & Save\n score_df = pd.DataFrame(score_list)\n print(score_df.tail())\n hu.torch_save(model_path, model.get_state_dict())\n hu.save_pkl(score_list_path, score_list)\n print('Checkpoint Saved: %s' % savedir)\n\n print('experiment completed')\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-e', '--exp_group_list', nargs=\"+\")\n parser.add_argument('-sb', '--savedir_base', required=True)\n parser.add_argument('-nw', '--n_workers', type=int, default=0)\n parser.add_argument('-t', '--test_only', type=int, default=0)\n parser.add_argument(\"-r\", \"--reset\", default=0, type=int)\n parser.add_argument(\"-ei\", \"--exp_id\", default=None)\n parser.add_argument(\"-j\", \"--run_jobs\", type=int, default=0)\n\n args = parser.parse_args()\n\n # Collect experiments\n # -------------------\n if args.exp_id is not None:\n # select one experiment\n savedir = os.path.join(args.savedir_base, args.exp_id)\n exp_dict = hu.load_json(os.path.join(savedir, \"exp_dict.json\")) \n \n exp_list = [exp_dict]\n \n else:\n # select exp group\n exp_list = []\n for exp_group_name in args.exp_group_list:\n exp_list += exp_configs.EXP_GROUPS[exp_group_name]\n\n # launch jobs\n if args.run_jobs:\n # launch jobs\n from haven import haven_jobs as hjb\n run_command = ('python trainval.py -ei <exp_id> -sb %s -d %s -nw 1' % (args.savedir_base, args.datadir_base))\n job_config = {'volume': \"\",\n 'image': \"\",\n 'bid': 
'1',\n 'restartable': '1',\n 'gpu': '4',\n 'mem': '30',\n 'cpu': '2'}\n workdir = os.path.dirname(os.path.realpath(__file__))\n\n hjb.run_exp_list_jobs(exp_list, \n savedir_base=args.savedir_base, \n workdir=workdir,\n run_command=run_command,\n job_config=job_config)\n else:\n # run experiments\n for exp_dict in exp_list:\n # do trainval\n trainval(exp_dict=exp_dict,\n savedir_base=args.savedir_base,\n n_workers=args.n_workers,\n test_only=args.test_only,\n reset=args.reset)" ]
[ [ "pandas.DataFrame" ] ]
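An illustrative invocation of trainval.py above; the experiment group name is a placeholder for a key in exp_configs.EXP_GROUPS, and the save directory is arbitrary:

    python trainval.py -e my_exp_group -sb /path/to/results -nw 4 -r 1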
ncclementi/pygbe_lspr
[ "0a73406701a74369206c33e0a3fd31d7b5f0cf55" ]
[ "analysis_notebooks/scripts/data_analysis_helper.py" ]
[ "import numpy \nfrom matplotlib import pyplot, rcParams\nfrom scipy.interpolate import interp1d, splev, splrep\n\ndef wave_filter_interp(lambda_eval, lambda_interp):\n '''It removes the data points of a wavelength array, that are out of the\n range where the resulting interpolation is valid. \n\n Arguments:\n ----------\n lambda_eval : array, wavelength array that where we want to evaluate the\n resulting function of \n lambda_interp : array, wavelength array of the interpolated data. \n\n Returns:\n --------\n lambda_eval_new: array, wavelength array ready for evaluation. \n idx_min : int, index where the slicing of lambda_eval starts. \n idx_max : int, index where the slicing of lambda_eval ends.\n\n '''\n\n lam_min = numpy.where(lambda_eval<min(lambda_interp))[0]\n\n if len(lam_min)>0:\n idx_min = max(lam_min) + 1\n else:\n idx_min = 0\n\n lam_max = numpy.where(lambda_eval>max(lambda_interp))[0]\n\n if len(lam_max)>0:\n idx_max = min(lam_max)\n else:\n idx_max = None\n\n lambda_eval_new = lambda_eval[idx_min:idx_max]\n\n return lambda_eval_new, idx_min, idx_max\n\n\ndef nm_from_ev(electron_volts):\n '''Returns wavelength in nanometers [nm] from energy given in electron\n volts [eV].\n\n Arguments:\n ----------\n electron_volts: float/array, energy in electron volts [eV]\n\n Returns:\n --------\n lamb_nm = float/array, wavelength in nanometers [nm]\n '''\n\n h = 4.13566766225e-15 # Planck's contant in [eV.s]\n c = 2.99792458e17 # light velocity in [nm/s]\n \n lamb_nm = h*c / electron_volts \n\n return lamb_nm\n\ndef linear_interp(lamb, n, k):\n '''Returns the linear interpolation of the real and imaginary refractive index.\n \n Arguments:\n ----------\n lamb: array, wavelengths.\n n : array, real part of refractive index. \n k : array, imaginary part of refractive index.\n \n Returns:\n --------\n real_inter: function, interpolated function of the real part of the refrac index.\n imag_inter: function, interpolated function of the imaginary part the refrac index.\n \n '''\n real_inter = interp1d(lamb, n)\n imag_inter = interp1d(lamb, k)\n \n return real_inter, imag_inter\n\n\ndef spline(lamb, n, k):\n '''Returns the B-spline representations of the real and imaginary refractive index.\n \n Arguments:\n ----------\n lamb: array, wavelengths.\n n : array, real part of refractive index. \n k : array, imaginary part of refractive index.\n \n Returns:\n --------\n real_tuple: tuple, vector of knots, spline coefficients, and the degree of the spline\n of the real part of the refrac index.\n imag_tuple: tuple, vector of knots, spline coefficients, and the degree of the spline\n of the imaginary part of the refrac index.\n '''\n \n real_tuple = splrep(lamb, n)\n imag_tuple = splrep(lamb, k)\n \n return real_tuple, imag_tuple\n\n\ndef spline_eval(x, real_tuple, imag_tuple): \n '''Evaluates the B-splines of the real and imaginary refractive index.\n \n Arguments:\n ----------\n x : array, points at which to return the value of the spline.\n real_tuple: tuple, vector of knots, spline coefficients, and the degree of the spline\n of the real part of the refrac index.\n imag_tuple: tuple, vector of knots, spline coefficients, and the degree of the spline\n of the imaginary part of the refrac index.\n \n \n Returns:\n -------- \n real_spline: array, values representing the spline function evaluated at the points in\n x for real refractive index.\n imag_spline: array, values representing the spline function evaluated at the points in\n x for real refractive index. 
\n '''\n \n real_spline = splev(x, real_tuple)\n imag_spline = splev(x, imag_tuple)\n \n return real_spline, imag_spline\n\ndef plot_refrac(lamb, n, k): \n \"\"\"\n Plots the refractive index vs wavelength.\n Plots separately the real and imaginary part of the refractive index.\n \n Arguments:\n ----------\n lamb: array, wavelengths.\n n : array, real part of refractive index. \n k : array, imaginary part of refractive index.\n \n Returns:\n --------\n Plots of refrac_index_real vs lambda, refrac_index_imaginary vs lambda. \n \"\"\"\n \n pyplot.figure(figsize=(12,4)) \n\n pyplot.subplot(121)\n \n pyplot.scatter(lamb,n, color='#2929a3') \n \n pyplot.xlabel('Wavelength [nm]')\n pyplot.ylabel('Refractive index')\n pyplot.xlim(min(lamb)-5, max(lamb)+5)\n pyplot.xticks(numpy.linspace(min(lamb), max(lamb), 10), rotation=25)\n pyplot.title('Real')\n pyplot.grid(linestyle=':')\n \n \n pyplot.subplot(122)\n \n pyplot.scatter(lamb,k, color='#ff5733') \n \n pyplot.xlabel('Wavelength [nm]')\n #pyplot.ylabel('Refractive index')\n pyplot.xlim(min(lamb)-5, max(lamb)+5)\n pyplot.xticks(numpy.linspace(min(lamb), max(lamb), 10), rotation=25)\n pyplot.title('Imaginary')\n pyplot.grid(linestyle=':')\n\ndef plot_interpolation(lamb, n, k, lamb_x, real_linear, imag_linear, real_spline, imag_spline):\n '''Plots data, linear interpolation and spline of the real and imaginary refractive index\n \n Arguments:\n ---------- \n lamb : array, wavelengths.\n n : array, real part of refractive index. \n k : array, imaginary part of refractive index.\n lamb_x : array, points at which to return the value of the spline\n real_linear: function, interpolated function of the real part of the refrac index.\n imag_linear: function, interpolated function of the imaginary part the refrac index.\n real_spline: array, values representing the spline function evaluated at the points in\n x for real refractive index.\n imag_spline: array, values representing the spline function evaluated at the points in\n x for real refractive index. \n '''\n \n pyplot.figure(figsize=(12,12)) \n\n #Real refrac index\n pyplot.subplot(211)\n #data\n pyplot.scatter(lamb, n, color='#2929a3', alpha = 0.8, label = 'data')\n #linear interp\n pyplot.plot(lamb_x, real_linear(lamb_x), color = 'r', ls = '-', label = 'linear')\n #spline interp\n pyplot.plot(lamb_x, real_spline, color = 'g', ls = '--', label = 'spline')\n \n pyplot.xlim(min(lamb)-5, max(lamb)+5)\n pyplot.xticks(numpy.linspace(min(lamb), max(lamb), 20), rotation=25)\n pyplot.title('Real')\n pyplot.ylabel('Refractive index')\n pyplot.legend(loc='best')\n pyplot.grid(linestyle=':')\n \n #Imaginary refrac index\n pyplot.subplot(212)\n #data\n pyplot.scatter(lamb, k, color='#2929a3', alpha = 0.8, label = 'data') \n #linear interp\n pyplot.plot(lamb_x, imag_linear(lamb_x), color = 'r', ls = '-', label = 'linear')\n #spline interp\n pyplot.plot(lamb_x, imag_spline, color = 'g', ls = '--', label = 'spline')\n \n pyplot.xlim(min(lamb)-5, max(lamb)+5)\n pyplot.xticks(numpy.linspace(min(lamb), max(lamb), 20), rotation=25)\n pyplot.title('Imaginary')\n pyplot.ylabel('Refractive index')\n pyplot.xlabel('Wavelength [nm]')\n pyplot.legend(loc='best')\n pyplot.grid(linestyle=':')\n\ndef plot_sph_complex_convergence(N_Ag, N_Au, error_Ag, error_Au):\n \"\"\"\n Plots grid convergence for silver and gold sphere lspr problems.\n\n Arguments:\n ----------\n N : list, number of elements of meshes picked for convergence analysis. 
\n error: list, relative error compared to the analytical solution.\n \"\"\"\n pyplot.figure(figsize=(8,5))\n\n rcParams['font.family'] = 'serif'\n rcParams['font.size'] = 16\n rcParams['xtick.top'] = True\n rcParams['ytick.right'] = True\n rcParams['axes.linewidth'] = 2\n\n asymp_Ag = N_Ag[-2]*error_Ag[-2]/N_Ag\n asymp_Au = N_Au[-2]*error_Au[-2]/N_Au\n\n\n pyplot.loglog(N_Ag, error_Ag, ls='',marker='o', c='k', mew=1.5, mfc='w', ms=10, label='Ag')\n pyplot.loglog(N_Ag, asymp_Ag, c='k', marker='None', ls=':', lw=2, label=None)\n\n pyplot.loglog(N_Au, error_Au, ls='',marker='s', c='k', mew=1.5, mfc='w', ms=10, label='Au')\n pyplot.loglog(N_Au, asymp_Au, c='k', marker='None', ls=':', lw=2, label=None)\n\n loc_Ag = (3*N_Ag[-2]+N_Ag[-1])/4\n loc_Au = (3*N_Au[-2]+N_Au[-1])/4\n\n tex_loc_Ag = numpy.array((loc_Ag,N_Ag[-1]*error_Ag[-1]/loc_Ag))\n tex_loc_Au = numpy.array((loc_Au,N_Au[-1]*error_Au[-1]/loc_Au))\n\n pyplot.text(tex_loc_Ag[0], tex_loc_Ag[1],'N$^{-1}$', fontsize=12,\n rotation=-35,rotation_mode='anchor')\n pyplot.text(tex_loc_Au[0], tex_loc_Au[1],'N$^{-1}$',fontsize=12,\n rotation=-35,rotation_mode='anchor')\n\n pyplot.xlabel('N')\n pyplot.ylabel('Relative error')\n pyplot.tick_params(axis='both', length=10, width=1, which='major', direction='in')\n pyplot.tick_params(axis='both', length=5, width=1, which='minor', direction='in')\n pyplot.ylim(1e-4,1)\n pyplot.xlim(1e2,1e5)\n pyplot.legend(loc='best')\n pyplot.grid(True, which=\"both\")\n\n #Uncomment if desired to save figure\n #pyplot.savefig('figures/Cext_convergence_sph_Ag_Au.pdf', dpi=80, format='pdf')\n\ndef plot_cext_wave(lamb, cext, cext_an, ylim_s, ylim_e, xpoints, title=None):\n rcParams['font.family'] = 'serif'\n rcParams['font.size'] = 14\n rcParams['xtick.top'] = True\n rcParams['ytick.right'] = True\n rcParams['axes.linewidth'] = 2\n \n pyplot.figure(figsize=(9,6))\n\n pyplot.plot(lamb, cext, ls='', marker='o', color='0.4', mew=1.5, mfc='w', ms=7, label='PyGBe')\n pyplot.plot(lamb, cext_an, ls='--', marker='None', c='k', lw=1.5, label='Analytical')\n\n\n pyplot.xlabel('Wavelength [nm]')\n pyplot.ylabel('Cross extinction section [$nm^2$]')\n pyplot.xlim(min(lamb), max(lamb))\n pyplot.ylim(ylim_s, ylim_e)\n\n pyplot.xticks(numpy.linspace(min(lamb), max(lamb), xpoints), rotation=25)\n pyplot.tick_params(axis='both', length=8, width=1, direction='in')\n pyplot.title(title)\n pyplot.legend(loc='best')\n pyplot.grid(linestyle=':')\n\n #Uncomment if desired to save figure\n #pyplot.savefig('figures/cext_wave_'+title+'.pdf', dpi=80, format='pdf');\n\ndef plot_sph_multiple_complex_convergence(avg_density, error):\n \"\"\"\n Plots grid convergence for multiple spheres lspr problem.\n\n Arguments:\n ----------\n avg_density: list, avg elements/nm^2 of meshes picked for convergence analysis. 
\n error : list, relative error compared to the analytical solution.\n \"\"\"\n\n rcParams['font.family'] = 'serif'\n rcParams['font.size'] = 16\n rcParams['xtick.top'] = True\n rcParams['ytick.right'] = True\n rcParams['axes.linewidth'] = 2\n\n asymp = avg_density[-2]*error[-2]/avg_density\n\n pyplot.figure(figsize=(8,5))\n\n pyplot.loglog(avg_density, error, ls='',marker='o', c='k', mew=1.5, mfc='w', ms=10)\n pyplot.loglog(avg_density, asymp, c='k', marker='None', ls=':', lw=2)\n\n \n loc = (3*avg_density[-2]+avg_density[-1])/4\n\n tex_loc = numpy.array((loc,avg_density[-1]*error[-1]/loc))\n\n pyplot.text(tex_loc[0], tex_loc[1],'avg_den$^{-1}$', fontsize=12,\n rotation=-35,rotation_mode='anchor')\n \n\n pyplot.xlabel('Average elements/$nm^2$')\n pyplot.ylabel('Relative error')\n pyplot.tick_params(axis='both', length=10, width=1, which='major', direction='in')\n pyplot.tick_params(axis='both', length=5, width=1, which='minor', direction='in')\n pyplot.ylim(1e-3,1)\n pyplot.xlim(1e-1,1e2)\n pyplot.grid(True, which=\"both\")\n\n #Uncomment if desired to save figure\n #pyplot.savefig('figures/Cext_convergence_mult_sph.pdf', dpi=80, format='pdf')\n \ndef plot_cext_wave_distance(wavelength, cext, linestyles, colors, labels):\n '''Plots the cross extinction section as a function of wavelength for\n different values of distance at which the proteins are located.\n\n \tArguments:\n ----------\n wavelength: list of wavelength arrays for each distance case.\n cext : list of cross extinction section arrays for each distance case.\n linestyles: list of linstyles we desire to use for each distance case.\n colors : list of colors we desire to use for each distance case.\n labels : list of labels we desire to use for each distance case.\n\t'''\n rcParams['font.family'] = 'serif'\n rcParams['font.size'] = 16\n rcParams['xtick.top'] = True\n rcParams['ytick.right'] = True\n rcParams['axes.linewidth'] = 2\n\n fig=pyplot.figure(figsize=(9,6))\n ax = fig.add_subplot(1,1,1)\n \n major_xticks = numpy.linspace(min(wavelength[0]), max(wavelength[0]), 11)\n minor_xticks = numpy.linspace(min(wavelength[0]), max(wavelength[0]), 41)\n major_yticks = numpy.linspace(0, 8000, 9)\n minor_yticks = numpy.linspace(0, 8000, 33)\n\n ax.set_xticks(major_xticks) \n ax.set_xticks(minor_xticks, minor=True)\n ax.set_yticks(major_yticks) \n ax.set_yticks(minor_yticks, minor=True)\n\n pyplot.xticks(rotation=25)\n pyplot.tick_params(axis='both', length=5, width=1, which='major', direction='in')\n pyplot.tick_params(axis='both', length=2.5, width=1, which='minor', direction='in')\n\n pyplot.xlabel('Wavelength [nm]')\n pyplot.ylabel('Cross extinction section [$nm^2$]')\n pyplot.xlim(380,400)\n pyplot.ylim(0,8000)\n pyplot.grid(ls=':', which='minor', alpha=0.4)\n pyplot.grid(ls=':', which='major', alpha=0.8)\n #pyplot.title('Silver sphere with BSA Proteins')\n \n for i in range(len(wavelength)):\n pyplot.plot(wavelength[i], cext[i], linestyle=linestyles[i], \n color=colors[i], linewidth=2, label=labels[i])\n \n pyplot.legend(loc='best')\n\n #Uncomment if desired to save figure\n #pyplot.savefig('figures/Cext_wave_distance.pdf', dpi=80, format='pdf')\n\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.linspace", "matplotlib.pyplot.loglog", "matplotlib.pyplot.plot", "scipy.interpolate.interp1d", "matplotlib.pyplot.subplot", "matplotlib.pyplot.text", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "scipy.interpolate.splev", "numpy.array", "matplotlib.pyplot.ylabel", "scipy.interpolate.splrep", "matplotlib.pyplot.scatter", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.tick_params" ] ]
bmd2007/benchmark_eval
[ "aa42bb3369e79db4cb63e1963afcc8af6d8f5696" ]
[ "Methods/GenericPairwisePredictor.py" ]
[ "import os\r\nimport sys\r\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\r\nparentdir = os.path.dirname(currentdir)\r\nsys.path.append(parentdir)\r\nimport time\r\nimport numpy as np\r\nfrom joblib import dump, load\r\nimport torch\r\nimport copy\r\nfrom ProteinFeaturesHolder import ProteinFeaturesHolder\r\nfrom GenericModule import GenericModule\r\nclass GenericPairwisePredictor(GenericModule):\r\n\tdef __init__(self, hyperParams = None):\r\n\t\tif hyperParams is None:\r\n\t\t\thyperParams = {}\r\n\t\tself.hyperParams = copy.deepcopy(hyperParams)\r\n\t\t\r\n\t\tif 'featScaleClass' not in self.hyperParams:\r\n\t\t\tself.featScaleClass = None\r\n\t\telse:\r\n\t\t\tself.featScaleClass = self.hyperParams['featScaleClass']\r\n\t\t\r\n\t\tself.featDict= None\r\n\t\tself.TrainFiles = self.hyperParams.get('TrainFiles',[])\r\n\t\tself.TestFiles = self.hyperParams.get('TestFiles',[])\r\n\t\tself.ColumnNames = self.hyperParams.get('Columns',[])\r\n\t\tself.AugmentFunctions = self.hyperParams.get('Augment',[])\r\n\t\tself.testDataset = self.hyperParams.get('testData','test.tsv')\r\n\t\tself.trainDataset = self.hyperParams.get('trainData','train.tsv')\r\n\t\tself.datasetHeaders = self.hyperParams.get('datasetHeaders',False)\r\n\t\tself.replaceMissing = self.hyperParams.get('replaceMissing',0)\r\n\t\t\r\n\t\tif 'seed' in hyperParams:\r\n\t\t\tself.seed = int(hyperParams['seed'])\r\n\t\telse:\r\n\t\t\tself.seed = 1\r\n\t\t\r\n\t\tself.model=None\r\n\t\tself.scaleData = None\r\n\t\tself.featureFolder = None\r\n\t\r\n\t#load from each listed file to each key in the featDict\r\n\tdef loadFeatureData(self,featureFolder):\r\n\t\tself.featureFolder =featureFolder\r\n\t\t\r\n\t\r\n\tdef genFeatureData(self,pairs,dataType='train',returnDict=False):\r\n\t\treturn None\r\n\r\n\tdef augment(self,features):\r\n\t\tfeatures = np.asarray(features)\r\n\t\tif len(self.AugmentFunctions) == features.shape[1]:\r\n\t\t\tfor i in range(0,len(self.AugmentFunctions)):\r\n\t\t\t\taug = self.AugmentFunctions[i]\r\n\t\t\t\tmissing = np.isnan(features[i,:])\r\n\t\t\t\tnonmssing = missing==False\r\n\t\t\t\tif aug == 'log':\r\n\t\t\t\t\tfeatures[i,nonmssing] = np.log(features[i,nonmssing]+1e-8)\r\n\t\t\t\telif aug == 'log10':\r\n\t\t\t\t\tfeatures[i,nonmssing] = np.log10(features[i,nonmssing]+1e-8)\r\n\t\t\t\telif aug == 'log2':\r\n\t\t\t\t\tfeatures[i,nonmssing] = np.log2(features[i,nonmssing]+1e-8)\r\n\t\t\t\telif aug == 'sqrt':\r\n\t\t\t\t\tfeatures[i,nonmssing] = np.sqrt(np.abs(features[i,nonmssing])) * np.sign(features[i,nonmissing])\r\n\t\t\t\t\r\n\t\t\t\tfeatures[i,missing] = self.replaceMissing\r\n\t\telse:\r\n\t\t\tfeatures[np.isnan(features)] = self.replaceMissing\r\n\t\treturn features\r\n\t\r\n\tdef train(self,trainPairs=None):\r\n\t\tfor (trainFeatures, trainClasses) in self.loadDataFromFiles(trainPairs,'train'):\r\n\t\t\ttrainFeatures = self.augment(trainFeatures)\r\n\t\t\ttrainClasses = np.asarray(trainClasses)\r\n\t\t\ttrainFeatures = self.scaleFeatures(trainFeatures,'train')\r\n\t\t\tself.fit(trainFeatures,trainClasses)\r\n\t\t\r\n\tdef fit(self,trainFeatures,trainClasses):\r\n\t\tself.genModel() #create a new model from scratch, ensuring we don't overwrite the previously trained one\r\n\t\tself.model.fit(trainFeatures,trainClasses)\r\n\r\n\r\n\t#Predict using Full Data Method. 
Assumes all data has been loaded to created test features, and that entire test dataset and features fit into memory\r\n\tdef predictPairs(self, testPairs=None):\r\n\t\treturn self.predictPairsFromBatch(testPairs,64)\r\n\t\r\n\tdef predictPairsFromBatch(self,testPairs,batchSize=64):\r\n\t\tpredictionsLst = []\r\n\t\tpredictClassesLst = []\r\n\t\tfor (predictFeatures, predictClasses) in self.loadDataFromFiles(testPairs,'predict',batchSize):\r\n\t\t\tpredictFeatures = self.augment(predictFeatures)\r\n\t\t\tpredictFeatures = self.scaleFeatures(predictFeatures,'test')\r\n\t\t\tp,c = self.predict_proba(predictFeatures,predictClasses)\r\n\t\t\tpredictionsLst.append(p)\r\n\t\t\tpredictClassesLst.append(c)\r\n\t\treturn (np.vstack(predictionsLst),np.hstack(predictClassesLst))\r\n\t\r\n\t\t\r\n\tdef loadDataFromFiles(self,pairs,fileType,batchSize=None,delim='\\t'):\r\n\t\t#filters out only the rows from the files containing the pairs we need\r\n\t\tif pairs is not None:\r\n\t\t\tself.pairSet = set()\r\n\t\t\tfor item in pairs:\r\n\t\t\t\tself.pairSet.add(tuple(item))\r\n\t\telse:\r\n\t\t\tself.pairSet = None\r\n\t\t\t\t\r\n\t\tif fileType == 'train':\r\n\t\t\tfLst = self.TrainFiles\r\n\t\t\tdatasetFile = self.trainDataset\r\n\t\telif fileType == 'predict' or fileType == 'test':\r\n\t\t\tfLst = self.TestFiles\r\n\t\t\tdatasetFile = self.testDataset\r\n\t\tself.curFiles = []\r\n\t\tfor item in fLst:\r\n\t\t\tself.curFiles.append(open(self.featureFolder+item))\r\n\t\tself.datasetFile = open(self.featureFolder+datasetFile)\r\n\t\tself.curFilesHeader= []\r\n\t\tfor item in self.curFiles:\r\n\t\t\tself.curFilesHeader.extend(item.readline().strip().split(delim))\r\n\t\t\r\n\t\tself.curHeaderDict = {}\r\n\t\tidx = 0\r\n\t\tfor item in self.curFilesHeader:\r\n\t\t\tif item not in self.curHeaderDict:\r\n\t\t\t\tself.curHeaderDict[item] = idx\r\n\t\t\tidx += 1\r\n\t\t\r\n\t\tif self.datasetHeaders:\r\n\t\t\tdHeader = self.datasetFile.readline()\r\n\t\t\r\n\t\tfail =False\r\n\t\tfor item in self.ColumnNames:\r\n\t\t\tif item not in self.curHeaderDict:\r\n\t\t\t\tprint('Error, missing column name: ', item)\r\n\t\t\t\tfail = True\r\n\t\tif fail:\r\n\t\t\texit(42)\r\n\t\t\r\n\t\tclassData = []\r\n\t\tfeatureData = []\r\n\t\tcurLine = self.curFilesHeader\r\n\t\t#parseFiles\r\n\t\twhile len(curLine) > 0:\r\n\t\t\tcurLine = []\r\n\t\t\tfor item in self.curFiles:\r\n\t\t\t\tcurLine.extend(item.readline().strip().split(delim))\r\n\t\t\tif len(curLine[0]) == 0:\r\n\t\t\t\tbreak #no more features\r\n\t\t\tproteinData = self.datasetFile.readline().strip().split(delim)\r\n\t\t\t\r\n\t\t\tp1, p2, c = proteinData\r\n\t\t\t\r\n\t\t\tclassData.append(int(c))\r\n\t\t\tcurData = []\r\n\t\t\tfor item in self.ColumnNames:\r\n\t\t\t\tval = curLine[self.curHeaderDict[item]]\r\n\t\t\t\tif val == '?':\r\n\t\t\t\t\tval = np.nan\r\n\t\t\t\telse:\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tval = float(val)\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\tcurData.append(val)\r\n\t\t\tfeatureData.append(curData)\r\n\t\t\tif self.pairSet is not None:\r\n\t\t\t\tif (p1,p2) not in self.pairSet:\r\n\t\t\t\t\tcontinue #not one of the pairs we are parsing\r\n\t\t\t\r\n\t\t\t\r\n\t\t\tif batchSize is not None and len(classData) == batchSize:\r\n\t\t\t\tyield(featureData,classData)\r\n\t\t\t\tfeatureData = []\r\n\t\t\t\tclassData = []\r\n\t\t\t\r\n\t\tif len(classData) > 0:\r\n\t\t\tyield (featureData,classData)\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\r\n\t#Predict using batches method. 
Assumes all data has been loaded, but computing features for all pairs in memory at once would be infeasible.\r\n\tdef predictFromBatch(self,testPairs,batchSize):\r\n\t\tpass\r\n\t\t\r\n\tdef predict_proba(self,predictFeatures,predictClasses):\r\n\t\tpreds = self.model.predict_proba(predictFeatures)\r\n\t\treturn (preds,np.asarray(predictClasses,dtype=np.int))\r\n\t\t\r\n\t\r\n\tdef parseTxtGenerator(self,tsvFile,batch,sep='\\t',headerLines=1,classIdx = -1):\r\n\t\tf = open(tsvFile)\r\n\t\theader = None\r\n\t\tcurData = []\r\n\t\tclassData = []\r\n\t\tfor line in f:\r\n\t\t\tif headerLines >0:\r\n\t\t\t\tif header is None:\r\n\t\t\t\t\theader = line.strip().split(sep)\r\n\t\t\t\telse:\r\n\t\t\t\t\theader = [header]\r\n\t\t\t\t\theader.append(line.strip().split(sep))\r\n\t\t\t\theaderLines -=1\r\n\t\t\t\tcontinue\r\n\t\t\tline = line.strip().split(sep)\r\n\t\t\tif classIdx == -1:\r\n\t\t\t\tclassIdx = len(line)-1\r\n\t\t\tclassData.append(int(line[classIdx]))\r\n\t\t\tline = line[:classIdx] + line[(classIdx+1):]\r\n\t\t\tline = [float(s) for s in line]\r\n\t\t\tcurData.append(line)\r\n\t\t\t\r\n\t\t\tif len(curData) == batch:\r\n\t\t\t\tyield (header,curData,classData)\r\n\t\t\t\tcurData =[]\r\n\t\t\t\tclassData = []\r\n\t\tif len(curData) > 1:\r\n\t\t\tyield (header,curData,classData)\r\n\t\t\tcurData = []\r\n\t\t\tclassData = []\r\n" ]
[ [ "numpy.hstack", "numpy.log", "numpy.log2", "numpy.abs", "numpy.asarray", "numpy.isnan", "numpy.sign", "numpy.log10", "numpy.vstack" ] ]
flatM/AlephNull
[ "796edec7e106cd76a5a69cb6e67a1a96c7a22cf6" ]
[ "alephnull/examples/buystock.py" ]
[ "#!/usr/bin/env python\n#\n# Copyright 2013 Carter Bain Wealth Management\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nimport pytz\n\nfrom alephnull.algorithm import TradingAlgorithm\nfrom alephnull.utils.factory import load_from_yahoo\n\nSYMBOL = 'GS'\n\nclass BuyStock(TradingAlgorithm): # inherit from TradingAlgorithm\n \"\"\"This is the simplest possible algorithm that does nothing but\n buy 1 share of SYMBOL on each event.\n \"\"\"\n def handle_data(self, data): # overload handle_data() method\n self.order(SYMBOL, 1) # order SID (=0) and amount (=1 shares)\n\n\nif __name__ == '__main__':\n start = datetime(2008, 1, 1, 0, 0, 0, 0, pytz.utc)\n end = datetime(2013, 1, 1, 0, 0, 0, 0, pytz.utc)\n data = load_from_yahoo(stocks=[SYMBOL], indexes={}, start=start,\n end=end)\n simple_algo = BuyStock()\n results = simple_algo.run(data)\n\n ax1 = plt.subplot(211)\n results.portfolio_value.plot(ax=ax1)\n ax2 = plt.subplot(212, sharex=ax1)\n stock_data = getattr(data, SYMBOL)\n stock_data.plot(ax=ax2)\n plt.gcf().set_size_inches(18, 8)" ]
[ [ "matplotlib.pyplot.subplot", "matplotlib.pyplot.gcf" ] ]
pkretzschmar/commons-config-backend
[ "a5abf9294e0b2fbb8f5d5bf5d3db2b1c55ebbc91" ]
[ "models/augmented_bonding_curve.py" ]
[ "import os\nimport pandas as pd\nimport numpy as np\n\n\n#These numbers are fixed from the hatch results (all in thousands except the token price)\nTOTAL_HATCH_FUNDING = 1571.22357 \nTOTAL_INITIAL_TECH_SUPPLY= 2035.918945 \nHATCH_FINAL_TECH_PRICE = 0.754743 \n\n\nclass BondingCurveInitializer:\n\n def __init__(self, reserve_balance=100, opening_price=5, initial_supply=100):\n self.opening_price = opening_price\n self.initial_supply = initial_supply\n self.initial_balance = reserve_balance\n\n def reserve_ratio(self):\n return self.initial_balance / (self.opening_price * self.initial_supply)\n \n #Returns the token price given a specific supply\n def get_price(self, supply):\n return (supply ** ((1 / self.reserve_ratio()) - 1) * self.opening_price) / (\n self.initial_supply ** ((1 / self.reserve_ratio()) - 1)\n )\n\n #Returns the collateral balance price given a specific supply\n def get_balance(self, supply):\n return (\n self.reserve_ratio() * self.get_price(supply) * supply\n )\n\n #Returns supply at a specific balance. THIS IS AN APPROXIMATION only meant for visualizing scenarios\n def get_supply(self, balance):\n supply_ref = 0\n while self.get_balance(supply_ref) < balance:\n supply_ref = supply_ref + 10\n\n df = self.curve_over_balance(supply_ref-10, supply_ref, 100000)\n df_rounded = df.round(3)\n \n index= int(df.index.where(df_rounded['balanceInThousands'] >= balance).dropna()[0])\n price = df.at[index, \"price\"]\n\n supply = balance/(price* self.reserve_ratio())\n \n\n return supply\n\n #For drawing the bonding curve. Range shows how many times the initial supply you make the graph for, steps how many subdivisions\n def curve_over_supply(self, range_begin=0, range_end=1000, steps=500):\n x = np.linspace(range_begin, range_end, steps)\n y = self.get_price(x)\n\n return pd.DataFrame(zip(x, y), columns=[\"supplyInThousands\", \"price\"])\n \n def curve_over_balance(self, range_begin=0, range_end=1000, steps=500):\n supply_list = np.linspace(range_begin, range_end, steps)\n x = self.get_balance(supply_list)\n y = self.get_price(supply_list)\n\n return pd.DataFrame(zip(x, y), columns=[\"balanceInThousands\", \"price\"])\n\n\n\nclass BondingCurve(BondingCurveInitializer):\n\n def __init__(self, reserve_balance=100, opening_price=5, initial_supply=100, entry_tribute=0.05, exit_tribute=0.05):\n super().__init__(reserve_balance, opening_price, initial_supply)\n self.current_supply = self.initial_supply\n self.current_balance = self.get_balance(self.initial_supply)\n self.entry_tribute = entry_tribute\n self.exit_tribute = exit_tribute\n\n def set_new_supply(self, new_supply):\n self.current_supply = new_supply\n self.current_balance = self.get_balance(new_supply)\n\n #Returns how much wxDAI you get from selling TEC. Informative, doesn't change state\n def sale_return(self, bonded):\n return self.current_balance * (\n (bonded / self.current_supply + 1) ** (1 / self.reserve_ratio()) - 1\n )\n\n #Returns how much TEC you get from purchasing with wxDAI. Informative, doesn't change state\n def purchase_return(self, collateral):\n return self.current_supply * (\n (collateral / self.current_balance + 1) ** (self.reserve_ratio()) - 1\n )\n\n\n\nclass BondingCurveHandler():\n '''\n The handler for the Bonding Curve. All interaction happens through this.\n\n The constrctor receives following args:\n\n commons_percentage: float between 0-0.95. Percentage of funds that get substracted from the total funding to go to the commons pool\n ragequit_amount: float, wxDai in thousands . 
Amount of reserve returned to ragequitters before the bonding curve gets initialized\n opening_price: float. No real limit but, expected to be between 1 and 4\n entry_tribute: float between 0-0.99. Percentage of funds substracted on buy (mint) operations before interacting with the bonding curve\n exit_tribute: float between 0-0.99. Percentage of funds substracted on sell (burn) operations after interacting with the boding curve\n steplist: list with format [[\"AMOUNT\", \"TOKEN\"],[\"AMOUNT\", \"TOKEN\"]]. Set of buy/sell operations applied to the bonding curve.\n zoom_graph=0: optional. value 0 or 1. To specify if the draw function should show the whole curve(0) or \"zoom in\" into the area where operations are happening (1)\n plot_mode=0: optional. value 0 or 1. Not in the scope of this iteration. Specifies if the draw function should plot the price against the balance (0) or the supply (1)\n \n '''\n\n def __init__(self,\n commons_percentage,\n ragequit_amount,\n opening_price,\n entry_tribute,\n exit_tribute,\n initial_buy,\n scenario_reserve_balance,\n steplist,\n zoom_graph=0,\n plot_mode=0):\n\n #parse the steplist (which gets read as string) into the right format\n steplist_parsed = []\n if steplist != \"\":\n for step in steplist:\n buf = str(step).strip('][').split(', ')\n buf[0] = (float(buf[0]) / 1000)\n buf[1] = buf[1].strip(\"'\")\n steplist_parsed.append(buf)\n\n params_valid = self.check_param_validity( \n commons_percentage,\n ragequit_amount,\n opening_price,\n entry_tribute,\n exit_tribute,\n initial_buy,\n float(scenario_reserve_balance),\n steplist_parsed,\n int(zoom_graph),\n int(plot_mode)\n )\n #The numbers for initial supply and taken from the constants\n self.bonding_curve = self.create_bonding_curve(commons_percentage=commons_percentage, ragequit_amount=ragequit_amount, opening_price=opening_price, entry_tribute= entry_tribute / 100, exit_tribute= exit_tribute / 100, initial_buy=initial_buy)\n \n #If there is an initial buy, perform it here \n self.steps_table = pd.DataFrame() \n if initial_buy > 0: \n #the buy-in gets saved as \"step zero\"\n self.steps_table = self.steps_table.append(self.generate_outputs_table(bondingCurve= self.bonding_curve, steplist= [[initial_buy, \"wxDai\"]]))\n self.steps_table[\"step\"] = 0\n\n\n #set the current supply to the point where the scenarios are going to happen (if it isn't the launch situation)\n # if it's the launch situation, the supply change from the buy in has already been saved before\n # rounded a bit to make sure it gets triggered when necessary\n adjusted_start_balance = round((TOTAL_HATCH_FUNDING * (1- commons_percentage)), 3)\n if(round(scenario_reserve_balance, 3) != adjusted_start_balance):\n scenario_supply= self.bonding_curve.get_supply(float(scenario_reserve_balance))\n self.bonding_curve.set_new_supply(scenario_supply)\n \n #calculate the scenarios\n self.steps_table = self.steps_table.append(self.generate_outputs_table(bondingCurve= self.bonding_curve, steplist= steplist_parsed))\n self.zoom_graph = zoom_graph\n self.plot_mode = plot_mode\n \n\n def get_data(self):\n\n [min_range, max_range] = self.get_scenario_range(steps_table= self.steps_table, zoom_graph=self.zoom_graph)\n\n clean_figure_data = self.get_data_augmented_bonding_curve(bondingCurve= self.bonding_curve, min_range=min_range, max_range=max_range, plot_mode=self.plot_mode).to_dict(orient='list')\n clean_figure_data['reserveRatio'] = self.bonding_curve.reserve_ratio()\n \n figure_bonding_curve= {\"chartData\": {}}\n figure_milestone_table 
=self.get_milestone_table(self.bonding_curve) \n \n if self.steps_table.empty:\n figure_bonding_curve['chartData'] = clean_figure_data\n figure_bonding_curve['milestoneTable'] = figure_milestone_table\n return figure_bonding_curve\n else: \n figure_buy_sell_table =self.steps_table.loc[:,[\"step\", \"currentPriceParsed\", \"currentSupplyParsed\",\"amountIn\", \"tributeCollected\", \"amountOut\", \"newPriceParsed\", \"slippage\"]].to_dict(orient='list')\n extended_figure_data = clean_figure_data\n #get single points with full coordinates\n extended_figure_data['singlePoints'] = self.get_single_point_coordinates(self.steps_table)\n #get linspace from every step.\n extended_figure_data['stepLinSpaces'] = self.get_step_linspaces(self.bonding_curve, self.steps_table)\n\n figure_bonding_curve['chartData'] = extended_figure_data\n figure_bonding_curve['stepTable'] = figure_buy_sell_table\n figure_bonding_curve['milestoneTable'] = figure_milestone_table\n\n return figure_bonding_curve\n\n def create_bonding_curve(self, commons_percentage=0.5, ragequit_amount=100, opening_price=3, entry_tribute=0.05, exit_tribute=0.05, initial_buy=0):\n \n initial_supply = TOTAL_INITIAL_TECH_SUPPLY - (ragequit_amount / HATCH_FINAL_TECH_PRICE)\n hatch_funding= TOTAL_HATCH_FUNDING - ragequit_amount - initial_buy\n\n initial_reserve = hatch_funding * (1 - commons_percentage)\n \n bCurve = BondingCurve(initial_reserve, opening_price, initial_supply, entry_tribute, exit_tribute)\n\n return bCurve\n\n def generate_outputs_table(self, bondingCurve, steplist):\n\n column_names = [\n \"step\",\n \"currentPrice\",\n \"currentPriceParsed\",\n \"currentSupply\",\n \"currentSupplyParsed\",\n \"currentBalance\",\n \"currentBalanceParsed\",\n \"amountIn\",\n \"tributeCollected\",\n \"amountOut\",\n \"newPrice\",\n \"newPriceParsed\",\n \"newSupply\",\n \"newSupplyParsed\",\n \"newBalance\",\n \"newBalanceParsed\",\n \"slippage\",\n ]\n outputTable = pd.DataFrame(columns=column_names)\n\n for index, step in enumerate(steplist):\n\n current_supply = float(bondingCurve.current_supply)\n current_supply_parsed = str(format(current_supply, '.2f')) + \" TEC\"\n\n current_price = bondingCurve.get_price(current_supply)\n current_price_parsed = str(format(current_price, '.2f')) + \" wxDAI\"\n #current_price_parsed = str(round(current_price, 2)) + \" wxDAI\"\n\n current_balance = float(bondingCurve.current_balance)\n current_balance_parsed = str(format(current_balance, '.2f')) + \" wxDAI\"\n\n amount_in = step[0]\n token_type = \"wxDAI\" if step[1] == \"wxDai\" else step[1] #to avoid the most obvious error. 
TO DO: in-depth validation of the steplist...\n \n amount_in_parsed = str(format(amount_in, '.2f')) + \"k \" + str(token_type)\n #amount_in_parsed = str(round(amount_in, 2)) + \"k \" + str(token_type)\n \n amount_out = 0\n amount_out_parsed = \"\"\n new_supply = 0\n tribute_collected = 0\n if token_type == \"wxDAI\":\n # take tribute and buy\n tribute_collected = amount_in * bondingCurve.entry_tribute\n amountAfterTribute = amount_in - tribute_collected\n\n amount_out = bondingCurve.purchase_return(amountAfterTribute)\n amount_out_parsed = str(format(amount_out, '.2f')) + \"k TEC\"\n #amount_out_parsed = str(round(amount_out, 2)) + \"k TEC\"\n tribute_collected_parsed = str(format(tribute_collected, '.2f')) + \"k wxDAI\"\n #tribute_collected_parsed = str(round(tribute_collected, 2)) + \"k wxDAI\"\n\n slippage = (amount_in - tribute_collected)/bondingCurve.get_price(current_supply) - amount_out\n slippage_pct = slippage / ((amount_in - tribute_collected)/bondingCurve.get_price(current_supply))\n slippage_pct = str(format((slippage_pct*100), '.2f')) + \"%\"\n\n new_supply = max(\n 0, current_supply + amount_out\n )\n elif token_type == \"TEC\":\n #this section works, but all the -1 mults are a bit of a mess. \n # sell and take tribute\n amount_in = amount_in * -1 #because we are reducing the supply (burning)\n amountBeforeTribute = bondingCurve.sale_return(amount_in) \n\n tribute_collected = amountBeforeTribute * bondingCurve.exit_tribute #since it is a sale, the number returned is negative\n tribute_collected_parsed = str(format((tribute_collected*-1), '.2f')) + \"k wxDAI\"\n #tribute_collected_parsed = str(round((tribute_collected*-1), 2)) + \"k wxDAI\"\n amount_out = (amountBeforeTribute - tribute_collected) #we leave it negative for the supply calculations down below\n amount_out_parsed = str(format((amount_out*-1), '.2f')) + \"k wxDAI\" \n #amount_out_parsed = str(round((amount_out*-1), 2)) + \"k wxDAI\"\n\n slippage = ((amount_in*(1-bondingCurve.exit_tribute))*bondingCurve.get_price(current_supply) - amount_out) *-1\n slippage_pct = slippage / ((amount_in*(1-bondingCurve.exit_tribute))*bondingCurve.get_price(current_supply)) *-1\n slippage_pct = str(format((slippage_pct*100), '.2f')) + \"%\"\n\n new_supply = max(\n 0, current_supply + bondingCurve.purchase_return(amount_out),\n )\n \n\n new_price = bondingCurve.get_price(new_supply)\n new_price_parsed = str(format(new_price, '.2f')) + \" wxDAI\"\n #new_price_parsed = str(round(new_price, 2)) + \" wxDAI\"\n\n new_supply_parsed = str(format(new_supply, '.2f')) + \" TEC\"\n\n new_balance = bondingCurve.get_balance(new_supply)\n new_balance_parsed = str(format(new_balance, '.2f')) + \" wxDAI\"\n\n # add to Dataframe\n outputTable.loc[len(outputTable.index)] = [\n (index+1),\n current_price,\n current_price_parsed,\n current_supply,\n current_supply_parsed,\n current_balance,\n current_balance_parsed,\n amount_in_parsed,\n tribute_collected_parsed,\n amount_out_parsed,\n new_price,\n new_price_parsed,\n new_supply,\n new_supply_parsed,\n new_balance,\n new_balance_parsed,\n slippage_pct,\n ]\n\n # update current supply and balance \n bondingCurve.set_new_supply(new_supply)\n\n #print(outputTable)\n return outputTable\n\n def get_data_augmented_bonding_curve(self, bondingCurve, min_range, max_range, plot_mode=0):\n \n if plot_mode == 0:\n curve_draw = bondingCurve.curve_over_balance(min_range, max_range)\n elif plot_mode == 1:\n curve_draw = bondingCurve.curve_over_supply(min_range, max_range)\n \n return curve_draw\n\n def 
get_single_point_coordinates(self, steps_table):\n\n coord_list= []\n \n for index, row in steps_table.iterrows():\n #point = {'x' : row['currentBalance'], 'y': row['currentPrice'], 'pointSupply': row['currentSupply']}\n point = {'x' : row['currentBalance'], 'y': row['currentPrice']}\n coord_list.append(point) \n \n last_row = steps_table.iloc[-1]\n #last_point = {'x' : last_row['newBalance'], 'y': last_row['newPrice'], 'pointSupply': row['newSupply']}\n last_point = {'x' : last_row['newBalance'], 'y': last_row['newPrice']}\n coord_list.append(last_point) \n\n return coord_list\n\n def get_step_linspaces(self, bondingCurve, steps_table):\n\n linspace_list = []\n\n for index, row in steps_table.iterrows():\n lin_step_df = bondingCurve.curve_over_balance(bondingCurve.get_supply(row['currentBalance']), bondingCurve.get_supply(row['newBalance']), steps=100)\n lin_step_df = lin_step_df.rename(columns={\"balanceInThousands\": \"x\", \"price\": \"y\"})\n lin_step = lin_step_df.to_dict(orient='list')\n #print(\"Interval:\" + str(row['currentBalance']) + \" - \" + str(row['newBalance']))\n linspace_list.append(lin_step)\n\n return linspace_list\n\n\n def get_scenario_range(self, steps_table, zoom_graph=0):\n\n if steps_table.empty :\n min_range = 0\n max_range = 500\n else:\n min_range = 0 if zoom_graph == 0 else ( min(steps_table['currentSupply'].min(), steps_table['newSupply'].min()) - 50)\n max_range = steps_table['newSupply'].max() + (200 if zoom_graph == 0 else 50)\n\n\n return [min_range, max_range]\n\n def get_milestone_table(self, bCurve):\n balance_list = [250, 500, 1000, 2000, 3000, 5000, 10000]\n price_list = []\n supply_list = []\n\n for bal in balance_list:\n sup = bCurve.get_supply(bal)\n supply_list.append(sup)\n price_list.append(bCurve.get_price(sup))\n \n #print(balance_list)\n #print(price_list)\n #print(supply_list)\n\n table_data = { \"balance\": balance_list, \"supply\": supply_list, \"price\": price_list}\n\n return table_data\n\n def check_param_validity(self, commons_percentage, ragequit_amount, opening_price, entry_tribute, exit_tribute, initial_buy, scenario_reserve_balance, steplist, zoom_graph, plot_mode):\n if commons_percentage < 0 or commons_percentage > 0.95:\n raise ValueError(\"Error: Invalid Commons Percentage Parameter.\")\n if ragequit_amount < 0:\n raise ValueError(\"Error: Invalid Ragequit Amount Parameter.\")\n if opening_price <=0:\n raise ValueError(\"Error: Invalid Initial Price Parameter.\")\n if entry_tribute < 0 or entry_tribute >= 1:\n raise ValueError(\"Error: Invalid Entry Tribute Parameter.\")\n if exit_tribute < 0 or exit_tribute >= 1:\n raise ValueError(\"Error: Invalid Exit Tribute Parameter.\")\n if initial_buy < 0 or initial_buy > (TOTAL_HATCH_FUNDING - ragequit_amount):\n raise ValueError(\"Error: The Initial Buy is either negative or bigger than the remaining Hatch Funding after Ragequits.\")\n if scenario_reserve_balance <= 0:\n raise ValueError(\"Error: Invalid Hatch Scenario Funding Parameter.\")\n if not isinstance(steplist, list):\n #TO DO: in-depth validation of the steplist\n raise ValueError(\"Error: Invalid Steplist Parameter.\")\n if not (zoom_graph == 0 or zoom_graph == 1):\n raise ValueError(\"Error: Invalid Graph Zoom Parameter.\")\n if not (plot_mode == 0 or plot_mode == 1):\n raise ValueError(\"Error: Invalid Plot Mode Parameter.\")\n \n return True\n\n " ]
[ [ "numpy.linspace", "pandas.DataFrame" ] ]
HugoTessier-lab/SWD
[ "f382166d0231d1bfce50392acefd3634e6d50734" ]
[ "experiment.py" ]
[ "import torch\nfrom utils.parse_arguments import parse_arguments\nimport sys\nfrom utils.datasets import get_dataset\nfrom utils.regularization_and_pruning import Regularization, get_mask_function\nimport math\nimport numpy as np\nfrom utils.checkpoint import Checkpoint\nimport time\n\nif torch.backends.cudnn.enabled:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\nget_mask = None\n\n\ndef display_progress(batch_idx, accuracy, top5, loss, loader, train_or_test, batch_size):\n sys.stdout.write(f'\\r{train_or_test} : ({batch_idx + 1}/{len(loader)}) '\n f'-> top-1 : {round(accuracy / ((1 + batch_idx) * batch_size), 3)}'\n f' top-5 : {round(top5 / ((1 + batch_idx) * batch_size), 3)}'\n f' loss : {round(loss / ((1 + batch_idx) * batch_size), 3)} ')\n\n\ndef apply_mask(model, masks):\n with torch.no_grad():\n for i, parameter in enumerate(model.parameters()):\n parameter.data = parameter.data * masks[i]\n\n\ndef test_model(dataset, model, args):\n model.eval()\n test_loader = dataset['test']\n accuracy = 0\n loss = 0\n top5 = 0\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(test_loader):\n if batch_idx != 0 and args.debug:\n break\n device = 'cuda' if not args.no_cuda else 'cpu'\n data, target = data.to(device), target.to(device)\n output = model(data)\n pred = output.argmax(dim=1, keepdim=True)\n accuracy += pred.eq(target.view_as(pred)).sum().item()\n top5 += accuracy_top5(output, target)\n loss += float(torch.nn.functional.cross_entropy(output, target))\n display_progress(batch_idx, accuracy, top5, loss, test_loader, 'Test', args.test_batch_size)\n return (accuracy / (len(test_loader) * args.test_batch_size),\n top5 / (len(test_loader) * args.test_batch_size),\n loss / (len(test_loader) * args.test_batch_size))\n\n\ndef compute_migration(before, after):\n ingoing = 0\n outgoing = 0\n for b, a in zip(before, after):\n ingoing += int(((b == 0) & (a == 1)).sum())\n outgoing += int(((b == 1) & (a == 0)).sum())\n return ingoing, outgoing\n\n\ndef l2_norm(model):\n norm = 0\n for p in model.parameters():\n norm += float(torch.pow(p, 2).sum())\n return norm\n\n\ndef get_a(batch_idx, current_epoch, max_epoch, dataset_length, args):\n if args.fix_a is not None:\n return args.fix_a\n else:\n max_batch = max_epoch * dataset_length\n current_batch = (current_epoch * dataset_length) + batch_idx\n exponent = math.log(args.a_max / args.a_min) / max_batch\n return args.a_min * math.exp(current_batch * exponent)\n\n\ndef accuracy_top5(output, target):\n result = 0\n for o, t in zip(output, target):\n result += int(t in torch.argsort(o, descending=True)[:5])\n return result\n\n\ndef train_model(checkpoint, args, epochs, dataset, masks=None, soft_pruning=False):\n while epochs[0] <= checkpoint.epoch < epochs[1]:\n if soft_pruning:\n apply_mask(checkpoint.model,\n get_mask(checkpoint.model,\n checkpoint.regularization.get_target() if checkpoint.regularization else args.target))\n if checkpoint.epoch == epochs[0]:\n acc, top5, test_loss = test_model(_dataset, checkpoint.model, args)\n checkpoint.save_results({'epoch': 'before', 'acc': acc, 'top5': top5, 'loss': test_loss,\n 'norm': l2_norm(checkpoint.model),\n 'pruned_param_count': checkpoint.model.compute_params_count(\n args.pruning_type),\n 'pruned_flops_count': checkpoint.model.compute_flops_count()})\n print(f'\\nEpoch {checkpoint.epoch + 1}/{epochs[1]}')\n train_loader = dataset['train']\n\n reg_mask_before = get_mask(checkpoint.model,\n checkpoint.regularization.get_target() if 
checkpoint.regularization else args.target)\n\n checkpoint.model.train()\n accuracy = 0\n global_loss = 0\n top5 = 0\n begin = None\n for batch_idx, (data, target) in enumerate(train_loader):\n if begin is None:\n begin = time.time()\n if batch_idx != 0 and args.debug:\n break\n if checkpoint.regularization is not None:\n checkpoint.regularization.set_a(get_a(batch_idx, checkpoint.epoch - epochs[0], epochs[1] - epochs[0],\n len(dataset['train']), args))\n if masks:\n apply_mask(checkpoint.model, masks)\n\n device = 'cuda' if not args.no_cuda else 'cpu'\n data, target = data.to(device), target.to(device)\n checkpoint.optimizer.zero_grad()\n output = checkpoint.model(data)\n\n loss = torch.nn.functional.cross_entropy(output, target)\n if checkpoint.regularization:\n if args.wd == 0 and args.mu > 0:\n loss += args.mu * checkpoint.regularization(checkpoint.model)\n else:\n loss += args.wd * checkpoint.regularization(checkpoint.model)\n loss.backward()\n checkpoint.optimizer.step()\n\n pred = output.argmax(dim=1, keepdim=True)\n accuracy += pred.eq(target.view_as(pred)).sum().item()\n top5 += accuracy_top5(output, target)\n with torch.no_grad():\n global_loss += float(loss)\n display_progress(batch_idx, accuracy, top5, global_loss, train_loader, 'Train', args.batch_size)\n\n if masks:\n apply_mask(checkpoint.model, masks)\n\n duration = time.time() - begin\n\n reg_mask_after = get_mask(checkpoint.model,\n checkpoint.regularization.get_target() if checkpoint.regularization else args.target)\n ingoing, outgoing = compute_migration(reg_mask_before, reg_mask_after)\n last_a = get_a(len(dataset['train']) - 1, checkpoint.epoch - epochs[0], epochs[1] - epochs[0],\n len(dataset['train']), args)\n\n sys.stderr.write('\\n')\n acc, top5, test_loss = test_model(dataset, checkpoint.model, args)\n\n checkpoint.save_results({'epoch': checkpoint.epoch, 'acc': acc, 'top5': top5, 'loss': test_loss,\n 'ingoing': ingoing, 'outgoing': outgoing, 'a': last_a,\n 'norm': l2_norm(checkpoint.model),\n 'pruned_param_count': checkpoint.model.compute_params_count(args.pruning_type),\n 'pruned_flops_count': checkpoint.model.compute_flops_count(),\n 'epoch_duration': duration})\n checkpoint.epoch += 1\n checkpoint.scheduler.step()\n checkpoint.save()\n\n\nif __name__ == '__main__':\n arguments = parse_arguments()\n torch.manual_seed(arguments.seed)\n np.random.seed(arguments.seed)\n if arguments.fix_a is None and arguments.reg_type == \"swd\" and arguments.pruning_iterations != 1:\n print('Progressive a is not compatible with iterative pruning')\n raise ValueError\n if arguments.no_ft and arguments.pruning_iterations != 1:\n print(\"You can't specify a pruning_iteration value if there is no fine-tuning at all\")\n raise ValueError\n get_mask = get_mask_function(arguments.pruning_type)\n _dataset = get_dataset(arguments)\n _targets = [int((n + 1) * (arguments.target / arguments.pruning_iterations)) for n in\n range(arguments.pruning_iterations)]\n\n # Train model\n print('Train model !')\n print(f'Regularization with t-{_targets[0]}')\n\n training_model = Checkpoint(arguments, 'training')\n training_model.regularization = Regularization(None, _targets[0], arguments)\n training_model.load()\n train_model(training_model, arguments, [0, arguments.epochs], _dataset, None, soft_pruning=arguments.soft_pruning)\n\n if arguments.lr_rewinding:\n training_model.rewind_lr()\n\n if arguments.no_ft:\n print('\\nPruning model without fine tuning :')\n pruned_model = training_model.clone('pruned')\n pruned_model.load()\n mask = 
get_mask(pruned_model.model, arguments.target)\n apply_mask(pruned_model.model, mask)\n _acc, _top5, _test_loss = test_model(_dataset, pruned_model.model, arguments)\n pruned_model.save_results({'epoch': 'before', 'acc': _acc, 'top5': _top5, 'loss': _test_loss,\n 'norm': l2_norm(pruned_model.model),\n 'pruned_param_count': pruned_model.model.compute_params_count(\n arguments.pruning_type),\n 'pruned_flops_count': pruned_model.model.compute_flops_count()})\n pruned_model.save()\n last_model = pruned_model\n last_epoch = arguments.epochs\n else:\n fine_tuned_model = training_model\n # Prune and fine-tune model\n for _i, _t in enumerate(_targets):\n print(f'\\n\\nPruning with target {_t}/1000 ({_i + 1}/{len(_targets)}) and fine-tuning model !')\n fine_tuned_model = fine_tuned_model.clone(f'fine_tuning({_i + 1}-{len(_targets)})')\n fine_tuned_model.load()\n mask = get_mask(fine_tuned_model.model, _t)\n\n if _i + 1 != len(_targets):\n regularization = Regularization(None, _targets[_i + 1], arguments)\n print(f'Regularization with t-{_targets[_i + 1]}')\n else:\n print('Final fine-tuning without regularization')\n regularization = None\n fine_tuned_model.regularization = regularization\n train_model(fine_tuned_model, arguments,\n [arguments.epochs + (_i * arguments.ft_epochs),\n arguments.epochs + ((_i + 1) * arguments.ft_epochs)],\n _dataset, mask)\n last_model = fine_tuned_model\n last_epoch = arguments.epochs + (len(_targets) * arguments.ft_epochs)\n\n if arguments.additional_epochs != 0:\n print('\\nAdditional fine-tuning epochs')\n print(last_model.epoch, last_epoch, last_epoch + arguments.additional_epochs)\n last_model = last_model.clone('last_epochs')\n last_model.load()\n last_model.regularization = None\n mask = get_mask(last_model.model, arguments.target)\n train_model(last_model, arguments,\n [last_epoch,\n last_epoch + arguments.additional_epochs],\n _dataset, mask)\n\n print('\\nDone')\n" ]
[ [ "numpy.random.seed", "torch.manual_seed", "torch.nn.functional.cross_entropy", "torch.no_grad", "torch.argsort", "torch.pow" ] ]
lbruand/tensorflow
[ "02159bbe1f87638bb6cde6a6f4aaa2fc0362e53b" ]
[ "tensorflow/python/eager/def_function.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=unidiomatic-typecheck\n\"\"\"Prototype decorator for defining graph functions with eager semantics.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport threading\nimport weakref\nimport six\n\nfrom google.protobuf import text_format as _text_format\nfrom google.protobuf.message import DecodeError\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import function as function_lib\nfrom tensorflow.python.eager import lift_to_graph\nfrom tensorflow.python.framework import func_graph as func_graph_module\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import control_flow_util\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.profiler import trace\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util.tf_export import tf_export\n\nFREQUENT_TRACING_WARNING_MAX_CALL_HISTORY = 10\nFREQUENT_TRACING_WARNING_THRESHOLD = 5\n\n\nclass _CallCounter(object):\n \"\"\"Class keeping track of how many recent calls triggered tracing.\"\"\"\n\n __slots__ = [\"_max_call_history\", \"_calls_per_tracings\", \"call_count\"]\n\n def __init__(self, max_call_history):\n self._max_call_history = max_call_history\n self._calls_per_tracings = []\n self.call_count = 0\n\n def called_with_tracing(self):\n self.call_count += 1\n self._calls_per_tracings.append(1)\n\n while self._calls_per_tracings:\n if self.call_count - self._calls_per_tracings[0] > self._max_call_history:\n self.call_count -= self._calls_per_tracings.pop(0)\n else:\n break\n\n def called_without_tracing(self):\n # We don't count tracing when users load a concrete function directly or\n # call get_concrete_function, so the first call can be not a tracing call.\n if not self._calls_per_tracings:\n self._calls_per_tracings = [0]\n self._calls_per_tracings[-1] += 1\n self.call_count += 1\n\n def get_tracing_count(self):\n return len(self._calls_per_tracings)\n\n\nclass _FrequentTracingDetector(object):\n \"\"\"Class for frequent retracing detection and warning.\"\"\"\n\n __slots__ = [\"_counters\", \"_lock\"]\n\n def __init__(self):\n self._counters = weakref.WeakKeyDictionary() # GUARDED_BY(self._lock)\n self._lock = threading.Lock()\n\n def _get_counter(self, key):\n if key not in 
self._counters:\n self._counters[key] = _CallCounter(\n FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY)\n return self._counters[key]\n\n def called_without_tracing(self, key):\n with self._lock:\n counter = self._get_counter(key)\n counter.called_without_tracing()\n\n def called_with_tracing(self, key, function_name):\n with self._lock:\n counter = self._get_counter(key)\n counter.called_with_tracing()\n if counter.get_tracing_count() >= FREQUENT_TRACING_WARNING_THRESHOLD:\n logging.warning(\n \"{} out of the last {} calls to {} triggered tf.function \"\n \"retracing. Tracing is expensive and the excessive number of \"\n \"tracings could be due to (1) creating @tf.function repeatedly in \"\n \"a loop, (2) passing tensors with different shapes, (3) passing \"\n \"Python objects instead of tensors. For (1), please define your \"\n \"@tf.function outside of the loop. For (2), @tf.function has \"\n \"experimental_relax_shapes=True option that relaxes argument \"\n \"shapes that can avoid unnecessary retracing. For (3), please \"\n \"refer to \"\n \"https://www.tensorflow.org/guide/function#controlling_retracing\"\n \" and https://www.tensorflow.org/api_docs/python/tf/function for \"\n \" more details.\".format(counter.get_tracing_count(),\n counter.call_count, function_name))\n\n\n_frequent_tracing_detector = _FrequentTracingDetector()\n\n\nclass UnliftedInitializerVariable(resource_variable_ops.UninitializedVariable):\n \"\"\"Variable which does not lift its initializer out of function context.\n\n Instances of this variable, when created, build a graph which runs their\n initializer inside a tf.cond(is_initialized) block.\n\n This can only be created inside a defun called from (eventually) eager\n mode. That is, non-function-building graphs are not supported.\n \"\"\"\n\n def __init__(self,\n initial_value=None,\n trainable=None,\n caching_device=None,\n name=None,\n dtype=None,\n constraint=None,\n add_initializers_to=None,\n lifted_initializer_graph=None,\n synchronization=None,\n aggregation=None,\n shape=None,\n **unused_kwargs):\n \"\"\"Creates a variable.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called.\n (Note that initializer functions from init_ops.py must first be bound\n to a shape before being used here.)\n trainable: If `True`, GradientTapes automatically watch uses of this\n Variable.\n caching_device: Optional device string or function describing where the\n Variable should be cached for reading. Defaults to the Variable's\n device. If not `None`, caches on another device. Typical use is to\n cache on the device where the Ops using the Variable reside, to\n deduplicate copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n dtype: If set, initial_value will be converted to the given type.\n If None, either the datatype will be kept (if initial_value is\n a Tensor) or float32 will be used (if it is a Python object convertible\n to a Tensor).\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). 
The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n add_initializers_to: if not None and not in legacy graph mode, the\n initializer tensor will be added to this map in addition to adding the\n assignment to the function.\n lifted_initializer_graph: FuncGraph to try to lift initializers to.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n shape: (optional) The shape of this variable. If None, the shape of\n `initial_value` will be used. When setting this argument to\n `tf.TensorShape(None)` (representing an unspecified shape), the variable\n can be assigned with values of different shapes.\n\n Raises:\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If called outside of a function definition.\n \"\"\"\n with ops.init_scope():\n self._in_graph_mode = not context.executing_eagerly()\n if not ops.inside_function():\n # If we've been init_scope()d out of the function definition nothing to do\n # here; we can't really do the capturing or conditional logic.\n resource_variable_ops.ResourceVariable.__init__(\n self, initial_value=initial_value, trainable=trainable,\n caching_device=caching_device, name=name, dtype=dtype,\n constraint=constraint)\n return\n if initial_value is None:\n raise ValueError(\"initial_value must be specified.\")\n init_from_fn = callable(initial_value)\n\n if constraint is not None and not callable(constraint):\n raise ValueError(\"The `constraint` argument must be a callable.\")\n\n with ops.name_scope(name, \"Variable\", []\n if init_from_fn else [initial_value]) as scope_name:\n with ops.name_scope(\"Initializer\"):\n if init_from_fn:\n initial_value = initial_value()\n if isinstance(initial_value, trackable.CheckpointInitialValue):\n self._maybe_initialize_trackable()\n self._update_uid = initial_value.checkpoint_position.restore_uid\n initial_value = initial_value.wrapped_value\n\n initial_value = ops.convert_to_tensor(initial_value,\n name=\"initial_value\", dtype=dtype)\n assert initial_value is not None\n\n # Don't use `shape or initial_value.shape` since TensorShape has\n # overridden `__bool__`.\n if shape is None:\n shape = initial_value.shape\n\n # Use the constructor for UninitializedVariable to start. 
Outside the name\n # scope so we don't double up the prefix.\n super(UnliftedInitializerVariable, self).__init__(\n trainable=trainable,\n caching_device=caching_device,\n name=name,\n shape=shape,\n dtype=initial_value.dtype,\n constraint=constraint,\n synchronization=synchronization,\n aggregation=aggregation,\n extra_handle_data=initial_value,\n **unused_kwargs)\n\n with ops.name_scope(scope_name):\n if self._in_graph_mode:\n with ops.init_scope():\n outer_graph = ops.get_default_graph()\n func_graph = ops.get_default_graph()\n function_placeholders = (\n func_graph.inputs + func_graph.internal_captures)\n placeholder_ops = set(\n [tensor.op for tensor in function_placeholders])\n lifted_initializer = lift_to_graph.lift_to_graph(\n [initial_value], outer_graph,\n disallowed_placeholders=placeholder_ops)[initial_value]\n with ops.init_scope():\n self._initial_value = lifted_initializer\n with ops.name_scope(\"IsInitialized\"):\n self._is_initialized_op = (\n resource_variable_ops.var_is_initialized_op(self._handle))\n if initial_value is not None:\n with ops.name_scope(\"Assign\") as n, ops.colocate_with(self._handle):\n self._initializer_op = resource_variable_ops.assign_variable_op(\n self._handle, lifted_initializer, name=n)\n elif context.executing_eagerly():\n # In this case, both current scope and init scope are eager.\n # Assign_variable_op will be executed immediately. So we don't need to\n # add it to \"add_initializers_to\" to lift it out.\n with ops.name_scope(\"Assign\") as n, ops.colocate_with(self._handle):\n resource_variable_ops.assign_variable_op(\n self._handle, initial_value, name=n)\n else:\n # Init scope is eager but current scope is graph. We will lift out this\n # variable by addint it into \"add_initializers_to\".\n if add_initializers_to is not None:\n add_initializers_to.append((self, initial_value))\n\n def assign_fn():\n with ops.name_scope(\"Assign\") as n, ops.colocate_with(self._handle):\n resource_variable_ops.assign_variable_op(\n self._handle,\n initial_value,\n name=n)\n # Returning values to keep tf.cond happy.\n return ops.convert_to_tensor(1)\n def not_assign_fn():\n return ops.convert_to_tensor(0)\n # Note: this cond is always guaranteed to run because we're inside a\n # defun which will insert automatic control dependencies. It will only\n # execute assign_fn if lifting failed.\n graph = ops.get_default_graph()\n\n # Capture the handle ahead of time in order to avoid querying the shape\n # of the handle which helps async execution performance\n graph.capture(self._handle, shape=())\n control_flow_ops.cond(\n resource_variable_ops.var_is_initialized_op(self._handle),\n not_assign_fn, assign_fn)\n\n\nRUN_FUNCTIONS_EAGERLY = False\n\n\n@deprecation.deprecated(\n None,\n \"Use `tf.config.run_functions_eagerly` instead of the experimental \"\n \"version.\")\n@tf_export(\"config.experimental_run_functions_eagerly\")\ndef experimental_run_functions_eagerly(run_eagerly):\n \"\"\"Enables / disables eager execution of `tf.function`s.\n\n Calling `tf.config.experimental_run_functions_eagerly(True)` will make all\n invocations of `tf.function` run eagerly instead of running as a traced graph\n function.\n\n This can be useful for debugging or profiling. For example, let's say you\n implemented a simple iterative sqrt function, and you want to collect the\n intermediate values and plot the convergence. Appending the values to a list\n in `@tf.function` normally wouldn't work since it will just record the Tensors\n being traced, not the values. 
Instead, you can do the following.\n\n >>> ys = []\n >>>\n >>> @tf.function\n ... def sqrt(x):\n ... y = x / 2\n ... d = y\n ... for _ in range(10):\n ... d /= 2\n ... if y * y < x:\n ... y += d\n ... else:\n ... y -= d\n ... ys.append(y.numpy())\n ... return y\n >>>\n >>> tf.config.experimental_run_functions_eagerly(True)\n >>> sqrt(tf.constant(2.))\n <tf.Tensor: shape=(), dtype=float32, numpy=1.4150391>\n >>> ys\n [1.5, 1.25, 1.375, 1.4375, 1.40625, 1.421875, 1.4140625, 1.4179688, 1.4160156,\n 1.4150391]\n >>> tf.config.experimental_run_functions_eagerly(False)\n\n Calling `tf.config.experimental_run_functions_eagerly(False)` will undo this\n behavior.\n\n Note: This flag has no effect on functions passed into tf.data transformations\n as arguments. tf.data functions are never executed eagerly and are always\n executed as a compiled Tensorflow Graph.\n\n Args:\n run_eagerly: Boolean. Whether to run functions eagerly.\n \"\"\"\n return run_functions_eagerly(run_eagerly)\n\n\n@tf_export(\"config.run_functions_eagerly\")\ndef run_functions_eagerly(run_eagerly):\n \"\"\"Enables / disables eager execution of `tf.function`s.\n\n Calling `tf.config.run_functions_eagerly(True)` will make all\n invocations of `tf.function` run eagerly instead of running as a traced graph\n function.\n\n This can be useful for debugging or profiling. For example, let's say you\n implemented a simple iterative sqrt function, and you want to collect the\n intermediate values and plot the convergence. Appending the values to a list\n in `@tf.function` normally wouldn't work since it will just record the Tensors\n being traced, not the values. Instead, you can do the following.\n\n >>> ys = []\n >>>\n >>> @tf.function\n ... def sqrt(x):\n ... y = x / 2\n ... d = y\n ... for _ in range(10):\n ... d /= 2\n ... if y * y < x:\n ... y += d\n ... else:\n ... y -= d\n ... ys.append(y.numpy())\n ... return y\n >>>\n >>> tf.config.run_functions_eagerly(True)\n >>> sqrt(tf.constant(2.))\n <tf.Tensor: shape=(), dtype=float32, numpy=1.4150391>\n >>> ys\n [1.5, 1.25, 1.375, 1.4375, 1.40625, 1.421875, 1.4140625, 1.4179688, 1.4160156,\n 1.4150391]\n >>> tf.config.run_functions_eagerly(False)\n\n Calling `tf.config.run_functions_eagerly(False)` will undo this\n behavior.\n\n Note: This flag has no effect on functions passed into tf.data transformations\n as arguments. tf.data functions are never executed eagerly and are always\n executed as a compiled Tensorflow Graph.\n\n Args:\n run_eagerly: Boolean. 
Whether to run functions eagerly.\n \"\"\"\n global RUN_FUNCTIONS_EAGERLY\n RUN_FUNCTIONS_EAGERLY = bool(run_eagerly)\n\n\n@deprecation.deprecated(\n None,\n \"Use tf.config.functions_run_eagerly instead of the experimental version.\")\n@tf_export(\"config.experimental_functions_run_eagerly\")\ndef experimental_functions_run_eagerly():\n \"\"\"Returns the value of the `experimental_run_functions_eagerly` setting.\"\"\"\n return functions_run_eagerly()\n\n\n@tf_export(\"config.functions_run_eagerly\")\ndef functions_run_eagerly():\n \"\"\"Returns the value of the `run_functions_eagerly` setting.\"\"\"\n return RUN_FUNCTIONS_EAGERLY\n\n\nclass FunctionDeleter(object):\n\n __slots__ = [\"func_graph\"]\n\n def __init__(self, func_graph):\n self.func_graph = func_graph\n\n def __del__(self):\n try:\n func_graph_module.dismantle_func_graph(self.func_graph)\n except: # pylint: disable=bare-except\n # Note: bare except here because this can be noisy at shutdown time.\n pass\n\n\nclass Function(object):\n \"\"\"Wrapper class for the graph functions defined for a Python function.\n\n See the documentation for `tf.function` for more information on the semantics\n of defined functions.\n\n `Function` is thread-compatible.\n \"\"\"\n\n def __init__(self,\n python_function,\n name,\n input_signature=None,\n autograph=True,\n experimental_implements=None,\n experimental_autograph_options=None,\n experimental_relax_shapes=False,\n experimental_compile=None,\n experimental_follow_type_hints=None):\n \"\"\"Initializes a `Function`.\n\n Args:\n python_function: the function to be wrapped.\n name: the name given to it.\n input_signature: a possibly nested sequence of `TensorSpec` objects\n specifying the input signature of this function. If `None`, a separate\n function is instantiated for each inferred input signature.\n autograph: whether `python_function` should be converted to graph mode.\n See https://www.tensorflow.org/guide/autograph for more information.\n experimental_implements: If provided, contains a name of a \"known\"\n function this implements. For example \"mycompany.my_recurrent_cell\".\n This is stored as an attribute in the serialized representation,\n which can then be detected and manipulated when processing serialized\n graph.\n See\n https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md\n for details. For an example of utilizing this attribute see:\n https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc\n The code above automatically detects and substitutes function that\n implements \"embedded_matmul\" and allows TFLite to substitute its own\n implementations. For instance, a tensorflow user can use this\n attribute to mark that their function also implements\n `embedded_matmul``` (perhaps more efficiently!)\n by specifying it using this flag.\n\n ```python\n @tf.function(\n experimental_implements=\"lingvo.SimpleEmbeddingLayer.EmbMatmul\")\n def embedding_matmul(a, b):\n # custom implementation here\n ```\n This can either be specified as just the string name of the function or\n a NameAttrList corresponding to a list of key-value attributes\n with the function name. The name of the function will be in the 'name'\n field of the NameAttrList.\n experimental_autograph_options: optional tuple of\n tensorflow.autograph.Feature values. 
Allows enabling additional\n conversion options when autograph is set to True.\n experimental_relax_shapes: When true, argument shapes may be relaxed to\n avoid unnecessary retracing.\n experimental_compile: If `True`, compiles the function using XLA\n (see https://tensorflow.org/xla). XLA performs compiler optimizations,\n such as fusion, and attempts to emit more efficient code. This may\n drastically improve the performance. If set to `True`,\n the whole function needs to be compilable by XLA, or an\n `errors.InvalidArgumentError` is thrown.\n If `None` (default), compiles the function with XLA when running on TPU\n and goes through the regular function execution path when running on\n other devices.\n If `False`, executes the function in a regular way (graph rewrite\n passes are applied, kernels are dispatched one-by-one by the TensorFlow\n executor). Set this value to `False` when directly running a\n multi-device function on TPUs (e.g. two TPU cores, one TPU core and its\n host CPU).\n experimental_follow_type_hints: See the documentation for `tf.function`.\n\n Raises:\n ValueError: if `input_signature` is not None and the `python_function`'s\n argspec has keyword arguments.\n \"\"\"\n self._lock = threading.Lock()\n self._python_function = python_function\n self._function_spec = function_lib.FunctionSpec.from_function_and_signature(\n python_function,\n input_signature,\n experimental_compile=experimental_compile,\n experimental_follow_type_hints=experimental_follow_type_hints,\n )\n self._implements = experimental_implements\n # If `True`, the function uses the rendezvous of the parent. This is only\n # needed to support code where raw send/recv operations are inserted and\n # when functions are run in graph mode where they may not be inlined.\n self._shared_rendezvous = None\n self._autograph = autograph\n self._experimental_autograph_options = experimental_autograph_options\n self._experimental_relax_shapes = experimental_relax_shapes\n self._experimental_compile = experimental_compile\n if experimental_follow_type_hints is None:\n experimental_follow_type_hints = False\n self._experimental_follow_type_hints = experimental_follow_type_hints\n self._created_variables = None # GUARDED_BY(self._lock)\n self._stateful_fn = None # GUARDED_BY(self._lock)\n self._stateless_fn = None # GUARDED_BY(self._lock)\n self._descriptor_cache = weakref.WeakKeyDictionary()\n self._name = name\n self._input_signature = input_signature\n self._key_for_call_stats = self._get_key_for_call_stats()\n ops._tf_function_api_guage.get_cell().set(True) # pylint: disable=protected-access\n\n def __getstate__(self):\n \"\"\"Custom pickling, to omit unpickleable objects.\"\"\"\n result = self.__dict__.copy()\n del result[\"_lock\"]\n del result[\"_descriptor_cache\"]\n del result[\"_key_for_call_stats\"]\n return result\n\n def __setstate__(self, state):\n \"\"\"Restore from pickled state.\"\"\"\n self.__dict__ = state\n self._lock = threading.Lock()\n self._descriptor_cache = weakref.WeakKeyDictionary()\n self._key_for_call_stats = self._get_key_for_call_stats()\n\n def _get_key_for_call_stats(self):\n \"\"\"Returns key instance to track call stats and retracings.\n\n The key instance a best-effort to preserve global consistency.\n \"\"\"\n target_function = self._python_function\n # `__wrapped__` is a conventional Python attribute that a higher-order\n # function keeps its original function's instance. We also directly use\n # this attribute for dealing with a class method. 
See\n # `bound_method_wrapper` in `function.py`. If we don't use `__wrapped__`,\n # all class methods will return the same `bound_method_wrapper` instance\n # from this function.\n while hasattr(target_function, \"__wrapped__\"):\n target_function = target_function.__wrapped__\n\n if hasattr(target_function, \"__func__\"):\n target_function = target_function.__func__\n\n if hasattr(target_function, \"__code__\"):\n return target_function.__code__\n\n return self._python_function\n\n def _defun_with_scope(self, scope):\n \"\"\"Creates a defun wrapped inside a variable creator scope.\"\"\"\n\n weak_wrapped_fn = None\n compile_with_xla = self._experimental_compile\n\n def wrapped_fn(*args, **kwds):\n \"\"\"Wraps `self._python_function` in a variable creator scope.\"\"\"\n # We register a variable creator with reduced priority. If an outer\n # variable creator is just modifying keyword arguments to the variable\n # constructor, this will work harmoniously. Since the `scope` registered\n # here actually creates the variable, it taking priority would otherwise\n # ignore the outer creator.\n #\n # If an outer variable creator calls the variable constructor manually,\n # for example creating a MirroredVariable, then they won't call our\n # creator. This means we won't be able to trace the initialization graph,\n # and so variable initializers can't depend on function arguments. This is\n # better than the alternative, tracing the initialization graph but giving\n # the user a variable type they didn't want.\n default_graph = ops.get_default_graph()\n with default_graph._variable_creator_scope(scope, priority=50): # pylint: disable=protected-access\n # __wrapped__ allows AutoGraph to swap in a converted function. We give\n # the function a weak reference to itself to avoid a reference cycle.\n if compile_with_xla and \\\n not control_flow_util.GraphOrParentsInXlaContext(default_graph):\n xla_context = control_flow_ops.XLAControlFlowContext()\n try:\n xla_context.Enter()\n out = weak_wrapped_fn().__wrapped__(*args, **kwds)\n finally:\n xla_context.Exit()\n else:\n out = weak_wrapped_fn().__wrapped__(*args, **kwds)\n return out\n\n weak_wrapped_fn = weakref.ref(wrapped_fn)\n\n return self._defun(tf_decorator.make_decorator(\n self._python_function,\n wrapped_fn))\n\n def _create_implements_attribute(self):\n \"\"\"Creates the attribute value corresponding to IMPLEMENTS_ATTRIBUTE_NAME.\"\"\"\n attributes = {}\n if isinstance(self._implements, str):\n # First check if the IMPLEMENTS_ATTRIBUTE_NAME is specified as a\n # NameAttrList. This is used when apart from the function name being\n # implemented, a list of attributes is also being specified.\n # The attributes are specified as key-value pairs in the NameAttrList\n # of the corresponding AttrValue. The function name will be in the\n # 'name' field of the NameAttrList. 
Else, it is just a string\n # corresponding to the function name.\n try:\n implements_attr = six.ensure_text(self._implements, \"utf-8\")\n attr_value = attr_value_pb2.AttrValue()\n nameattrlist = attr_value_pb2.NameAttrList()\n _text_format.Merge(implements_attr, nameattrlist)\n attr_value.func.CopyFrom(nameattrlist)\n attributes[function_lib.IMPLEMENTS_ATTRIBUTE_NAME] = attr_value\n except (_text_format.ParseError, DecodeError):\n attributes[function_lib.IMPLEMENTS_ATTRIBUTE_NAME] = self._implements\n return attributes\n\n def _defun(self, fn):\n \"\"\"Returns a defun generated from the input function.\"\"\"\n attributes = {}\n\n if self._implements is not None:\n attributes = self._create_implements_attribute()\n\n share = self._shared_rendezvous\n if share is not None:\n attributes[function_lib.SHARED_RENDEZVOUS_ATTRIBUTE_NAME] = share\n\n if self._experimental_compile is not None:\n attributes.update(_XlaMustCompile=bool(self._experimental_compile))\n if self._experimental_compile:\n attributes.update(_noinline=True)\n if not attributes:\n attributes = None\n return function_lib.defun_with_attributes(\n fn,\n input_signature=self.input_signature,\n attributes=attributes,\n autograph=self._autograph,\n experimental_autograph_options=self._experimental_autograph_options,\n experimental_compile=self._experimental_compile,\n experimental_follow_type_hints=self._experimental_follow_type_hints,\n experimental_relax_shapes=self._experimental_relax_shapes)\n\n def _initialize(self, args, kwds, add_initializers_to=None):\n \"\"\"Initializes, on the first call.\n\n Creates two `Function`s, one that will allow creation of variables\n and one that won't.\n\n Additionally runs a trace for the `Function` that allows creation\n of variables.\n\n Args:\n args: Arguments to the underlying python callable.\n kwds: Keyword arguments to the python callable.\n add_initializers_to: Where to collect variable initializers, if not None.\n \"\"\"\n\n created_variables = []\n lifted_initializer_graph = func_graph_module.FuncGraph(\"initializer\")\n\n def variable_capturing_scope(unused_next_creator, **kwds):\n \"\"\"Creates UnliftedInitializerVariables and saves references to them.\"\"\"\n v = UnliftedInitializerVariable(\n add_initializers_to=add_initializers_to,\n lifted_initializer_graph=lifted_initializer_graph, **kwds)\n created_variables.append(weakref.ref(v))\n return v\n\n self._created_variables = created_variables\n self._stateful_fn = self._defun_with_scope(variable_capturing_scope)\n self._stateful_fn._name = self._name # pylint: disable=protected-access\n # Force the definition of the function for these arguments\n self._lifted_initializer_graph = lifted_initializer_graph\n self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)\n self._concrete_stateful_fn = (\n self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access\n *args, **kwds))\n\n def invalid_creator_scope(*unused_args, **unused_kwds):\n \"\"\"Disables variable creation.\"\"\"\n raise ValueError(\n \"tf.function-decorated function tried to create \"\n \"variables on non-first call.\")\n\n self._stateless_fn = self._defun_with_scope(invalid_creator_scope)\n self._stateless_fn._name = self._name # pylint: disable=protected-access\n\n def _clone(self, python_function):\n \"\"\"Clone the function with different python function.\"\"\"\n f = Function(\n python_function=(self._python_function\n if python_function is None else python_function),\n name=self._name,\n 
input_signature=self._input_signature,\n autograph=self._autograph,\n experimental_implements=self._implements,\n experimental_autograph_options=self._experimental_autograph_options,\n experimental_relax_shapes=self._experimental_relax_shapes,\n experimental_compile=self._experimental_compile,\n experimental_follow_type_hints=self._experimental_follow_type_hints)\n\n if self._shared_rendezvous:\n f._shared_rendezvous = self._shared_rendezvous # pylint: disable=protected-access\n\n return f\n\n def _decorate(self, decorator):\n \"\"\"Allows the captured Python function to be decorated in place.\n\n This method is only safe to call when the Function has not been called by a\n user. It makes sense to use this method to push a decorator into the\n function rather than wrapping the function in the decorator.\n\n We use this in tf.Module to allow user annotated `tf.functions` to remain as\n `Function` objects but still automatically enter the Module name_scope\n when they are evaluated like all other methods.\n\n Args:\n decorator: A callable accepting a single argument which is the function\n to decorate and returning a callable result.\n\n Raises:\n ValueError: If the function has been called a ValueError is raised.\n \"\"\"\n if self._stateful_fn is not None or self._stateless_fn is not None:\n raise ValueError(\n \"Functions cannot be decorated after they have been traced.\")\n\n self._python_function = decorator(self._python_function)\n self._function_spec = function_lib.FunctionSpec.from_function_and_signature(\n self._python_function, self.input_signature)\n\n def _get_tracing_count(self):\n result = self._stateless_fn.tracing_count if self._stateless_fn else 0\n result += self._stateful_fn.tracing_count if self._stateful_fn else 0\n return result\n\n def __call__(self, *args, **kwds):\n \"\"\"Calls the graph function and warn too frequent tracings.\"\"\"\n if RUN_FUNCTIONS_EAGERLY:\n with trace.Trace(self._name, tf_function_call=\"eager\"):\n return self._python_function(*args, **kwds)\n\n tracing_count = self._get_tracing_count()\n with trace.Trace(self._name) as tm:\n result = self._call(*args, **kwds)\n compiler = \"xla\" if self._experimental_compile else \"nonXla\"\n new_tracing_count = self._get_tracing_count()\n without_tracing = (tracing_count == new_tracing_count)\n execution_mode = \"notTraced\" if without_tracing else \"traced\"\n tm.set_metadata(tf_function_call=execution_mode + \"-\" + compiler,\n tracing_count=new_tracing_count)\n\n if context.executing_eagerly():\n if without_tracing:\n _frequent_tracing_detector.called_without_tracing(\n self._key_for_call_stats)\n else:\n _frequent_tracing_detector.called_with_tracing(self._key_for_call_stats,\n self._python_function)\n\n return result\n\n def _call(self, *args, **kwds):\n \"\"\"Calls the graph function.\"\"\"\n self._lock.acquire()\n if self._created_variables:\n # Release the lock early so that multiple threads can perform the call\n # in parallel.\n self._lock.release()\n # In this case we have created variables on the first call, so we run the\n # defunned version which is guaranteed to never create variables.\n return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable\n elif self._stateful_fn is not None:\n # Release the lock early so that multiple threads can perform the call\n # in parallel.\n self._lock.release()\n # In this case we have not created variables on the first call. 
So we can\n # run the first trace but we should fail if variables are created.\n results = self._stateful_fn(*args, **kwds)\n if self._created_variables:\n raise ValueError(\"Creating variables on a non-first call to a function\"\n \" decorated with tf.function.\")\n return results\n\n try:\n # This is the first call of __call__, so we have to initialize.\n initializers = []\n self._initialize(args, kwds, add_initializers_to=initializers)\n finally:\n # At this point we know that the initialization is complete (or less\n # interestingly an exception was raised) so we no longer need a lock.\n self._lock.release()\n\n if self._created_variables:\n try:\n # Attempt to initialize variables eagerly and without conds by lifting\n # out initialization graphs. This is the only initialization strategy\n # compatible with XLA at the moment.\n self._initialize_uninitialized_variables(initializers)\n except lift_to_graph.UnliftableError:\n pass # Fall through to cond-based initialization.\n else:\n # Lifting succeeded, so variables are initialized and we can run the\n # stateless function.\n return self._stateless_fn(*args, **kwds)\n else:\n _, _, _, filtered_flat_args = \\\n self._stateful_fn._function_spec.canonicalize_function_inputs( # pylint: disable=protected-access\n *args, **kwds)\n # If we did not create any variables the trace we have is good enough.\n return self._concrete_stateful_fn._call_flat(\n filtered_flat_args, self._concrete_stateful_fn.captured_inputs) # pylint: disable=protected-access\n\n def fn_with_cond(inner_args, inner_kwds, inner_filtered_flat_args):\n \"\"\"Conditionally runs initialization if it's needed.\"\"\"\n condition = True\n for wr in self._created_variables:\n variable = wr()\n if variable is None:\n raise ValueError(\n \"A tf.Variable created inside your tf.function has been\"\n \" garbage-collected. Your code needs to keep Python references\"\n \" to variables created inside `tf.function`s.\\n\"\n \"\\n\"\n \"A common way to raise this error is to create and return a\"\n \" variable only referenced inside your function:\\n\"\n \"\\n\"\n \"@tf.function\\n\"\n \"def f():\\n\"\n \" v = tf.Variable(1.0)\\n\"\n \" return v\\n\"\n \"\\n\"\n \"v = f() # Crashes with this error message!\\n\"\n \"\\n\"\n \"The reason this crashes is that @tf.function annotated\"\n \" function returns a **`tf.Tensor`** with the **value** of the\"\n \" variable when the function is called rather than the\"\n \" variable instance itself. 
As such there is no code holding a\"\n \" reference to the `v` created inside the function and Python\"\n \" garbage collects it.\\n\"\n \"\\n\"\n \"The simplest way to fix this issue is to create variables\"\n \" outside the function and capture them:\\n\"\n \"\\n\"\n \"v = tf.Variable(1.0)\\n\"\n \"\\n\"\n \"@tf.function\\n\"\n \"def f():\\n\"\n \" return v\\n\"\n \"\\n\"\n \"f() # <tf.Tensor: numpy=1.>\\n\"\n \"v.assign_add(1.)\\n\"\n \"f() # <tf.Tensor: numpy=2.>\")\n condition = math_ops.logical_and(\n condition, resource_variable_ops.var_is_initialized_op(\n variable.handle))\n # We want to call stateless_fn if possible because it avoids recomputing\n # potentially expensive initializers.\n return control_flow_ops.cond(\n condition,\n lambda: self._stateless_fn(*inner_args, **inner_kwds),\n functools.partial(\n self._concrete_stateful_fn._call_flat, # pylint: disable=protected-access\n inner_filtered_flat_args,\n captured_inputs=self._concrete_stateful_fn.captured_inputs))\n\n # We've created variables and are unable to lift the initialization graphs,\n # so we fall back to initializing with conds while running the function.\n canon_args, canon_kwds, _, filtered_flat_args = \\\n self._stateful_fn._function_spec.canonicalize_function_inputs( # pylint: disable=protected-access\n *args, **kwds)\n return function_lib.defun(fn_with_cond)(canon_args, canon_kwds,\n filtered_flat_args)\n\n def experimental_get_compiler_ir(self, *args, **kwargs):\n \"\"\"Returns compiler IR for the compiled function.\n\n This API is intended *only* for debugging as there are no guarantees on\n backwards compatibility of returned IR or the allowed values of `stage`.\n\n Args:\n *args: Arguments used for compilation; same arguments as used for calling\n the function. Need to be eager tensors.\n **kwargs: Keyword arguments used for compilation.\n\n Returns:\n Function callable with the stage at which the compiler IR should be\n serialized. 
Allowed values for the `stage` are:\n - `hlo`: HLO output after conversion from TF\n (https://www.tensorflow.org/xla/operation_semantics).\n - `optimized_hlo`: HLO after compiler optimizations.\n - `optimized_hlo_dot`: optimized HLO in DOT format suitable for\n Graphviz.\n\n For example, for\n\n ```python\n @tf.function(experimental_compile=True)\n def f(x):\n return x + 1\n\n f.experimental_get_compiler_ir(tf.random.normal([10, 10])(stage='hlo')\n ```\n\n the output is:\n\n ```\n HloModule a_inference_f_13__.9\n\n ENTRY %a_inference_f_13__.9 (arg0.1: f32[10,10]) -> f32[10,10] {\n %arg0.1 = f32[10,10]{1,0} parameter(0), parameter_replication={false}\n %reshape.2 = f32[10,10]{1,0} reshape(f32[10,10]{1,0} %arg0.1)\n %constant.3 = f32[] constant(1)\n %broadcast.4 = f32[10,10]{1,0} broadcast(f32[] %constant.3)\n %add.5 = f32[10,10]{1,0} add(f32[10,10]{1,0} %reshape.2,\n f32[10,10]{1,0} %broadcast.4)\n %reshape.6 = f32[10,10]{1,0} reshape(f32[10,10]{1,0} %add.5)\n %tuple.7 = (f32[10,10]{1,0}) tuple(f32[10,10]{1,0} %reshape.6)\n ROOT %get-tuple-element.8 = f32[10,10]{1,0}\n get-tuple-element((f32[10,10]{1,0}) %tuple.7), index=0\n }\n ```\n\n Raises:\n ValueError: If an invalid `stage` is selected or if applied to a function\n which is not compiled (`experimental_compile=True` is not set).\n TypeError: When called with input in graph mode.\n \"\"\"\n context.ensure_initialized()\n if not self._experimental_compile:\n raise ValueError(\n \"Compiler IR can only be returned for functions marked with \"\n \"experimental_compile=True\")\n\n concrete_fn = self.get_concrete_function(*args, **kwargs)\n fn_name = concrete_fn.name\n\n # pylint: disable=protected-access\n canon_args, _, _, _ = \\\n concrete_fn._function_spec.canonicalize_function_inputs(\n *args, **kwargs)\n\n def compiler_ir_generator(stage='hlo'):\n \"\"\"Returns compiler IR for the given `stage`.\n\n Args:\n stage: Stage at which to return the IR. Allowed values are 'hlo' and\n 'optimized_hlo'.\n \"\"\"\n return context.context().get_compiler_ir(\n stage=stage,\n function_name=fn_name,\n args=list(canon_args) + concrete_fn.captured_inputs)\n\n return compiler_ir_generator\n\n @property\n def python_function(self):\n \"\"\"The python function wrapped in this tf.function.\"\"\"\n return self._python_function\n\n @property\n def input_signature(self):\n return self._function_spec.input_signature\n\n @property\n def function_spec(self):\n return self._function_spec\n\n def pretty_printed_concrete_signatures(self, verbose=True):\n joiner = \"\\n\\n\" if verbose else \"\\n\"\n return joiner.join([\n c.pretty_printed_signature(verbose=verbose)\n for c in self._list_all_concrete_functions()\n ])\n\n def _initialize_uninitialized_variables(self, initializers):\n \"\"\"Make and call a `ConcreteFunction` which initializes variables.\"\"\"\n\n if not initializers:\n return\n\n # Note: using defun here avoids an infinite recursion.\n # Most of the code in this function runs eagerly with init_scope, where\n # autograph is not necessary.\n @function_lib.defun(autograph=False)\n def initialize_variables():\n op_map = object_identity.ObjectIdentityDictionary()\n # Stack all the var_is_initialized values into one tensor and interpret\n # the numpy value. 
This will reduce the number of RPCs between client and\n # worker in the remote case.\n with ops.init_scope():\n var_is_initialized = []\n for v, _ in initializers:\n var_is_initialized.append(\n resource_variable_ops.var_is_initialized_op(v.handle))\n var_is_initialized = array_ops.stack(var_is_initialized).numpy()\n\n inits = []\n for (v, init), is_initialized in zip(initializers, var_is_initialized):\n with ops.init_scope():\n if is_initialized:\n continue\n inits.append(init)\n\n if inits:\n op_map = lift_to_graph.lift_to_graph(\n inits, ops.get_default_graph(), op_map=op_map)\n for (v, init), is_initialized in zip(initializers, var_is_initialized):\n with ops.init_scope():\n if is_initialized:\n continue\n v.assign(op_map[init], read_value=False)\n\n with ops.init_scope():\n return initialize_variables.get_concrete_function()()\n\n def get_initialization_function(self, *args, **kwargs):\n \"\"\"Returns a `ConcreteFunction` which initializes this function's variables.\n\n Requires that this function hasn't been accessed yet through either calling\n it or calling get_concrete_function. Fails if we cannot build an initializer\n function which does not depend on the concrete values of the inputs to this\n function.\n\n Note that running this function will overwrite any values currently assigned\n to variables, for example restores from a checkpoint.\n\n Args:\n *args: arguments to the underlying python callable.\n **kwargs: keyword arguments to the python callable.\n\n Returns:\n A `ConcreteFunction` object which initializes the variables of this\n function.\n\n Raises:\n RuntimeError: if called after the variables have been initialized.\n \"\"\"\n with self._lock:\n if self._stateful_fn is not None:\n raise RuntimeError(\n \"get_initialization_function cannot be called after the function \"\n \"has been used\")\n # Here we trace the function, collect the initializers, and attempt to\n # extract them and run them eagerly. 
Fail only if we cannot do so.\n initializers = []\n self._initialize(args, kwargs, add_initializers_to=initializers)\n\n # Note: using defun here avoids an infinite recursion.\n @function_lib.defun\n def initialize_variables():\n for v, init in initializers:\n v.assign(\n lift_to_graph.lift_to_graph([init], ops.get_default_graph())[init],\n read_value=False)\n\n return initialize_variables.get_concrete_function()\n\n def _list_all_concrete_functions(self):\n \"\"\"Returns all concrete functions.\"\"\"\n if self.input_signature is not None:\n self.get_concrete_function()\n concrete_functions = []\n # pylint: disable=protected-access\n if self._stateful_fn:\n concrete_functions.extend(\n self._stateful_fn._function_cache.all_values())\n if self._stateless_fn:\n concrete_functions.extend(\n self._stateless_fn._function_cache.all_values())\n # pylint: enable=protected-access\n return concrete_functions\n\n def _list_all_concrete_functions_for_serialization(self):\n \"\"\"Returns all concrete functions for serialization.\n\n Returns:\n A list of instances of `ConcreteFunction`.\n \"\"\"\n concrete_functions = self._list_all_concrete_functions()\n seen_signatures = []\n for concrete_function in concrete_functions:\n signature = concrete_function.structured_input_signature\n flattened = nest.flatten(signature)\n if any(\n isinstance(arg, func_graph_module.UnknownArgument)\n for arg in flattened):\n logging.info(\"Unsupported signature for serialization: %s.\", signature)\n continue\n equal_to_signature = functools.partial(\n function_lib.is_same_structure, signature, check_values=True)\n if not any(equal_to_signature(s) for s in seen_signatures):\n seen_signatures.append(signature)\n\n # Re-create concrete functions for these signatures. Re-creating ensures\n # that if the cache key has changed, the function will be traced again.\n concrete_functions = []\n for args, kwargs in seen_signatures:\n concrete_functions.append(self.get_concrete_function(*args, **kwargs))\n return concrete_functions\n\n def _get_concrete_function_garbage_collected(self, *args, **kwargs):\n \"\"\"Returns a `ConcreteFunction` specialized to inputs and execution context.\n\n Unlike `get_concrete_function(...)`, the graph will be deleted when the\n returned function is deleted. It's useful to avoid creating a reference\n cycle when you know for sure that the graph will be no longer used without\n the returned function.\n\n Args:\n *args: inputs to specialize on.\n **kwargs: inputs to specialize on.\n\n Returns:\n A TensorFlow function which takes exactly one `tf.Tensor` per argument.\n\n Raises:\n ValueError: if this object has not yet been called on concrete values.\n \"\"\"\n with self._lock:\n if self._stateful_fn is None:\n initializers = []\n self._initialize(args, kwargs, add_initializers_to=initializers)\n self._initialize_uninitialized_variables(initializers)\n\n if self._created_variables:\n # In this case we have created variables on the first call, so we run the\n # defunned version which is guaranteed to never create variables.\n return self._stateless_fn._get_concrete_function_garbage_collected( # pylint: disable=protected-access\n *args, **kwargs)\n elif self._stateful_fn is not None:\n # In this case we have not created variables on the first call. 
So we can\n # run the first trace but we should fail if variables are created.\n concrete = self._stateful_fn._get_concrete_function_garbage_collected( # pylint: disable=protected-access\n *args, **kwargs)\n if self._created_variables:\n raise ValueError(\"Creating variables on a non-first call to a function\"\n \" decorated with tf.function.\")\n return concrete\n\n def get_concrete_function(self, *args, **kwargs):\n \"\"\"Returns a `ConcreteFunction` specialized to inputs and execution context.\n\n If this `Function` was created with an `input_signature`, `args` and\n `kwargs` may be omitted. With an input signature there is only one\n concrete function associated with this `Function`.\n\n If there is no fixed `input_signature` associated with this\n `Function`, positional and keyword arguments to `get_concrete_function`\n follow the same rules as input signature specification, with `tf.TensorSpec`\n objects describing `tf.Tensor`s which will be passed to the concrete\n function.\n\n Each `tf.Tensor` argument to the concrete function must have a unique name,\n either because it is the only one associated with a named argument of the\n Python function or because an explicit `name=` was passed to its\n `tf.TensorSpec` object. These names become the argument names for the\n concrete function.\n\n Arguments to the concrete function may always be specified as keyword\n arguments, naming the Tensor input. Positional arguments may be used instead\n when each preceding argument to the Python function is a Tensor.\n\n ```python\n @tf.function\n def f(x):\n return x\n\n f_concrete = f.get_concrete_function(tf.TensorSpec([], tf.float64))\n f_concrete(tf.constant(1.))\n f_concrete(x=tf.constant(1.))\n ```\n\n Nested structures containing Tensors may be specified when retrieving\n concrete functions. Structures with multiple Tensors are expanded into\n multiple arguments of the concrete function. Since multiple concrete\n function arguments are associated with one argument to the original\n function, these Tensors must be named explicitly. Tensors in nested\n structures may not be passed using positional arguments when calling the\n concrete function.\n\n ```python\n f_concrete2 = f.get_concrete_function(\n (tf.TensorSpec(None, tf.float64, name=\"first\"),\n tf.TensorSpec([], tf.float32, name=\"second\")))\n # Keyword arguments are required when identifying Tensors in nested\n # structures.\n f_concrete2(first=tf.constant([1.]), second=tf.constant(0.))\n ```\n\n Functions with fixed input signatures have only one concrete function\n associated with them, which can be retrieved without specifying any\n arguments. 
As before Tensors must have unique names, either inferred from\n the argument names in the original Python function or specified\n explicitly.\n\n ```python\n @tf.function(input_signature=(tf.TensorSpec(None, tf.float32)))\n def f_sig(y):\n return y\n\n f_sig_concrete = f.get_concrete_function()\n f_sig_concrete(tf.constant(1.))\n f_sig_concrete(y=tf.constant(1.))\n ```\n\n Args:\n *args: inputs to specialize on.\n **kwargs: inputs to specialize on.\n\n Returns:\n A TensorFlow function which takes exactly one `tf.Tensor` per argument.\n\n Raises:\n ValueError: if this object has not yet been called on concrete values.\n \"\"\"\n concrete = self._get_concrete_function_garbage_collected(*args, **kwargs)\n concrete._garbage_collector.release() # pylint: disable=protected-access\n return concrete\n\n def __get__(self, instance, owner):\n \"\"\"Makes it possible to defun instance methods.\"\"\"\n del owner\n # `instance` here is the instance that this `Function` was accessed through\n # e.g., for\n #\n # class Foo(object):\n #\n # @function.defun\n # def bar(self):\n # ...\n #\n # foo = Foo()\n # foo.bar() # `foo.bar` is a `Function` instance\n #\n # then `instance` will be `foo` (and `owner` will be `Foo`). We create a\n # new instance of `Function` here to allow different instances each\n # to create variables once, thereby allowing methods to be decorated with\n # tf.function. Keeps a cache to avoid retracing the function every time the\n # descriptor is accessed.\n if instance not in self._descriptor_cache:\n if instance is None:\n return self\n self._descriptor_cache[instance] = (\n function_lib.class_method_to_instance_method(self, instance))\n return self._descriptor_cache[instance]\n\n\n@tf_export(\"function\")\ndef function(func=None,\n input_signature=None,\n autograph=True,\n experimental_implements=None,\n experimental_autograph_options=None,\n experimental_relax_shapes=False,\n experimental_compile=None,\n experimental_follow_type_hints=None):\n \"\"\"Compiles a function into a callable TensorFlow graph.\n\n `tf.function` constructs a callable that executes a TensorFlow graph\n (`tf.Graph`) created by trace-compiling the TensorFlow operations in `func`,\n effectively executing `func` as a TensorFlow graph.\n\n Example usage:\n\n >>> @tf.function\n ... def f(x, y):\n ... return x ** 2 + y\n >>> x = tf.constant([2, 3])\n >>> y = tf.constant([3, -2])\n >>> f(x, y)\n <tf.Tensor: ... numpy=array([7, 7], ...)>\n\n _Features_\n\n `func` may use data-dependent control flow, including `if`, `for`, `while`\n `break`, `continue` and `return` statements:\n\n >>> @tf.function\n ... def f(x):\n ... if tf.reduce_sum(x) > 0:\n ... return x * x\n ... else:\n ... return -x // 2\n >>> f(tf.constant(-2))\n <tf.Tensor: ... numpy=1>\n\n `func`'s closure may include `tf.Tensor` and `tf.Variable` objects:\n\n >>> @tf.function\n ... def f():\n ... return x ** 2 + y\n >>> x = tf.constant([-2, -3])\n >>> y = tf.Variable([3, -2])\n >>> f()\n <tf.Tensor: ... numpy=array([7, 7], ...)>\n\n `func` may also use ops with side effects, such as `tf.print`, `tf.Variable`\n and others:\n\n >>> v = tf.Variable(1)\n >>> @tf.function\n ... def f(x):\n ... for i in tf.range(x):\n ... v.assign_add(i)\n >>> f(3)\n >>> v\n <tf.Variable ... numpy=4>\n\n Important: Any Python side-effects (appending to a list, printing with\n `print`, etc) will only happen once, when `func` is traced. To have\n side-effects executed into your `tf.function` they need to be written\n as TF ops:\n\n >>> l = []\n >>> @tf.function\n ... 
def f(x):\n ... for i in x:\n ... l.append(i + 1) # Caution! Will only happen once when tracing\n >>> f(tf.constant([1, 2, 3]))\n >>> l\n [<tf.Tensor ...>]\n\n Instead, use TensorFlow collections like `tf.TensorArray`:\n\n >>> @tf.function\n ... def f(x):\n ... ta = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True)\n ... for i in range(len(x)):\n ... ta = ta.write(i, x[i] + 1)\n ... return ta.stack()\n >>> f(tf.constant([1, 2, 3]))\n <tf.Tensor: ..., numpy=array([2, 3, 4], ...)>\n\n _`tf.function` is polymorphic_\n\n Internally, `tf.function` can build more than one graph, to support arguments\n with different data types or shapes, since TensorFlow can build more\n efficient graphs that are specialized on shapes and dtypes. `tf.function`\n also treats any pure Python value as opaque objects, and builds a separate\n graph for each set of Python arguments that it encounters.\n\n To obtain an individual graph, use the `get_concrete_function` method of\n the callable created by `tf.function`. It can be called with the same\n arguments as `func` and returns a special `tf.Graph` object:\n\n >>> @tf.function\n ... def f(x):\n ... return x + 1\n >>> isinstance(f.get_concrete_function(1).graph, tf.Graph)\n True\n\n Caution: Passing python scalars or lists as arguments to `tf.function` will\n always build a new graph. To avoid this, pass numeric arguments as Tensors\n whenever possible:\n\n >>> @tf.function\n ... def f(x):\n ... return tf.abs(x)\n >>> f1 = f.get_concrete_function(1)\n >>> f2 = f.get_concrete_function(2) # Slow - builds new graph\n >>> f1 is f2\n False\n >>> f1 = f.get_concrete_function(tf.constant(1))\n >>> f2 = f.get_concrete_function(tf.constant(2)) # Fast - reuses f1\n >>> f1 is f2\n True\n\n Python numerical arguments should only be used when they take few distinct\n values, such as hyperparameters like the number of layers in a neural network.\n\n _Input signatures_\n\n For Tensor arguments, `tf.function` instantiates a separate graph for every\n unique set of input shapes and datatypes. The example below creates two\n separate graphs, each specialized to a different shape:\n\n >>> @tf.function\n ... def f(x):\n ... return x + 1\n >>> vector = tf.constant([1.0, 1.0])\n >>> matrix = tf.constant([[3.0]])\n >>> f.get_concrete_function(vector) is f.get_concrete_function(matrix)\n False\n\n An \"input signature\" can be optionally provided to `tf.function` to control\n the graphs traced. The input signature specifies the shape and type of each\n Tensor argument to the function using a `tf.TensorSpec` object. More general\n shapes can be used. This is useful to avoid creating multiple graphs when\n Tensors have dynamic shapes. It also restricts the shape and datatype of\n Tensors that can be used:\n\n >>> @tf.function(\n ... input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])\n ... def f(x):\n ... return x + 1\n >>> vector = tf.constant([1.0, 1.0])\n >>> matrix = tf.constant([[3.0]])\n >>> f.get_concrete_function(vector) is f.get_concrete_function(matrix)\n True\n\n _Variables may only be created once_\n\n `tf.function` only allows creating new `tf.Variable` objects when it is called\n for the first time:\n\n >>> class MyModule(tf.Module):\n ... def __init__(self):\n ... self.v = None\n ...\n ... @tf.function\n ... def __call__(self, x):\n ... if self.v is None:\n ... self.v = tf.Variable(tf.ones_like(x))\n ... 
return self.v * x\n\n In general, it is recommended to create stateful objects like `tf.Variable`\n outside of `tf.function` and passing them as arguments.\n\n _Using type annotations to improve performance_\n\n 'experimental_follow_type_hints` can be used along with type annotations to\n improve performance by reducing the number of expensive graph retracings.\n For example, an argument annotated with `tf.Tensor` is converted to Tensor\n even when the input is a non-Tensor value.\n\n >>> @tf.function(experimental_follow_type_hints=True)\n ... def f_with_hints(x: tf.Tensor):\n ... print('Tracing')\n ... return x\n >>> @tf.function(experimental_follow_type_hints=False)\n ... def f_no_hints(x: tf.Tensor):\n ... print('Tracing')\n ... return x\n >>> f_no_hints(1)\n Tracing\n <tf.Tensor: shape=(), dtype=int32, numpy=1>\n >>> f_no_hints(2)\n Tracing\n <tf.Tensor: shape=(), dtype=int32, numpy=2>\n >>> f_with_hints(1)\n Tracing\n <tf.Tensor: shape=(), dtype=int32, numpy=1>\n >>> f_with_hints(2)\n <tf.Tensor: shape=(), dtype=int32, numpy=2>\n\n Args:\n func: the function to be compiled. If `func` is None, `tf.function` returns\n a decorator that can be invoked with a single argument - `func`. In other\n words, `tf.function(input_signature=...)(func)` is equivalent to\n `tf.function(func, input_signature=...)`. The former can be used as\n decorator.\n input_signature: A possibly nested sequence of `tf.TensorSpec` objects\n specifying the shapes and dtypes of the Tensors that will be supplied to\n this function. If `None`, a separate function is instantiated for each\n inferred input signature. If input_signature is specified, every input to\n `func` must be a `Tensor`, and `func` cannot accept `**kwargs`.\n autograph: Whether autograph should be applied on `func` before tracing a\n graph. Data-dependent control flow requires `autograph=True`. For more\n information, see the [tf.function and AutoGraph guide](\n https://www.tensorflow.org/guide/function).\n experimental_implements: If provided, contains a name of a \"known\" function\n this implements. For example \"mycompany.my_recurrent_cell\".\n This is stored as an attribute in inference function,\n which can then be detected when processing serialized function.\n See [standardizing composite ops](https://github.com/tensorflow/community/blob/master/rfcs/20190610-standardizing-composite_ops.md) # pylint: disable=line-too-long\n for details. For an example of utilizing this attribute see this\n [example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc)\n The code above automatically detects and substitutes function that\n implements \"embedded_matmul\" and allows TFLite to substitute its own\n implementations. For instance, a tensorflow user can use this\n attribute to mark that their function also implements\n `embedded_matmul` (perhaps more efficiently!)\n by specifying it using this parameter:\n `@tf.function(experimental_implements=\"embedded_matmul\")`\n This can either be specified as just the string name of the function or\n a NameAttrList corresponding to a list of key-value attributes associated\n with the function name. 
The name of the function will be in the 'name'\n field of the NameAttrList.\n experimental_autograph_options: Optional tuple of\n `tf.autograph.experimental.Feature` values.\n experimental_relax_shapes: When True, `tf.function` may generate fewer,\n graphs that are less specialized on input shapes.\n experimental_compile: If True, the function is always compiled by\n [XLA](https://www.tensorflow.org/xla). XLA may be more efficient in some\n cases (e.g. TPU, XLA_GPU, dense tensor computations).\n experimental_follow_type_hints: When True, the function may use type\n annotations from `func` to optimize the tracing performance. For example,\n arguments annotated with `tf.Tensor` will automatically be converted\n to a Tensor.\n\n Returns:\n If `func` is not None, returns a callable that will execute the compiled\n function (and return zero or more `tf.Tensor` objects).\n If `func` is None, returns a decorator that, when invoked with a single\n `func` argument, returns a callable equivalent to the case above.\n\n Raises:\n ValueError when attempting to use experimental_compile, but XLA support is\n not enabled.\n \"\"\"\n # TODO(mdan): Link to `tf.types` section once published.\n if input_signature is not None:\n function_lib.validate_signature(input_signature)\n if experimental_follow_type_hints is None:\n experimental_follow_type_hints = False\n\n def decorated(inner_function):\n try:\n name = inner_function.__name__\n except AttributeError:\n name = \"function\"\n return tf_decorator.make_decorator(\n inner_function,\n decorator_name=\"tf.function\",\n decorator_func=Function(\n inner_function,\n name,\n input_signature=input_signature,\n autograph=autograph,\n experimental_autograph_options=experimental_autograph_options,\n experimental_relax_shapes=experimental_relax_shapes,\n experimental_compile=experimental_compile,\n experimental_implements=experimental_implements,\n experimental_follow_type_hints=experimental_follow_type_hints))\n\n # This code path is for the `foo = tf.function(foo, ...)` use case\n if func is not None:\n return decorated(func)\n\n # This code path is for the\n #\n # @tf.function(...)\n # def foo(...):\n # ...\n #\n # use case, which is equivalent to `foo = tf.function(...)(foo)`\n return decorated\n" ]
[ [ "tensorflow.python.ops.resource_variable_ops.var_is_initialized_op", "tensorflow.python.util.tf_decorator.make_decorator", "tensorflow.python.eager.context.context", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.eager.context.ensure_initialized", "tensorflow.python.eager.function.class_method_to_instance_method", "tensorflow.python.framework.ops._tf_function_api_guage.get_cell", "tensorflow.python.ops.control_flow_ops.XLAControlFlowContext", "tensorflow.python.framework.ops.inside_function", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.ops.resource_variable_ops.assign_variable_op", "tensorflow.python.ops.resource_variable_ops.ResourceVariable.__init__", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.eager.function.defun_with_attributes", "tensorflow.python.framework.func_graph.FuncGraph", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.eager.function.defun", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.core.framework.attr_value_pb2.AttrValue", "tensorflow.core.framework.attr_value_pb2.NameAttrList", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.util.object_identity.ObjectIdentityDictionary", "tensorflow.python.profiler.trace.Trace", "tensorflow.python.eager.function.FunctionSpec.from_function_and_signature", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.framework.func_graph.dismantle_func_graph", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.eager.lift_to_graph.lift_to_graph", "tensorflow.python.eager.function.validate_signature", "tensorflow.python.ops.control_flow_util.GraphOrParentsInXlaContext", "tensorflow.python.util.nest.flatten" ] ]
Juancarlos1983/Pymop
[ "7b7e789e640126c6d254e86ede5d7f4baad7eaa5" ]
[ "tests/archive/wfg.py" ]
[ "import numpy as np\n\nfrom pymop.problem import Problem\n\n\nclass WFG(Problem):\n def __init__(self, name, n_var, n_obj, k=None):\n Problem.__init__(self)\n self.n_obj = n_obj\n self.n_var = n_var\n self.k = 2 * (self.n_obj - 1) if k is None else k\n self.func = self._evaluate\n self.xl = np.zeros(self.n_var)\n self.xu = np.ones(self.n_var)\n\n # function used to evaluate\n self.func = self._evaluate\n\n # the function pointing to the optproblems implementation\n exec('import optproblems.wfg')\n clazz, = eval('optproblems.wfg.%s' % name),\n self._func = clazz(num_objectives=self.n_obj, num_variables=self.n_var, k=self.k)\n\n def _evaluate(self, x, f):\n for n in range(len(x)):\n z = x[n, :]\n f[n, :] = self._func(z)\n\n def pareto_front(self):\n n_optimal_solution = 1000\n pf = np.zeros((n_optimal_solution, self.n_obj))\n\n s = self._func.get_optimal_solutions(n_optimal_solution)\n for n in range(len(s)):\n pf[n, :] = self._func(s[n].phenome)\n return pf\n\n\nclass WFG1(WFG):\n def __init__(self, n_var=10, n_obj=3, k=None):\n super().__init__(self.__class__.__name__, n_var, n_obj, k=k)\n\n\nclass WFG2(WFG):\n def __init__(self, n_var=10, n_obj=3, k=None):\n super().__init__(self.__class__.__name__, n_var, n_obj, k=k)\n\n\nclass WFG3(WFG):\n def __init__(self, n_var=10, n_obj=3, k=None):\n super().__init__(self.__class__.__name__, n_var, n_obj, k=k)\n\n\nclass WFG4(WFG):\n def __init__(self, n_var=10, n_obj=3, k=None):\n super().__init__(self.__class__.__name__, n_var, n_obj, k=k)\n\n\nclass WFG5(WFG):\n def __init__(self, n_var=10, n_obj=3, k=None):\n super().__init__(self.__class__.__name__, n_var, n_obj, k=k)\n\n\nclass WFG6(WFG):\n def __init__(self, n_var=10, n_obj=3, k=None):\n super().__init__(self.__class__.__name__, n_var, n_obj, k=k)\n\n\nclass WFG7(WFG):\n def __init__(self, n_var=10, n_obj=3, k=None):\n super().__init__(self.__class__.__name__, n_var, n_obj, k=k)\n\n\nclass WFG8(WFG):\n def __init__(self, n_var=10, n_obj=3, k=None):\n super().__init__(self.__class__.__name__, n_var, n_obj, k=k)\n\n\nclass WFG9(WFG):\n def __init__(self, n_var=10, n_obj=3, k=None):\n super().__init__(self.__class__.__name__, n_var, n_obj, k=k)\n\n\nif __name__ == \"__main__\":\n \n import matplotlib.pyplot as plt\n\n problem = WFG1(n_var=12, n_obj=3, k=4)\n pf = problem.pareto_front()\n\n fig = plt.figure()\n\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(pf[:, 0], pf[:, 1], pf[:, 2])\n plt.show()\n" ]
[ [ "numpy.ones", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
LaudateCorpus1/caladrius
[ "9094d7542fb1796a690fed2b8b0083533dcac67e" ]
[ "graph/analysis/heron/io_ratios.py" ]
[ "# Copyright 2018 Twitter, Inc.\n# Licensed under the Apache License, Version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n\n\"\"\" This module contains methods for calculating the input output (I/O) ratio\nfor the instances of a given topology. \"\"\"\n\nimport logging\n\nimport datetime as dt\n\nfrom typing import Dict, Union, List, Tuple\n\nimport pandas as pd\nimport numpy as np\n\nfrom caladrius.metrics.heron.client import HeronMetricsClient\nfrom caladrius.graph.gremlin.client import GremlinClient\n\nLOG: logging.Logger = logging.getLogger(__name__)\n\n\ndef get_in_out_components(graph_client: GremlinClient,\n topology_id: str) -> List[str]:\n \"\"\" Gets a list of components that have both incoming and outgoing streams.\n\n Arguments:\n graph_client (GremlinClient): The client instance for the graph\n database.\n topology_id (str): The topology identification string.\n\n Returns:\n A list of component name strings.\n \"\"\"\n\n in_out_comps: List[str] = (graph_client.graph_traversal.V()\n .hasLabel(\"bolt\").has(\"topology_id\",\n topology_id)\n .inE(\"logically_connected\")\n .inV().as_(\"in_out\")\n .outE(\"logically_connected\")\n .select(\"in_out\").by(\"component\")\n .dedup().toList())\n return in_out_comps\n\n\ndef lstsq_io_ratios(metrics_client: HeronMetricsClient,\n graph_client: GremlinClient, topology_id: str,\n cluster: str, environ: str,\n start: dt.datetime, end: dt.datetime, bucket_length: int,\n **kwargs: Union[str, int, float]) -> pd.DataFrame:\n \"\"\" This method will calculate the input/output ratio for each instance in\n the supplied topology using data aggregated from the defined period. The\n method uses least squares regression to calculate a coefficient for each\n input stream into a instance such that the total output amount for a given\n output stream is sum of all input stream arrival amounts times their\n coefficient.\n\n *NOTE*: This method assumes that there is an (approximately) linear\n relationship between the inputs and outputs of a given component.\n\n Arguments:\n metrics_client (HeronMetricsClient): The client instance for the\n metrics database.\n graph_client (GremlinClient): The client instance for the graph\n database.\n topology_id (str): The topology identification string.\n start (dt.datetime): The UTC datetime object for the start of the\n metric gathering period.\n end (dt.datetime): The UTC datetime object for the end of the metric\n gathering period.\n bucket_length (int): The length in seconds that the metrics should\n be aggregated into. *NOTE*: For the least\n squares regression to work the number of\n buckets must exceed the highest number of input\n streams into the component of the topology.\n **kwargs: Additional keyword arguments that will be passed to the\n metrics client object. 
Consult the documentation for the\n specific metrics client beings used.\n Returns:\n pandas.DataFrame: A DataFrame with the following columns:\n\n * task: Task ID integer.\n * output_stream: The output stream name.\n * input_stream: The input stream name.\n * source_component: The name of the source component for the input\n stream.\n * coefficient: The value of the input amount coefficient for this\n output stream, inputs stream source component combination.\n \"\"\"\n\n LOG.info(\"Calculating instance input/output ratios using least squares \"\n \"regression for topology %s over a %d second window between %s \"\n \"and %s\", topology_id, (end-start).total_seconds(),\n start.isoformat(), end.isoformat())\n\n emit_counts: pd.DataFrame = metrics_client.get_emit_counts(\n topology_id, cluster, environ, start, end, **kwargs)\n\n arrived_tuples: pd.DataFrame = metrics_client.get_tuple_arrivals_at_stmgr(\n topology_id, cluster, environ, start, end, **kwargs)\n\n execute_counts: pd.DataFrame = metrics_client.get_execute_counts(\n topology_id, cluster, environ, start, end, **kwargs)\n\n arrived_tuples = arrived_tuples.merge(execute_counts, on=[\"task\", \"component\", \"container\", \"timestamp\"])\n\n arrived_tuples.drop(\"execute_count\", axis=1, inplace=True)\n # Limit the count DataFrames to only those component with both incoming and\n # outgoing streams\n in_out_comps: List[str] = get_in_out_components(graph_client, topology_id)\n\n emit_counts = emit_counts[emit_counts[\"component\"].isin(in_out_comps)]\n emit_counts.rename(index=str, columns={\"stream\": \"outgoing_stream\"},\n inplace=True)\n\n arrived_tuples = arrived_tuples[arrived_tuples[\"component\"]\n .isin(in_out_comps)]\n arrived_tuples.rename(index=str, columns={\"stream\": \"incoming_stream\"},\n inplace=True)\n # Re-sample the counts into equal length time buckets and group by task id,\n # time bucket and stream. 
This aligns the two DataFrames with timestamps of\n # equal length and start point so they can be merged later\n emit_counts_ts: pd.DataFrame = \\\n (emit_counts.set_index([\"task\", \"timestamp\"])\n .groupby([pd.Grouper(level=\"task\"),\n pd.Grouper(freq=f\"{bucket_length}S\", level='timestamp'),\n \"component\", \"outgoing_stream\"])\n [\"emit_count\"]\n .sum().reset_index())\n\n arrived_tuples_ts: pd.DataFrame = \\\n (arrived_tuples.set_index([\"task\", \"timestamp\"])\n .groupby([pd.Grouper(level=\"task\"),\n pd.Grouper(freq=f\"{bucket_length}S\", level='timestamp'),\n \"component\", \"incoming_stream\", \"source_component\"])\n [\"num-tuples\"]\n .sum().reset_index())\n\n rows: List[Dict[str, Union[str, float]]] = []\n\n # Now we loop through each component and munge the data until we have an\n # output total for each output stream for each task on the same row (one\n # row per time bucket) as the input total for each input stream\n component: str\n in_data: pd.DataFrame\n for component, in_data in arrived_tuples_ts.groupby([\"component\"]):\n in_stream_counts: pd.DataFrame = \\\n (in_data.set_index([\"task\", \"timestamp\", \"incoming_stream\",\n \"source_component\"])\n [\"num-tuples\"].unstack(level=[\"incoming_stream\",\n \"source_component\"])\n .reset_index())\n\n out_stream_counts: pd.DataFrame = \\\n emit_counts_ts[emit_counts_ts.component == component]\n\n merged: pd.DataFrame = out_stream_counts.merge(in_stream_counts,\n on=[\"task\",\n \"timestamp\"])\n task: int\n out_stream: str\n data: pd.DataFrame\n for (task, out_stream), data in merged.groupby([\"task\",\n \"outgoing_stream\"]):\n\n LOG.debug(\"Processing instance %d output stream %s\", task,\n out_stream)\n\n # Get a series of the output counts for this output stream, these\n # are the dependent variables (b) of the least squares regression\n # a x = b\n output_counts: pd.DataFrame = data.emit_count\n\n # If this instance's component has output stream registered that\n # nothing else subscribes too then the emit count will be zero and\n # we can skip this output stream\n if output_counts.sum() <= 0.0:\n LOG.debug(\"No emissions from instance %d on stream %s, \"\n \"skipping this stream...\", task, out_stream)\n continue\n\n # Get just the input stream counts for each time bucket. This is\n # the coefficients matrix (a) of the least squares regression\n # a x = b\n cols: List[Tuple[str, str]] = data.columns[5:]\n input_counts: pd.DataFrame = data[cols]\n\n coeffs: List[float]\n coeffs, _, _, _ = np.linalg.lstsq(input_counts, output_counts,\n rcond=None)\n i: int\n in_stream: str\n source: str\n for i, (in_stream, source) in enumerate(cols):\n row: Dict[str, Union[str, float]] = {\n \"task\": task,\n \"output_stream\": out_stream,\n \"input_stream\": in_stream,\n \"source_component\": source,\n \"coefficient\": coeffs[i]}\n rows.append(row)\n result = pd.DataFrame(rows)\n\n if result.empty:\n raise Exception(\"lstsq_io_ratios returns an empty dataframe\")\n\n return result\n" ]
[ [ "numpy.linalg.lstsq", "pandas.Grouper", "pandas.DataFrame" ] ]
Mire-Group/GMCORE
[ "d6743305aacfa3051b6ab0a51033b03804347848" ]
[ "tools/calc_errors.py" ]
[ "#!/usr/bin/env python3\n\nfrom netCDF4 import Dataset\nimport numpy as np\nimport sys\n\nf = Dataset(sys.argv[1], 'r')\n\nh0 = f.variables['h'][0,:]\nh1 = f.variables['h'][-1,:]\ncos_lat = np.cos(np.radians(f.variables['lat'][:]))\n\nL1 = np.sum(np.sum(np.abs(h1 - h0), axis=1) * cos_lat) / np.sum(np.sum(np.abs(h0), axis=1) * cos_lat)\nL2 = np.sqrt(np.sum(np.sum((h1 - h0)**2, axis=1) * cos_lat)) / np.sqrt(np.sum(np.sum(h0**2, axis=1) * cos_lat))\nprint(L1, L2)" ]
[ [ "numpy.radians", "numpy.sum", "numpy.abs" ] ]
arbab97/deepcrime
[ "d85b0b30d11cd858655d8ef2519cd54a6af24c3c" ]
[ "test_models/custom_model_orig.py" ]
[ "\nfrom __future__ import print_function\nimport keras, sys\nfrom utils import mutation_utils\nimport os\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.model_selection import train_test_split\n\ndef main(model_name):\n model_location = os.path.join('trained_models', model_name)\n custom_data_path = '/media/rabi/Data/11111/Task 99/deepcrime/datasets/custom_data.csv'\n data = pd.read_csv(custom_data_path)\n (data, test) = train_test_split(data, test_size=0.2)\n data = data.drop(columns=['id'])\n labels = data['diagnosis']\n data.drop(columns=['diagnosis'], inplace=True)\n data = data.iloc[:, 0:29]\n data_x = data\n n = Normalizer()\n data_x = n.fit_transform(data_x)\n map = {'M': 1, 'B': 0}\n labels = labels.map(map)\n test_y = test['diagnosis']\n test = test.drop(columns=['id', 'diagnosis'])\n test = test.iloc[:, 0:29]\n test = n.transform(test)\n test_y = test_y.map(map)\n test_y.head()\n if (not os.path.exists(model_location)):\n epochs = 300\n batch_size = 32\n model = Sequential()\n model.add(Dense(12, input_dim=29, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(5, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss=keras.losses.binary_crossentropy, optimizer=keras.optimizers.Adam(learning_rate=0.001), metrics=['accuracy'])\n mutation_utils.save_original_model_params(model)\n mutation_utils.save_original_fit_params(x=data_x, batch_size=batch_size, epochs=epochs)\n model.fit(data_x, labels, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(test, test_y))\n model.save(os.path.join('trained_models', 'custom_trained.h5'))\n score = model.evaluate(test, test_y, verbose=0)\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n return score\n else:\n graph1 = tf.Graph()\n with graph1.as_default():\n session1 = tf.compat.v1.Session()\n with session1.as_default():\n model = tf.keras.models.load_model(model_location)\n score = model.evaluate(test, test_y, verbose=0)\n print(('score:' + str(score)))\n return score\nif (__name__ == '__main__'):\n score = main('')\n" ]
[ [ "tensorflow.keras.models.load_model", "tensorflow.Graph", "pandas.read_csv", "sklearn.model_selection.train_test_split", "tensorflow.compat.v1.Session", "sklearn.preprocessing.Normalizer" ] ]
trh3/clpipe
[ "e783fccc1ed530ea59c2047d0b8ecb26a2f98204" ]
[ "clpipe/fmri_postprocess.py" ]
[ "import os\nimport glob\nimport click\nimport pandas\nimport nibabel as nib\nfrom .batch_manager import BatchManager, Job\nfrom .config_json_parser import ClpipeConfigParser\nimport json\nfrom pkg_resources import resource_stream, resource_filename\nimport clpipe.postprocutils\nimport numpy\nimport logging\nimport gc\nimport psutil\nimport sys\nfrom .error_handler import exception_handler\n#import nipy.modalities.fmri.hrf\nimport re\n\n@click.command()\n@click.argument('subjects', nargs=-1, required=False, default=None)\n@click.option('-config_file', type=click.Path(exists=True, dir_okay=False, file_okay=True), default=None, help = 'Use a given configuration file. If left blank, uses the default config file, requiring definition of BIDS, working and output directories.')\n@click.option('-target_dir', type=click.Path(exists=True, dir_okay=True, file_okay=False), help='Which fmriprep directory to process. If a configuration file is provided with a BIDS directory, this argument is not necessary. Note, must point to the ``fmriprep`` directory, not its parent directory.')\n@click.option('-target_suffix', help= 'Which file suffix to use. If a configuration file is provided with a target suffix, this argument is not necessary. Defaults to \"preproc_bold.nii.gz\"')\n@click.option('-output_dir', type=click.Path(dir_okay=True, file_okay=False), help = 'Where to put the postprocessed data. If a configuration file is provided with a output directory, this argument is not necessary.')\n@click.option('-output_suffix', help = 'What suffix to append to the postprocessed files. If a configuration file is provided with a output suffix, this argument is not necessary.')\n@click.option('-task', help = 'Which task to postprocess. If left blank, defaults to all tasks.')\n@click.option('-TR', help = 'The TR of the scans. If a config file is not provided, this option is required. If a config file is provided, this information is found from the sidecar jsons.')\n@click.option('-processing_stream', help = 'Optional processing stream selector.')\n@click.option('-log_dir', type=click.Path(dir_okay=True, file_okay=False), help = 'Where to put HPC output files. If not specified, defaults to <outputDir>/batchOutput.')\n@click.option('-beta_series', is_flag = True, default = False, help = \"Flag to activate beta-series correlation correlation. ADVANCED METHOD, refer to the documentation.\")\n@click.option('-submit', is_flag = True, default=False, help = 'Flag to submit commands to the HPC.')\n@click.option('-batch/-single', default=True, help = 'Submit to batch, or run in current session. Mainly used internally.')\n@click.option('-debug', is_flag = True, default=False, help = 'Print detailed processing information and traceback for errors.')\ndef fmri_postprocess(config_file=None, subjects=None, target_dir=None, target_suffix=None, output_dir=None,\n output_suffix=None, log_dir=None,\n submit=False, batch=True, task=None, tr=None, processing_stream = None, debug = False, beta_series = False):\n \"\"\"This command runs an fMRIprep'ed dataset through additional processing, as defined in the configuration file. To run specific subjects, specify their IDs. If no IDs are specified, all subjects are ran.\"\"\"\n if not debug:\n sys.excepthook = exception_handler\n logging.basicConfig(level=logging.INFO)\n else:\n logging.basicConfig(level=logging.DEBUG)\n\n if config_file is None and tr is None:\n raise ValueError('No config file and no specified TR. 
Please include one.')\n\n config = ClpipeConfigParser()\n config.config_updater(config_file)\n config.setup_postproc(target_dir, target_suffix, output_dir, output_suffix, beta_series,\n log_dir)\n config.validate_config()\n if beta_series:\n raise ValueError(\"At this time, the beta series functionality is no longer working due to incompatibilities between packages.\")\n output_type = 'BetaSeriesOptions'\n else:\n output_type = 'PostProcessingOptions'\n if config_file is None:\n config_file = resource_filename(__name__, \"data/defaultConfig.json\")\n\n alt_proc_toggle = False\n if processing_stream is not None:\n\n processing_stream_config = config.config['ProcessingStreams']\n processing_stream_config = [i for i in processing_stream_config if i['ProcessingStream'] == processing_stream]\n if len(processing_stream_config) == 0:\n raise KeyError('The processing stream you specified was not found.')\n alt_proc_toggle = True\n\n if alt_proc_toggle:\n if beta_series:\n config.update_processing_stream(processing_stream, processing_stream_config[0]['BetaSeriesOptions']['OutputDirectory'],\n processing_stream_config[0]['BetaSeriesOptions']['OutputSuffix'],\n processing_stream_config[0]['BetaSeriesOptions']['LogDirectory'])\n config.config['BetaSeriesOptions'].update(processing_stream_config[0]['BetaSeriesOptions'])\n else:\n config.config['PostProcessingOptions'].update(processing_stream_config[0]['PostProcessingOptions'])\n config.update_processing_stream(processing_stream, processing_stream_config[0]['PostProcessingOptions']['OutputDirectory'],\n processing_stream_config[0]['PostProcessingOptions']['OutputSuffix'],\n processing_stream_config[0]['PostProcessingOptions']['LogDirectory'])\n\n\n\n\n if not subjects:\n subjectstring = \"ALL\"\n sublist = [o.replace('sub-', '') for o in os.listdir(config.config[output_type]['TargetDirectory'])\n if os.path.isdir(os.path.join(config.config[output_type]['TargetDirectory'], o)) and 'sub-' in o]\n else:\n subjectstring = \" , \".join(subjects)\n sublist = subjects\n\n submission_string = '''fmri_postprocess -config_file={config} -target_dir={targetDir} -target_suffix={targetSuffix} ''' \\\n '''-output_dir={outputDir} -output_suffix={outputSuffix} {procstream} -log_dir={logOutputDir} {taskString} {trString} {beta_series} -single {sub}'''\n task_string = \"\"\n tr_string = \"\"\n beta_series_string = \"\"\n if task is not None:\n task_string = '-task='+task\n if tr is not None:\n tr_string = '-tr='+tr\n if beta_series:\n beta_series_string = '-beta_series'\n if processing_stream is not None:\n procstream = \"-processing_stream=\" + processing_stream\n else:\n procstream = \"\"\n if batch:\n config_string = config.config_json_dump(config.config[output_type]['OutputDirectory'], os.path.basename(config_file))\n batch_manager = BatchManager(config.config['BatchConfig'], config.config[output_type]['LogDirectory'])\n batch_manager.update_mem_usage(config.config['PostProcessingOptions']['PostProcessingMemoryUsage'])\n batch_manager.update_time(config.config['PostProcessingOptions']['PostProcessingTimeUsage'])\n batch_manager.update_nthreads(config.config['PostProcessingOptions']['NThreads'])\n batch_manager.update_email(config.config[\"EmailAddress\"])\n for sub in sublist:\n sub_string_temp = submission_string.format(\n config=config_string,\n targetDir=config.config[output_type]['TargetDirectory'],\n targetSuffix=config.config[output_type]['TargetSuffix'],\n outputDir=config.config[output_type]['OutputDirectory'],\n 
outputSuffix=config.config[output_type]['OutputSuffix'],\n procstream = procstream,\n taskString = task_string,\n trString = tr_string,\n logOutputDir=config.config[output_type]['LogDirectory'],\n beta_series = beta_series_string,\n sub=sub\n )\n if debug:\n sub_string_temp = sub_string_temp + \" -debug\"\n\n batch_manager.addjob(Job(\"PostProcessing\" + sub, sub_string_temp))\n if submit:\n batch_manager.createsubmissionhead()\n batch_manager.compilejobstrings()\n batch_manager.submit_jobs()\n else:\n batch_manager.createsubmissionhead()\n batch_manager.compilejobstrings()\n click.echo(batch_manager.print_jobs())\n else:\n for sub in subjects:\n logging.debug(beta_series)\n logging.info('Running Subject ' + sub)\n _fmri_postprocess_subject(config, sub, task, tr, beta_series)\n\n\ndef _fmri_postprocess_subject(config, subject, task, tr=None, beta_series = False):\n if beta_series:\n output_type = 'BetaSeriesOptions'\n else:\n output_type = 'PostProcessingOptions'\n search_string = os.path.abspath(\n os.path.join(config.config[output_type]['TargetDirectory'], \"sub-\" + subject, \"**\",\n \"*\" + config.config[output_type]['TargetSuffix']))\n\n subject_files = glob.glob(search_string, recursive=True)\n if config.config['PostProcessingOptions'][\"DropCSV\"] is not \"\":\n drop_tps = pandas.read_csv(config.config['PostProcessingOptions'][\"DropCSV\"])\n\n logging.info('Finding Image Files')\n for image in subject_files:\n if task is None or 'task-' + task in image:\n logging.info('Processing ' + image)\n try:\n tps_drop = None\n temp = None\n if config.config['PostProcessingOptions'][\"DropCSV\"] is not \"\":\n temp = drop_tps[drop_tps['file_name'].str.match(os.path.basename(image))]['TR_round']\n if len(temp) is 1:\n tps_drop = int(temp)\n logging.info('Found drop TP info, will remove last ' + str(tps_drop) + ' time points')\n else:\n tps_drop = None\n _fmri_postprocess_image(config, image, task, tr, beta_series, tps_drop)\n except Exception as err:\n logging.exception(err)\n\n\ndef _fmri_postprocess_image(config, file, task = None, tr=None, beta_series = False, drop_tps = None):\n confound_regressors = _find_confounds(config, file)\n output_file_path = _build_output_directory_structure(config, file, beta_series)\n\n if os.path.exists(output_file_path):\n logging.info(\"Output File Exists! Skipping.\")\n return 0\n\n logging.info('Looking for: ' + confound_regressors)\n\n if not os.path.exists(confound_regressors):\n logging.warning('Could not find a confound file for ' + file + \". 
Moving onto next scan\")\n return\n else:\n logging.info('Found confound regressors')\n confounds, fdts = _regression_prep(config, confound_regressors)\n if drop_tps is not None:\n confounds = confounds.iloc[:(confounds.shape[0]-(drop_tps))]\n logging.info('Removing last ' + str(drop_tps) + ' time points')\n fdts = fdts.iloc[:(fdts.shape[0]-(drop_tps))]\n if tr is None:\n image_json_path = _find_json(config, file)\n with open(os.path.abspath(image_json_path), \"r\") as json_path:\n image_json = json.load(json_path)\n tr = float(image_json['RepetitionTime'])\n logging.info('TR found: ' + str(tr))\n image = nib.load(file)\n data = image.get_fdata()\n data = data.astype(numpy.float32)\n orgImageShape = data.shape\n coordMap = image.affine\n data = data.reshape((numpy.prod(numpy.shape(data)[:-1]), data.shape[-1]))\n data = numpy.transpose(data)\n if drop_tps is not None:\n data = data[0:(data.shape[0]-(drop_tps)), :]\n orgImageShape = list(orgImageShape)\n orgImageShape[3] = data.shape[0]\n orgImageShape = tuple(orgImageShape)\n row_means = data.mean(axis=0)\n data = (data - data.mean(axis=0))\n if not beta_series:\n regress_toggle = config.config['PostProcessingOptions']['Regress']\n\n scrub_toggle = False\n if config.config['PostProcessingOptions']['Scrubbing']:\n logging.debug('Scrubbing Toggle Activated')\n scrub_toggle = True\n scrub_ahead = int(config.config['PostProcessingOptions']['ScrubAhead'])\n scrub_behind = int(config.config['PostProcessingOptions']['ScrubBehind'])\n scrub_contig = int(config.config['PostProcessingOptions']['ScrubContig'])\n fd_thres = float(config.config['PostProcessingOptions']['ScrubFDThreshold'])\n orig_fdts = fdts\n if config.config['PostProcessingOptions']['RespNotchFilter']:\n fdts = _notch_filter_fd(config, confound_regressors, tr, drop_tps)\n\n scrubTargets = clpipe.postprocutils.utils.scrub_setup(fdts, fd_thres, scrub_behind, scrub_ahead, scrub_contig)\n\n hp = float(config.config['PostProcessingOptions']['FilteringHighPass'])\n lp = float(config.config['PostProcessingOptions']['FilteringLowPass'])\n filter_toggle = False\n if hp > 0 or lp > 0:\n logging.info('Filtering Toggle Activated')\n filter_toggle = True\n order = int(config.config['PostProcessingOptions']['FilteringOrder'])\n filt = clpipe.postprocutils.utils.calc_filter(hp, lp, tr, order)\n confounds = clpipe.postprocutils.utils.apply_filter(filt, confounds)\n\n if scrub_toggle and filter_toggle:\n logging.info('Using Spectral Interpolation')\n ofreq = int(config.config['PostProcessingOptions']['OversamplingFreq'])\n hfreq = float(config.config['PostProcessingOptions']['PercentFreqSample'])\n logging.debug('Memory Usage Before Spectral Interpolation:' +str(psutil.virtual_memory().total >> 30) +' GB')\n data = clpipe.postprocutils.spec_interpolate.spec_inter(data, tr, ofreq, scrubTargets, hfreq, binSize=config.config['PostProcessingOptions'][\"SpectralInterpolationBinSize\"])\n\n gc.collect()\n\n logging.debug('Memory Usage After Spectral Interpolation GC:' +str(psutil.virtual_memory().total >> 30) +' GB')\n\n\n\n if filter_toggle:\n logging.info('Filtering Data Now')\n data = clpipe.postprocutils.utils.apply_filter(filt, data)\n if regress_toggle:\n logging.info('Regressing Data Now')\n logging.debug(str(confounds.shape))\n logging.debug(str(data.shape))\n data = clpipe.postprocutils.utils.regress(confounds, data)\n if scrub_toggle:\n logging.info('Scrubbing data Now')\n data = clpipe.postprocutils.utils.scrub_image(data, scrubTargets)\n\n data = (data + row_means)\n\n data = 
numpy.transpose(data)\n data = data.reshape(orgImageShape)\n data32 = numpy.float32(data)\n out_image = nib.Nifti1Image(data32, coordMap)\n\n output_file_path = _build_output_directory_structure(config, file)\n logging.info('Saving post processed data to ' + output_file_path)\n nib.save(out_image, output_file_path)\n\n if scrub_toggle:\n file_name = os.path.basename(file)\n sans_ext = os.path.splitext(os.path.splitext(file_name)[0])[0]\n toOut = numpy.column_stack([numpy.arange(1, len(scrubTargets) + 1, 1), numpy.asarray(scrubTargets), fdts, orig_fdts])\n logging.info('Saving Scrub Targets to ' + os.path.join(os.path.dirname(output_file_path),\n sans_ext + \"_scrubTargets.csv\"))\n numpy.savetxt(os.path.join(os.path.dirname(output_file_path), sans_ext + \"_scrubTargets.csv\"), toOut,\n delimiter=\",\")\n else:\n beta_series_options = config.config['BetaSeriesOptions']['TaskSpecificOptions']\n\n avail_tasks = [x['Task'] for x in beta_series_options]\n logging.debug(avail_tasks)\n img_task = _find_image_task(file)\n logging.debug(img_task)\n if img_task not in avail_tasks:\n logging.info('Did not find beta series specification for the task ' +img_task+ ' for image ' +file )\n return\n else:\n beta_series_options = beta_series_options[avail_tasks.index(img_task)]\n\n\n hp = float(config.config['BetaSeriesOptions']['FilteringHighPass'])\n lp = float(config.config['BetaSeriesOptions']['FilteringLowPass'])\n\n events_file = _find_events(config, file)\n logging.debug(events_file)\n if os.path.exists(events_file):\n confounds, fdts = _regression_prep(config, confound_regressors, beta_series)\n ntp = len(confounds)\n if tr is None:\n image_json_path = _find_json(config, file)\n with open(os.path.abspath(image_json_path), \"r\") as json_path:\n image_json = json.load(json_path)\n tr = float(image_json['RepetitionTime'])\n filter_toggle = False\n filt = None\n if hp > 0 or lp > 0:\n logging.info('Filtering Toggle Activated')\n filter_toggle = True\n order = int(config.config['BetaSeriesOptions']['FilteringOrder'])\n filt = clpipe.postprocutils.utils.calc_filter(hp, lp, tr, order)\n confounds = clpipe.postprocutils.utils.apply_filter(filt, confounds)\n filt_ev_array, valid_events = _ev_mat_prep(events_file, filt, tr, ntp, beta_series_options)\n\n image = nib.load(file)\n data = image.get_fdata()\n data = data.astype(numpy.float32)\n orgImageShape = data.shape\n coordMap = image.affine\n data = data.reshape((numpy.prod(numpy.shape(data)[:-1]), data.shape[-1]))\n data = numpy.transpose(data)\n data = (data - data.mean(axis=0))\n logging.debug(filt_ev_array)\n beta_image_2d = _beta_series_calc(data, filt_ev_array, confounds)\n beta_series_dims = orgImageShape[:-1]\n beta_series_dims = beta_series_dims + (len(valid_events),)\n beta_3d = beta_image_2d.transpose().reshape(beta_series_dims)\n beta_image = nib.Nifti1Image(beta_3d, coordMap)\n output_file_path = _build_output_directory_structure(config, file, beta_series)\n events_output = os.path.splitext(os.path.splitext(output_file_path)[0])[0] + \"_usedevents.tsv\"\n nib.save(beta_image, output_file_path)\n valid_events.to_csv(events_output, sep = ' ')\n else:\n logging.info(\"Did not find an events file for \" + file)\n return\n\ndef _find_image_task(filename):\n comps = filename.split(\"_\")\n task_comp = [x for x in comps if 'task-' in x][0]\n task_name = task_comp.split('-')[1]\n return task_name\n\n\ndef _ev_mat_prep(event_file, filt, TR, ntp, config_block):\n events = pandas.read_table(event_file)\n #Change back to 'trial_type' once testing is 
complete\n trial_types = events.loc[:,'trial_type'].tolist()\n logging.debug(trial_types)\n logging.debug(config_block['ExcludeTrialTypes'])\n valid_trials = [ind for ind, x in enumerate(trial_types) if x not in [config_block['ExcludeTrialTypes']]]\n valid_events = events.iloc[valid_trials,:]\n\n timeCourse = numpy.arange(0, 32 + TR / 16.0, (TR / 16.0))\n time_up = numpy.arange(0, TR * ntp, TR / 16.0)\n n_up = len(time_up)\n hrf = 0\n indexSample = numpy.arange(0, TR * ntp / (TR / 16.0), TR / (TR / 16.0))\n indexSample = indexSample.astype(\"int\")\n eventArray = numpy.zeros((ntp, len(valid_trials)))\n for index, row in valid_events.iterrows():\n ev_loop = numpy.zeros(n_up)\n index1 = numpy.logical_and((time_up >= row[\"onset\"]), (time_up <= row[\"onset\"] + row[\"duration\"]))\n ev_loop[index1] = 1\n ev_loop = numpy.convolve(ev_loop, hrf)\n ev_loop = ev_loop[:-(len(hrf) - 1)]\n ev_loop = ev_loop[indexSample]\n eventArray[:, index] = ev_loop\n if filt is not None:\n filt_event_array = clpipe.postprocutils.utils.apply_filter(filt, eventArray)\n else:\n filt_event_array = eventArray\n return filt_event_array, valid_events\n\n\ndef _beta_series_calc(data, filt_ev_mat, filt_confound_mat):\n beta_maker = numpy.zeros(filt_ev_mat.shape)\n logging.debug(beta_maker.shape)\n for index in range(filt_ev_mat.shape[1]):\n logging.debug(filt_ev_mat[:,index].shape)\n logging.debug(filt_confound_mat.shape)\n logging.debug((numpy.sum(filt_ev_mat,1) - filt_ev_mat[:,index]).shape)\n temp_mat = numpy.concatenate([numpy.expand_dims(filt_ev_mat[:,index],1), numpy.expand_dims(numpy.sum(filt_ev_mat,1) - filt_ev_mat[:,index],1), filt_confound_mat],1)\n temp_beta = numpy.linalg.pinv(temp_mat)\n logging.debug(temp_beta.shape)\n beta_maker[:, index] = temp_beta[0,:]\n\n betas = numpy.matmul(beta_maker.transpose(), data)\n return betas\n\n\ndef _regression_prep(config, confound_filepath):\n confounds = pandas.read_table(confound_filepath, dtype=\"float\", na_values=\"n/a\")\n confounds = confounds.fillna(0)\n if len(config.config[\"PostProcessingOptions\"]['Confounds']) > 0:\n cons_re = [re.compile(regex_wildcard(co)) for co in config.config[\"PostProcessingOptions\"]['Confounds']]\n target_cols = []\n for reg in cons_re:\n logging.debug(str([reg.match(col).group() for col in confounds.columns if reg.match(col) is not None]))\n target_cols.extend([reg.match(col).group() for col in confounds.columns if reg.match(col) is not None])\n logging.debug(\"Confound Columns \" + str(target_cols))\n confounds_mat = confounds[target_cols]\n if len(config.config[\"PostProcessingOptions\"]['ConfoundsQuad']) > 0:\n cons_re = [re.compile(regex_wildcard(co)) for co in config.config[\"PostProcessingOptions\"]['ConfoundsQuad']]\n target_cols = []\n for reg in cons_re:\n target_cols.extend(\n [reg.match(col).group() for col in confounds.columns if reg.match(col) is not None])\n logging.debug(\"Quad Columns \" + str(target_cols))\n confounds_quad_mat = confounds[target_cols]\n confounds_quad_mat.rename(columns=lambda x: x + \"_quad\", inplace=True)\n confounds_quad_mat = confounds_quad_mat ** 2\n confounds_mat = pandas.concat([confounds_mat, confounds_quad_mat], axis=1, ignore_index=True)\n logging.debug(str(confounds_mat.shape))\n if len(config.config[\"PostProcessingOptions\"]['ConfoundsDerive']) > 0:\n cons_re = [re.compile(regex_wildcard(co)) for co in config.config[\"PostProcessingOptions\"]['ConfoundsDerive']]\n target_cols = []\n for reg in cons_re:\n target_cols.extend(\n [reg.match(col).group() for col in confounds.columns if 
reg.match(col) is not None])\n logging.debug(\"Lagged Columns \" + str(target_cols))\n confounds_lagged_mat = confounds[target_cols]\n confounds_lagged_mat.rename(columns=lambda x: x + \"_lagged\", inplace=True)\n confounds_lagged_mat = confounds_lagged_mat.diff()\n confounds_mat = pandas.concat([confounds_mat, confounds_lagged_mat], axis=1, ignore_index=True)\n logging.debug(str(confounds_mat.shape))\n logging.debug(str(confounds_mat.head(5)))\n if len(config.config[\"PostProcessingOptions\"]['ConfoundsQuadDerive']) > 0:\n cons_re = [re.compile(regex_wildcard(co)) for co in\n config.config[\"PostProcessingOptions\"]['ConfoundsQuadDerive']]\n target_cols = []\n for reg in cons_re:\n target_cols.extend(\n [reg.match(col).group() for col in confounds.columns if reg.match(col) is not None])\n logging.debug(\"Quadlagged Columns \" + str(target_cols))\n confounds_qlagged_mat = confounds[target_cols]\n confounds_qlagged_mat = confounds_qlagged_mat.diff()\n confounds_qlagged_mat = confounds_qlagged_mat ** 2\n confounds_qlagged_mat.rename(columns=lambda x: x + \"_qlagged\", inplace=True)\n confounds_mat = pandas.concat([confounds_mat, confounds_qlagged_mat], axis=1, ignore_index=True)\n logging.debug(str(confounds_mat.shape))\n\n fd = confounds[config.config[\"PostProcessingOptions\"][\"ScrubVar\"]]\n confounds_mat = confounds_mat.fillna(0)\n confounds_mat = numpy.asarray(confounds_mat)\n return confounds_mat, fd\n\n\n# Rewrite find json function, use task information to be very specific.\ndef _find_json(config, filepath):\n file_name = os.path.basename(filepath)\n sans_ext = os.path.splitext(os.path.splitext(file_name)[0])[0]\n components = sans_ext.split('_')\n\n jsons = glob.glob(os.path.join(config.config['FMRIPrepOptions']['BIDSDirectory'], '**', '*.json'), recursive=True)\n logging.debug(jsons)\n task = [task_name for task_name in components if \"task-\" in task_name][0]\n logging.debug(task)\n top_level_json = [json for json in jsons if task + \"_bold.json\" in json]\n\n if len(top_level_json) is not 0:\n target_json = top_level_json[0]\n\n sub_level_json = [json for json in jsons if \"_\".join(components[0:2]) + \"_bold.json\" in json]\n logging.debug(\"_\".join(components[0:2]))\n if len(sub_level_json) is not 0:\n target_json = sub_level_json[0]\n\n scan_level_json = [json for json in jsons if \"_\".join(components[0:3]) + \"_bold.json\" in json]\n \n if len(scan_level_json) is not 0:\n target_json = scan_level_json[0]\n else:\n scan_level_json = [json for json in jsons if \"_\".join(components[0:4]) + \"_bold.json\" in json]\n if len(scan_level_json) is not 0:\n target_json = scan_level_json[0]\n else:\n scan_level_json = [json for json in jsons if \"_\".join(components[0:5]) + \"_bold.json\" in json]\n if len(scan_level_json) is not 0:\n target_json = scan_level_json[0]\n logging.debug(\"_\".join(components[0:5]))\n\n logging.debug(target_json)\n return target_json\n\n\n\ndef _find_confounds(config, filepath):\n file_name = os.path.basename(filepath)\n sans_ext = os.path.splitext(os.path.splitext(file_name)[0])[0]\n root_file = sans_ext[:sans_ext.index('space')]\n return os.path.join(os.path.dirname(filepath), root_file + config.config['PostProcessingOptions']['ConfoundSuffix'])\n\n\ndef _find_events(config, filepath):\n session_toggle = False\n if 'ses-' in filepath:\n session_toggle = True\n\n\n file_name = os.path.basename(filepath)\n file_name = os.path.splitext(os.path.splitext(file_name)[0])[0]\n file_components = file_name.split(\"_\")\n\n file_components = [x for x in 
file_components if 'desc-' not in x]\n file_components = [x for x in file_components if 'space-' not in x]\n file_components = [x for x in file_components if 'bold' not in x]\n sub_comp = [x for x in file_components if 'sub-' in x]\n ses_comp = [x for x in file_components if 'ses-' in x]\n\n\n event_name = '_'.join(file_components)+\"_events.tsv\"\n if session_toggle:\n event_path = os.path.join(config.config['FMRIPrepOptions']['BIDSDirectory'], sub_comp[0], ses_comp[0], 'func', event_name)\n else:\n event_path = os.path.join(config.config['FMRIPrepOptions']['BIDSDirectory'], sub_comp[0], 'func',\n event_name)\n return event_path\n\ndef _build_output_directory_structure(config, filepath, beta_series_toggle = False):\n output_type = 'PostProcessingOptions'\n if beta_series_toggle:\n output_type = 'BetaSeriesOptions'\n logging.debug(output_type)\n\n target_directory = filepath[filepath.find('sub-'):]\n target_directory = os.path.dirname(target_directory)\n target_directory = os.path.join(config.config[output_type]['OutputDirectory'], target_directory)\n logging.debug(target_directory)\n os.makedirs(target_directory, exist_ok=True)\n file_name = os.path.basename(filepath)\n sans_ext = os.path.splitext(os.path.splitext(file_name)[0])[0]\n logging.debug(config.config[output_type]['OutputSuffix'])\n file_name = sans_ext + '_' + config.config[output_type]['OutputSuffix']\n logging.debug(file_name)\n return os.path.join(target_directory, file_name)\n\n\ndef _notch_filter_fd(config, confounds_filepath, tr, drop_tps = None):\n confounds = pandas.read_table(confounds_filepath, dtype=\"float\", na_values=\"n/a\")\n confounds = confounds.fillna(0)\n if drop_tps is not None:\n confounds = confounds.iloc[:(confounds.shape[0]-drop_tps)]\n confounds = numpy.array(confounds[config.config[\"PostProcessingOptions\"][\"MotionVars\"]])\n band = config.config['PostProcessingOptions']['RespNotchFilterBand']\n filt_fd = clpipe.postprocutils.utils.notch_filter(confounds, band, tr)\n return filt_fd\n\ndef regex_wildcard(string):\n return '^'+re.sub(\"\\*\", \".*\", string)+'$'\n" ]
[ [ "numpy.convolve", "pandas.concat", "pandas.read_csv", "numpy.expand_dims", "numpy.logical_and", "numpy.asarray", "numpy.arange", "pandas.read_table", "numpy.linalg.pinv", "numpy.shape", "numpy.float32", "numpy.transpose", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
pimier15/pyMLBasic
[ "11421946ac574362b0371b8af3c277dc7e9dbc0b" ]
[ "pyMLBasic/pyMLBasic/3 SimpleClassifier/RandomForest.py" ]
[ "import numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom Common.LoadIris import LoadIris\n\nforest = RandomForestClassifier(criterion = 'entropy' , n_estimators = 10 , random_state = 0 , n_jobs = 2)\n \n" ]
[ [ "sklearn.ensemble.RandomForestClassifier" ] ]
Covid19-US-Dashboard/COVID19_Dashboard
[ "fa5ef6bdc6fab7f2e5903077b335a78991524a00" ]
[ "python_dash/app.py" ]
[ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\n\nfrom dash.dependencies import Input, Output, State\nfrom plotly import graph_objs as go\nfrom plotly.graph_objs import *\nfrom datetime import datetime as dt\n\napp = dash.Dash(\n __name__, meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width\"}]\n)\nserver = app.server\n\nstate_list = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Delaware', \\\n 'Diamond Princess', 'District of Columbia', 'Florida', 'Georgia', 'Grand Princess', 'Guam', 'Hawaii', \n 'Idaho', 'Illinois', 'Indiana', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine', 'Maryland', 'Massachusetts', \\\n 'Michigan', 'Minnesota', 'Mississippi', 'Missouri', 'Montana', 'Nebraska', 'Nevada', 'New Hampshire', 'New Jersey', \\\n 'New Mexico', 'New York', 'North Carolina', 'North Dakota', 'Northern Mariana Islands', 'Ohio', 'Oklahoma', 'Oregon', \\\n 'Pennsylvania', 'Puerto Rico', 'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee', 'Texas', 'Utah', 'Vermont', \\\n 'Virgin Islands', 'Virginia', 'Washington', 'West Virginia', 'Wisconsin', 'Wyoming']\n\ndef readFile(base_path, file, choice, col_name):\n df_file = pd.read_csv(base_path+file)\n\n df_file = df_file.drop(\n [\"UID\", \"iso2\", \"iso3\", \"code3\", \"FIPS\", \"Country_Region\", \"Combined_Key\"], axis=1)\n if choice == 'Deaths':\n df_file = df_file.drop([\"Population\"], axis=1)\n df_county = df_file.iloc[:, 0:4]\n df_diff = df_file.iloc[:, 4:len(df_file.columns)].diff(axis=1)\n df_diff = df_county.join(df_diff)\n df_file = df_file.melt(id_vars=col_name,\n var_name=\"Date\",\n value_name=choice)\n df_diff = df_diff.melt(id_vars=col_name,\n var_name=\"Date\",\n value_name=choice)\n df_diff = df_diff.rename(columns={choice: \"Daily_\"+choice})\n df_file = df_file.merge(df_diff, on=['Date']+col_name)\n df_file['Date'] = pd.to_datetime(df_file['Date'])\n return df_file\n\ndef readData():\n col_name = [\"Province_State\", \"Admin2\", \"Lat\",\"Long_\"]\n base_path = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/'\n df = readFile( base_path, 'time_series_covid19_confirmed_US.csv', 'Confirmed', col_name)\n df2 = readFile(base_path, 'time_series_covid19_deaths_US.csv', 'Deaths', col_name)\n df = df.merge(df2, on=['Date', 'Province_State', 'Admin2'])\n\n df = df.drop(['Lat_y', 'Long__y'], axis=1)\n df = df.rename(columns={\"Lat_x\": \"Latitude\", \"Long__x\": \"Longitude\"})\n df = df.drop(df[(df.Confirmed == 0) & (df.Deaths == 0)].index)\n df[[\"Confirmed\", \"Deaths\", \"Daily_Confirmed\", \"Daily_Deaths\"]] = \\\n df[[\"Confirmed\", \"Deaths\", \"Daily_Confirmed\", \"Daily_Deaths\"]].fillna(0)\n return df[[\"Province_State\", \"Admin2\", \"Latitude\", \"Longitude\",\"Date\",\"Confirmed\",\"Deaths\",\n \"Daily_Confirmed\", \"Daily_Deaths\"]]\n\n# Plotly mapbox public token\nmapbox_access_token = \"pk.eyJ1IjoicGxvdGx5bWFwYm94IiwiYSI6ImNqdnBvNDMyaTAxYzkzeW5ubWdpZ2VjbmMifQ.TXcBE-xg9BFdV2ocecc_7g\"\n\n# Load location coordinates\ndf_stateLoc = pd.read_csv(\"data/statelatlong.csv\")\n\n# Initialize data frame\ndef loadData():\n df = readData()\n\n df[\"Admin2\"] = df[\"Admin2\"].fillna(df[\"Province_State\"])\n df_state = df.groupby([\"Province_State\", \"Date\"]).sum().reset_index()\n df_state = df_state[[\"Province_State\",\"Date\",\"Confirmed\",\"Deaths\",\"Daily_Confirmed\",\"Daily_Deaths\"]]\n df_state = pd.merge(left=df_state, 
right=df_stateLoc, how='left', left_on='Province_State', right_on='State')\n df_state = df_state.drop([\"State\"],axis=1)\n return df, df_state\n\ndef getLocMap():\n state_admin2 = [df[df[\"Province_State\"]==i][\"Admin2\"].unique().tolist() for i in state_list]\n state_dictionary = dict(zip(state_list, state_admin2))\n return state_dictionary\n\ndef getCurrentStatus():\n startDate = df['Date'].iloc[0]\n dayData = df[[\"Date\",\"Confirmed\",\"Deaths\"]].groupby([\"Date\"]).sum().reset_index()\n currentData = dayData[-1:]\n currentDate = currentData.iloc[0][\"Date\"]\n currentConfirmed = currentData.iloc[0][\"Confirmed\"]\n currentDeaths = currentData.iloc[0][\"Deaths\"]\n return startDate, dayData, currentDate, currentConfirmed, currentDeaths\n\n# Load options\ndef getDropdownOptions():\n state_options = [{\"label\":i,\"value\":i} for i in state_list]\n state_options = [{\"label\":\"State\",\"value\":\"US\"}]+state_options\n admin2_list = df[\"Admin2\"].unique().tolist()\n admin2_list.sort()\n admin2_options = [{\"label\":i,\"value\":i} for i in admin2_list]\n admin2_options = [{\"label\":\"County\",\"value\":\"N/A\"}] + admin2_options\n return state_options, admin2_options\n\ndf, df_state = loadData()\nstate_dictionary = getLocMap()\nstartDate, dayData, currentDate, currentConfirmed, currentDeaths = getCurrentStatus()\nstate_options, admin2_options = getDropdownOptions()\n\n# Scale\ncaseConfirmedScale = {range(0,1000): 1,\n range(1000,5000): 5,\n range(5000,10000): 10,\n range(10000,50000): 15,\n range(50000,100000): 20,\n range(100000,500000): 25,\n range(500000,1000000): 30,\n range(1000000,5000000): 35\n }\ncaseDeathScale = {range(1, 100): 1,\n range(100,500): 5,\n range(500,1000): 10,\n range(1000,5000): 15,\n range(5000,10000): 20,\n range(10000,50000): 25,\n range(50000,100000): 30,\n range(100000,500000): 35,\n }\n\n# Layout of Dash App\napp.layout = html.Div(\n children=[\n html.Div(\n className=\"row\",\n children=[\n # Column \n html.Div(\n className=\"twelve columns\",\n children=[\n html.H2(\"COVID 19 US Case Map\"),\n ],\n ),\n # Column \n html.Div(\n className=\"four columns\",\n children=[\n html.P(\n \"\"\"Data Source: Johns Hopkins CSSE (Subjected to data structure change).\"\"\"\n ),\n html.P(\"By {}, there are {:,} confirmed cases and {:,} deaths.\"\n .format(currentDate.strftime(\"%Y-%m-%d\"),\n currentConfirmed,\n currentDeaths),\n id=\"summary\"),\n html.Div(\n className=\"div-for-dropdown\",\n children=[\n dcc.DatePickerSingle(\n id=\"date-picker\",\n min_date_allowed=startDate,\n max_date_allowed=currentDate,\n initial_visible_month=currentDate,\n date=currentDate.date(),\n display_format=\"MMMM D, YYYY\",\n style={\"border\": \"0px solid black\"},\n ),\n ],\n ),\n html.P(\"Confirmed: 0\",\n id=\"graphConfirmed\"),\n html.P(\"Deaths: 0\",\n id=\"graphDeaths\"),\n\n # Change to side-by-side for mobile layout\n html.Div(\n className=\"row\",\n children=[\n html.Div(\n className=\"div-for-dropdown\",\n children=[\n # Dropdown to select times\n dcc.Dropdown(\n id=\"case-selector\",\n options=[\n {\n \"label\": \"Confirmed\",\n \"value\": \"Confirmed\",\n },\n {\n \"label\": \"Deaths\",\n \"value\": \"Deaths\",\n } \n ],\n value=\"Confirmed\", \n ),\n # Dropdown to select times\n dcc.RadioItems(\n id=\"area-selector\",\n options=[\n {\n \"label\": \"By County\",\n \"value\": \"County\",\n },\n {\n \"label\": \"By State\",\n \"value\": \"State\",\n }, \n ],\n value=\"County\", \n ), \n ],\n ),\n ],\n ), \n ],\n ),\n \n # Column for app graphs and plots\n html.Div(\n 
className=\"seven columns\",\n children=[\n dcc.Graph(id=\"map-graph\"), \n ],\n ),\n ],\n ),\n html.Div(\n className=\"row\",\n children=[\n # Column \n html.Div(\n className=\"two columns\",\n children=[\n \n # Change location\n html.Div(\n children=[\n html.Div(\n className=\"twelve columns\",\n children=[\n dcc.Dropdown( \n options=state_options,\n id=\"state-dropdown\",\n placeholder=\"State\"\n ),\n ],\n ),\n html.Div(\n className=\"twelve columns\",\n children=[\n dcc.Dropdown( \n options=admin2_options,\n id=\"county-dropdown\",\n placeholder=\"County\",\n style={\n \"marginBottom\":\"10px\"\n }\n ),\n ],\n ),\n ],\n ),\n html.P(id=\"area-place\"),\n html.P(id=\"area-confirmed\"),\n html.P(id=\"area-deaths\")\n ],\n ),\n html.Div(\n className=\"six columns\",\n children=[\n dcc.Graph(id=\"time-series-plot\"), \n ],\n ),\n html.Div(\n className=\"four columns\",\n children=[\n dcc.Graph(id=\"pie_graph\"), \n ],\n style={\n 'marginLeft':\"0px\"\n }\n ),\n ],\n style={\n 'paddingLeft':'55px',\n 'paddingTop':'20px',\n }\n ),\n dcc.Markdown(\n children=[\n \"Copyright © 2020 Tianning Li. All rights reserved.\"\n ],\n style={\n 'paddingLeft':'55px',\n 'marginBottom':'20px',\n }\n ),\n ]\n)\n\n@app.callback(\n [\n Output(\"area-place\", \"children\"), \n Output(\"area-confirmed\", \"children\"), \n Output(\"area-deaths\", \"children\"), \n ],\n [\n Input(\"state-dropdown\",\"value\"),\n Input(\"county-dropdown\",\"value\"),\n ]\n)\ndef update_data_confirmed(state,county):\n if county != \"N/A\" and county != \"\" and county is not None: \n df_current = df[df[\"Admin2\"] == county]\n current_place = \"Area: {0}\".format(county)\n elif state != \"US\" and state !=\"\" and state is not None:\n df_current = df_state[df_state[\"Province_State\"] == state]\n current_place = \"Area: {0}\".format(state)\n else :\n df_current = df.groupby([\"Date\"]).sum().reset_index()\n current_place = \"Area: {0}\".format(\"US\")\n current_confirmed = \"Confirmed: {:,}\".format(df_current[\"Confirmed\"][-1:].values[0])\n current_deaths = \"Deaths: {:,}\".format(df_current[\"Deaths\"][-1:].values[0])\n return current_place, current_confirmed, current_deaths\n\n# Update county based on state\n@app.callback(\n Output(\"county-dropdown\",\"options\"),\n [\n Input(\"state-dropdown\",\"value\"),\n ]\n)\ndef updateOptions(state):\n if state == \"US\" or state ==\"\" or state is None:\n return admin2_options\n elif state in state_dictionary:\n county_list = state_dictionary[state]\n county_list.sort()\n county_options = [{\"label\":i,\"value\":i} for i in county_list]\n county_options = [{\"label\":\"Select County\",\"value\":\"N/A\"}]+county_options\n return county_options\n\n# Update state based on county\n@app.callback(\n Output(\"state-dropdown\",\"value\"),\n [\n Input(\"county-dropdown\",\"value\"),\n ]\n)\ndef updateState(county):\n for key, value in state_dictionary.items():\n if county in value:\n return key\n\n# Update Map Graph\n@app.callback(\n [\n Output(\"map-graph\", \"figure\"),\n Output(\"graphConfirmed\", \"children\"),\n Output(\"graphDeaths\", \"children\"),\n ],\n [\n Input(\"date-picker\", \"date\"),\n Input(\"case-selector\", \"value\"),\n Input(\"area-selector\", \"value\"),\n Input(\"state-dropdown\",\"value\"),\n Input(\"county-dropdown\",\"value\"),\n ],\n)\ndef update_graph(datePicked,casePicked,areaPicked,state,county):\n zoom = 2.7\n latInitial = 38.72490\n lonInitial = -95.61446\n bearing = 0\n df_area = df\n if areaPicked ==\"County\":\n df_area[\"hovertext\"] = df_area[\"Admin2\"]+\", 
\"+df_area[\"Province_State\"]\n elif areaPicked == \"State\":\n df_area = df_state\n df_area[\"hovertext\"] = df_area[\"Province_State\"]\n\n if county != \"N/A\" and county != \"\" and county is not None and county != \"Unassigned\": \n zoom = 6\n location = df[[\"Admin2\",\"Latitude\",\"Longitude\"]].drop_duplicates()\n location = location[location[\"Admin2\"] == county]\n latInitial = location['Latitude'].values[0]\n lonInitial = location['Longitude'].values[0]\n elif state != \"US\" and state !=\"\" and state is not None:\n zoom = 4.4\n location = df_stateLoc[df_stateLoc[\"State\"] == state]\n latInitial = location['Latitude'].values[0]\n lonInitial = location['Longitude'].values[0]\n\n df_area = df_area[df_area['Latitude'] != 0]\n date_picked = dt.strptime(datePicked, \"%Y-%m-%d\")\n listCoords = df_area[df_area[\"Date\"]==date_picked]\n d = caseDeathScale if casePicked==\"Deaths\" else caseConfirmedScale\n\n USmap = go.Figure(\n data=[\n # Data for all rides based on date and time\n Scattermapbox(\n lat=listCoords[\"Latitude\"],\n lon=listCoords[\"Longitude\"],\n mode=\"markers\",\n hoverinfo=\"text\",\n text = listCoords[\"hovertext\"]+\": \"+listCoords[casePicked].astype(str),\n marker=dict( \n color= 'red'if casePicked==\"Deaths\" else '#D79913',\n opacity=0.5,\n size=listCoords[casePicked].apply(lambda x: next((v for k, v in d.items() if int(x) in k), 0)).to_numpy(),\n ),\n ), \n ],\n layout=Layout(\n autosize=True,\n height=500,\n margin=go.layout.Margin(l=0, r=35, t=0, b=0),\n showlegend=False,\n mapbox=dict(\n accesstoken=mapbox_access_token,\n center=dict(lat=latInitial, lon=lonInitial), # 38.72490 # -95.61446\n style=\"dark\",\n bearing=bearing,\n zoom=zoom,\n ),\n \n updatemenus=[\n dict(\n buttons=(\n [\n dict(\n args=[\n {\n \"mapbox.zoom\": zoom,\n \"mapbox.center.lon\": \"-95.61446\",\n \"mapbox.center.lat\": \"38.72490\",\n \"mapbox.bearing\": 0,\n \"mapbox.style\": \"dark\",\n }\n ],\n label=\"Reset Zoom\",\n method=\"relayout\",\n )\n ]\n ),\n direction=\"left\",\n pad={\"r\": 0, \"t\": 0, \"b\": 0, \"l\": 0},\n showactive=False,\n type=\"buttons\",\n x=0.45,\n y=0.02,\n xanchor=\"left\",\n yanchor=\"bottom\",\n bgcolor=\"#323130\",\n borderwidth=1,\n bordercolor=\"#6d6d6d\",\n font=dict(color=\"#FFFFFF\"),\n )\n ],\n ),\n )\n dayData = df[[\"Date\",\"Confirmed\",\"Deaths\"]].groupby([\"Date\"]).sum().reset_index()\n day_data = dayData[dayData[\"Date\"]==date_picked]\n confirmedCase = \"Confirmed: {:,}\".format(day_data[\"Confirmed\"][-1:].values[0])\n deathCase = \"Deaths: {:,}\".format(day_data[\"Deaths\"][-1:].values[0])\n return USmap, confirmedCase, deathCase\n\n# Update plot\n@app.callback(\n Output(\"time-series-plot\", \"figure\"),\n [\n Input(\"case-selector\", \"value\"),\n Input(\"state-dropdown\",\"value\"),\n Input(\"county-dropdown\",\"value\"),\n ],\n)\ndef update_plot(casePicked, state,county):\n if county != \"N/A\" and county != \"\" and county is not None: \n df_day = df[df[\"Admin2\"] == county]\n elif state != \"US\" and state !=\"\" and state is not None:\n df_day = df_state[df_state[\"Province_State\"] == state]\n else :\n df_day = df.groupby([\"Date\"]).sum().reset_index()\n \n yVal = df_day[casePicked]\n yVal2 = df_day[\"Daily_\"+casePicked]\n \n fig = go.Figure()\n fig.add_trace(go.Scatter(\n x=df_day[\"Date\"],\n y=yVal,\n name=casePicked,\n line=dict(\n color='red' if casePicked==\"Deaths\" else '#D79913'\n ),\n ),\n )\n fig.add_trace(go.Bar(\n x=df_day[\"Date\"],\n y=yVal2,\n name=\"Daily_\"+casePicked,\n yaxis=\"y2\",\n marker=dict(\n 
color='red' if \"Daily_\"+casePicked==\"Daily_Deaths\" else '#D79913' \n ),\n ),\n )\n fig.update_layout(\n height=450,\n legend=dict(font=dict(size=10), orientation=\"h\"),\n title=\"Number of {} Cases\".format(casePicked),\n plot_bgcolor=\"#323130\",\n paper_bgcolor=\"#323130\",\n font=dict(family=\"Open Sans, sans-serif\", size=13, color=\"white\"),\n xaxis=dict(rangeslider=dict(visible=True)),\n yaxis=dict(\n zeroline=False,\n gridcolor='#6c6c6c',\n ),\n yaxis2=dict(\n anchor=\"free\",\n overlaying=\"y\",\n side=\"right\",\n position=1,\n showgrid=False,\n zeroline=False,\n ),\n )\n return fig\n\n# Update Pie Graph\n@app.callback(\n Output(\"pie_graph\", \"figure\"),\n [\n Input(\"date-picker\", \"date\"),\n Input(\"case-selector\", \"value\"),\n Input(\"state-dropdown\",\"value\"),\n ],\n)\ndef update_pie(datePicked, casePicked, state):\n df_area = df\n area = \"Province_State\"\n date_picked = dt.strptime(datePicked, \"%Y-%m-%d\")\n df_day = df_area[df_area[\"Date\"]==date_picked]\n\n if state != \"US\" and state !=\"\" and state is not None:\n area = \"Admin2\"\n df_day = df_day[df_day[\"Province_State\"]==state]\n\n aggregate = df_day.groupby([area]).sum().reset_index()\n aggregate_top = aggregate[[area, casePicked]].nlargest(10, casePicked)\n other = aggregate[casePicked].sum() - aggregate_top[casePicked].sum()\n aggregate_top = aggregate_top.append({area: 'Other',casePicked: other}, ignore_index=True)\n data = [\n dict(\n type=\"pie\",\n labels=aggregate_top[area].tolist(),\n values=aggregate_top[casePicked],\n name=\"Case Breakdown\",\n hoverinfo=\"label+text+value+percent\",\n textinfo=\"label+percent+name\",\n hole=0.5,\n marker=dict(colors=aggregate_top[area].tolist()),\n ),\n ]\n layout_pie = dict(\n autosize=True,\n automargin=True,\n margin=dict(l=30, r=30, b=20, t=40),\n hovermode=\"closest\",\n plot_bgcolor=\"#323130\",\n paper_bgcolor=\"#323130\",\n font=dict(family=\"Open Sans, sans-serif\", size=10, color=\"white\"),\n legend=dict(\n font=dict(color=\"#CCCCCC\", size=\"10\"), orientation=\"v\", bgcolor=\"rgba(0,0,0,0)\"\n ),\n title=\" {} Cases Summary\".format(casePicked),\n )\n\n figure = dict(data=data, layout=layout_pie)\n return figure\n\n\nif __name__ == \"__main__\":\n app.run_server()\n\n\n\n\n\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.to_datetime" ] ]
mathigatti/theBeautyFormula
[ "b5d9cf73af8ca36d3172311aa8e24126116a531f" ]
[ "number2automata.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nON = 255\nOFF = 0\nN = 250\n\ndef rule(neighbours,rules):\n if neighbours in rules:\n return ON\n else:\n return OFF\n\ndef update(grid,rules):\n newGrid = grid.copy()\n for i in range(1,N-1):\n for j in range(1,N-1):\n newGrid[i,j] = rule((grid[i-1,j],grid[i,j+1],grid[i+1,j],grid[i,j-1]),rules)\n\n return newGrid\n\ndef toTupla(n):\n neighbours = []\n n = \"{0:b}\".format(n).zfill(4)\n for c in n:\n if c == \"0\":\n neighbours.append(OFF)\n else:\n neighbours.append(ON)\n return tuple(neighbours)\n\ndef toNumber(tupla):\n n = ''\n for t in tupla:\n if t == ON:\n n += \"1\"\n else:\n n += \"0\"\n return int(n,2)\n\ndef generate(n):\n tuplas = []\n i = 0\n for c in n[::-1]:\n if c == \"1\":\n tuplas.append(toTupla(i))\n i+=1\n\n return tuplas\n\ndef numberToAutomata(number, steps):\n grid = np.random.choice([ON, OFF], N*N, p=[0.5, 0.5]).reshape(N, N)\n\n index = \"{0:b}\".format(number).zfill(256)\n rules = generate(index)\n\n for step_index in range(steps):\n grid = update(grid,rules)\n\n npGrid = np.uint8(np.array(grid))\n image = Image.fromarray(npGrid)\n\n filename = 'images/'+str(number)+'.gif'\n image.save(filename, format='GIF', save_all=True, duration=0, loop=0)\n\n filename = 'images/'+str(number)+'.tiff'\n image.save(filename, format='TIFF')" ]
[ [ "numpy.array", "numpy.random.choice" ] ]
mincloud1501/Python
[ "d6bbd7e22c8b8ff83217199b8af178f07b4cd724" ]
[ "Data_Analytics_Pandas/gonggongInfoAnalysis2.py" ]
[ "import os\nimport sys\nimport urllib.request\nimport datetime\nimport time\nimport json\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib import font_manager, rc\n\n# https://www.data.go.kr/\n# 출입국 관광 통계 서비스\ndef get_request_url(url):\n req = urllib.request.Request(url)\n\n try:\n response = urllib.request.urlopen(req)\n if response.getcode() == 200:\n #print(\"[%s] Url Request Success\" % datetime.datetime.now())\n return response.read().decode('utf-8')\n except Exception as e:\n print(e)\n print(\"[%s] Error for URL : %s\" % (datetime.datetime.now(), url))\n return None\n\n\ndef getNatVisitor(yyyymm, nat_cd, ed_cd):\n end_point = \"http://openapi.tour.go.kr/openapi/service/EdrcntTourismStatsService/getEdrcntTourismStatsList\"\n\n parameters = \"?_type=json&serviceKey=\" + access_key\n parameters += \"&YM=\" + yyyymm\n parameters += \"&NAT_CD=\" + nat_cd # 세자리 국가 코드\n parameters += \"&ED_CD=\" + ed_cd # D: 국민 해외 관광객, E: 방한 외래 관광객\n\n url = end_point + parameters\n\n retData = get_request_url(url)\n\n if (retData == None):\n return None\n else:\n return json.loads(retData)\n\n\ndef main():\n jsonResult = []\n\n # 중국:112, 일본:130, 미국:275, 영국:316\n national_code = \"275\"\n ed_cd = \"E\"\n\n nStartYear = 2011\n nEndYear = 2016\n\n for year in range(nStartYear, nEndYear):\n for month in range(1, 13):\n\n yyyymm = \"{0}{1:0>2}\".format(str(year), str(month))\n\n jsonData = getNatVisitor(yyyymm, national_code, ed_cd)\n\n if (jsonData['response']['header']['resultMsg'] == 'OK'):\n krName = jsonData['response']['body']['items']['item'][\"natKorNm\"]\n krName = krName.replace(' ', '')\n iTotalVisit = jsonData['response']['body']['items']['item'][\"num\"]\n print('%s_%s : %s' % (krName, yyyymm, iTotalVisit))\n jsonResult.append({'nat_name': krName, 'nat_cd': national_code,\n 'yyyymm': yyyymm, 'visit_cnt': iTotalVisit})\n\n cnVisit = []\n VisitYM = []\n index = []\n i = 0\n for item in jsonResult:\n index.append(i)\n cnVisit.append(item['visit_cnt'])\n VisitYM.append(item['yyyymm'])\n i = i + 1\n\n with open('%s(%s)_해외방문객정보_%d_%d.json' % (krName, national_code, nStartYear, nEndYear - 1), 'w',\n encoding='utf8') as outfile:\n retJson = json.dumps(jsonResult,\n indent=4, sort_keys=True,\n ensure_ascii=False)\n outfile.write(retJson)\n\n font_location = \"c:/Windows/fonts/malgun.ttf\"\n font_name = font_manager.FontProperties(fname=font_location).get_name()\n matplotlib.rc('font', family=font_name)\n\n plt.xticks(index, VisitYM)\n plt.plot(index, cnVisit)\n plt.xlabel('방문월')\n plt.ylabel('방문객수')\n plt.grid(True)\n plt.show()\n\n\nif __name__ == '__main__':\n main()" ]
[ [ "matplotlib.font_manager.FontProperties", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.rc", "matplotlib.pyplot.ylabel" ] ]
shapedbyiris/audio-super-res
[ "f69a9dbdd4452dc5521ae383ead5f71d08a84664" ]
[ "src/models/audiotfilm.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\nfrom scipy import interpolate\nfrom .model import Model, default_opt\n\nfrom .layers.subpixel import SubPixel1D, SubPixel1D_v2\n\nfrom keras import backend as K\nfrom keras.layers import merge, MaxPooling1D, MaxPooling2D, AveragePooling1D\nfrom keras.layers.core import Activation, Dropout\nfrom keras.layers.convolutional import Convolution1D, UpSampling1D, AtrousConvolution1D\nfrom keras.layers import LSTM\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.initializations import normal, orthogonal\n\n# ----------------------------------------------------------------------------\nDRATE = 2\nclass AudioTfilm(Model):\n\n def __init__(self, from_ckpt=False, n_dim=None, r=2, pool_size = 4, strides=4,\n opt_params=default_opt, log_prefix='./run'):\n # perform the usual initialization\n self.r = r\n self.pool_size = pool_size\n self.strides = strides\n Model.__init__(self, from_ckpt=from_ckpt, n_dim=n_dim, r=r,\n opt_params=opt_params, log_prefix=log_prefix)\n\n def create_model(self, n_dim, r):\n # load inputs\n X, _, _ = self.inputs\n K.set_session(self.sess)\n\n with tf.name_scope('generator'):\n x = X\n L = self.layers\n n_filters = [ 128, 256, 512, 512, 512, 512, 512, 512]\n n_blocks = [ 128, 64, 32, 16, 8]\n n_filtersizes = [65, 33, 17, 9, 9, 9, 9, 9, 9]\n downsampling_l = []\n\n print('building model...')\n\n def _make_normalizer(x_in, n_filters, n_block):\n \"\"\"applies an lstm layer on top of x_in\"\"\" \n x_shape = tf.shape(x_in)\n n_steps = tf.cast(x_shape[1], tf.float32) / n_block # will be 32 at training\n\n # first, apply standard conv layer to reduce the dimension\n # input of (-1, 4096, 128) becomes (-1, 32, 128)\n # input of (-1, 512, 512) becomes (-1, 32, 512)\n \n x_in_down = (MaxPooling1D(pool_length=n_block, border_mode='valid'))(x_in)\n \n # pooling to reduce dimension \n x_shape = tf.shape(x_in_down)\n \n x_rnn = LSTM(output_dim = n_filters, return_sequences = True)(x_in_down)\n \n # output: (-1, n_steps, n_filters)\n return x_rnn\n\n def _apply_normalizer(x_in, x_norm, n_filters, n_block):\n x_shape = tf.shape(x_in)\n n_steps = x_shape[1] / n_block # will be 32 at training\n\n # reshape input into blocks\n x_in = tf.reshape(x_in, shape=(-1, n_steps, n_block, n_filters))\n x_norm = tf.reshape(x_norm, shape=(-1, n_steps, 1, n_filters))\n \n # multiply\n x_out = x_norm * x_in\n\n # return to original shape\n x_out = tf.reshape(x_out, shape=x_shape)\n\n return x_out\n\n\n # downsampling layers\n for l, nf, fs in zip(list(range(L)), n_filters, n_filtersizes):\n with tf.name_scope('downsc_conv%d' % l):\n x = (AtrousConvolution1D(nb_filter=nf, filter_length=fs, atrous_rate = DRATE,\n activation=None, border_mode='same', init=orthogonal_init,\n subsample_length=1))(x)\n x = (MaxPooling1D(pool_length=2,border_mode='valid'))(x)\n x = LeakyReLU(0.2)(x)\n\n # create and apply the normalizer\n nb = 128 / (2**l)\n \n params_before = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]) \n x_norm = _make_normalizer(x, nf, nb)\n params_after = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]) \n \n x = _apply_normalizer(x, x_norm, nf, nb)\n\n print('D-Block: ', x.get_shape())\n downsampling_l.append(x)\n\n # bottleneck layer\n with tf.name_scope('bottleneck_conv'):\n x = (AtrousConvolution1D(nb_filter=n_filters[-1], filter_length=n_filtersizes[-1], atrous_rate = DRATE,\n activation=None, border_mode='same', 
init=orthogonal_init,\n subsample_length=1))(x)\n x = (MaxPooling1D(pool_length=2,border_mode='valid'))(x)\n x = Dropout(p=0.5)(x)\n x = LeakyReLU(0.2)(x)\n\n # create and apply the normalizer\n nb = 128 / (2**L)\n x_norm = _make_normalizer(x, n_filters[-1], nb)\n x = _apply_normalizer(x, x_norm, n_filters[-1], nb)\n\n # upsampling layers\n for l, nf, fs, l_in in reversed(list(zip(list(range(L)), n_filters, n_filtersizes, downsampling_l))):\n with tf.name_scope('upsc_conv%d' % l):\n # (-1, n/2, 2f)\n x = (AtrousConvolution1D(nb_filter=2*nf, filter_length=fs, atrous_rate = DRATE,\n activation=None, border_mode='same', init=orthogonal_init))(x)\n \n x = Dropout(p=0.5)(x)\n x = Activation('relu')(x)\n # (-1, n, f)\n x = SubPixel1D(x, r=2) \n \n # create and apply the normalizer\n x_norm = _make_normalizer(x, nf, nb)\n x = _apply_normalizer(x, x_norm, nf, nb)\n # (-1, n, 2f)\n x = merge([x, l_in], mode='concat', concat_axis=-1) \n print('U-Block: ', x.get_shape())\n \n # final conv layer\n with tf.name_scope('lastconv'):\n x = Convolution1D(nb_filter=2, filter_length=9, \n activation=None, border_mode='same', init=normal_init)(x) \n x = SubPixel1D(x, r=2) \n\n g = merge([x, X], mode='sum')\n return g\n\n def predict(self, X):\n assert len(X) == 1\n x_sp = spline_up(X, self.r)\n x_sp = x_sp[:len(x_sp) - (len(x_sp) % (2**(self.layers+1)))]\n X = x_sp.reshape((1,len(x_sp),1))\n feed_dict = self.load_batch((X,X), train=False)\n return self.sess.run(self.predictions, feed_dict=feed_dict)\n\n# ----------------------------------------------------------------------------\n# helpers\n\ndef normal_init(shape, dim_ordering='tf', name=None):\n return normal(shape, scale=1e-3, name=name, dim_ordering=dim_ordering)\n\ndef orthogonal_init(shape, dim_ordering='tf', name=None):\n return orthogonal(shape, name=name, dim_ordering=dim_ordering)\n\ndef spline_up(x_lr, r):\n x_lr = x_lr.flatten()\n x_hr_len = len(x_lr) * r\n x_sp = np.zeros(x_hr_len)\n \n i_lr = np.arange(x_hr_len, step=r)\n i_hr = np.arange(x_hr_len)\n \n f = interpolate.splrep(i_lr, x_lr)\n\n x_sp = interpolate.splev(i_hr, f)\n\n return x_sp\n" ]
[ [ "scipy.interpolate.splrep", "tensorflow.shape", "numpy.arange", "tensorflow.reshape", "tensorflow.cast", "scipy.interpolate.splev", "tensorflow.name_scope", "tensorflow.trainable_variables", "numpy.zeros" ] ]
yaochaorui/mmpose
[ "056d8db55373e933a971eadc66f92f1d1e773332" ]
[ "mmpose/datasets/datasets/mesh/mesh_h36m_dataset.py" ]
[ "import os\nfrom collections import OrderedDict\n\nimport json_tricks as json\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom ....core.evaluation import compute_similarity_transform\nfrom .mesh_base_dataset import MeshBaseDataset\n\n\n@DATASETS.register_module()\nclass MeshH36MDataset(MeshBaseDataset):\n \"\"\"Human3.6M Dataset for 3D human mesh estimation. It inherits all function\n from MeshBaseDataset and has its own evaluate fuction.\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def evaluate(self, outputs, res_folder, metric='joint_error'):\n \"\"\"Evaluate 3D keypoint results.\"\"\"\n assert metric == 'joint_error'\n\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n kpts = []\n for preds, boxes, image_path in outputs:\n kpts.append({\n 'keypoints': preds[0].tolist(),\n 'center': boxes[0][0:2].tolist(),\n 'scale': boxes[0][2:4].tolist(),\n 'area': float(boxes[0][4]),\n 'score': float(boxes[0][5]),\n 'image': image_path,\n })\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file)\n name_value = OrderedDict(info_str)\n return name_value\n\n def _write_keypoint_results(self, keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n def _report_metric(self, res_file):\n \"\"\"Keypoint evaluation.\n\n Report mean per joint position error (MPJPE) and mean per joint\n position error after rigid alignment (MPJPE-PA)\n \"\"\"\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n assert len(preds) == len(self.db)\n\n joint_error = []\n joint_error_pa = []\n\n for pred, item in zip(preds, self.db):\n error, error_pa = self.evaluate_kernel(pred['keypoints'][0],\n item['joints_3d'],\n item['joints_3d_visible'])\n joint_error.append(error)\n joint_error_pa.append(error_pa)\n\n mpjpe = np.array(joint_error).mean()\n mpjpe_pa = np.array(joint_error_pa).mean()\n\n info_str = []\n info_str.append(('MPJPE', mpjpe * 1000))\n info_str.append(('MPJPE-PA', mpjpe_pa * 1000))\n return info_str\n\n def evaluate_kernel(self, pred_joints_3d, joints_3d, joints_3d_visible):\n \"\"\"Evaluate one example.\"\"\"\n # Only 14 lsp joints are used for evaluation\n joint_mapper = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 18]\n assert (joints_3d_visible[joint_mapper].min() > 0)\n\n pred_joints_3d = np.array(pred_joints_3d)\n pred_joints_3d = pred_joints_3d[joint_mapper, :]\n pred_pelvis = (pred_joints_3d[[2]] + pred_joints_3d[[3]]) / 2\n pred_joints_3d = pred_joints_3d - pred_pelvis\n\n gt_joints_3d = joints_3d[joint_mapper, :]\n gt_pelvis = (gt_joints_3d[[2]] + gt_joints_3d[[3]]) / 2\n gt_joints_3d = gt_joints_3d - gt_pelvis\n\n error = pred_joints_3d - gt_joints_3d\n error = np.linalg.norm(error, ord=2, axis=-1).mean(axis=-1)\n\n pred_joints_3d_aligned = compute_similarity_transform(\n pred_joints_3d, gt_joints_3d)\n error_pa = pred_joints_3d_aligned - gt_joints_3d\n error_pa = np.linalg.norm(error_pa, ord=2, axis=-1).mean(axis=-1)\n\n return error, error_pa\n" ]
[ [ "numpy.array", "numpy.linalg.norm" ] ]
nomad-coe/workflow-parsers
[ "29a47d873ae921c23b58b95f8496fa0a2d503f15" ]
[ "tests/test_lobsterparser.py" ]
[ "#\n# Copyright The NOMAD Authors.\n#\n# This file is part of NOMAD. See https://nomad-lab.eu for further info.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport pytest\nimport logging\nimport numpy as np\n\nfrom nomad.datamodel import EntryArchive\nfrom nomad.units import ureg as units\n\nfrom workflowparsers.lobster import LobsterParser\n\ne = (1 * units.e).to_base_units().magnitude\neV = (1 * units.e).to_base_units().magnitude\n\n\n@pytest.fixture\ndef parser():\n return LobsterParser()\n\n\ndef A_to_m(value):\n return (value * units.angstrom).to_base_units().magnitude\n\n\ndef eV_to_J(value):\n return (value * units.eV).to_base_units().magnitude\n\n\n# default pytest.approx settings are abs=1e-12, rel=1e-6 so it doesn't work for small numbers\n# use the default just for comparison with zero\ndef approx(value):\n return pytest.approx(value, abs=0, rel=1e-6)\n\n\ndef test_Fe(parser):\n \"\"\"\n Tests spin-polarized Fe calculation with LOBSTER 4.0.0\n \"\"\"\n\n archive = EntryArchive()\n parser.parse('tests/data/lobster/Fe/lobsterout', archive, logging)\n\n run = archive.run[0]\n assert run.program.name == \"LOBSTER\"\n assert run.clean_end is True\n assert run.program.version == \"4.0.0\"\n assert run.time_run.wall_start.magnitude == 1619687985\n\n assert len(run.calculation) == 1\n scc = run.calculation[0]\n assert len(scc.x_lobster_abs_total_spilling) == 2\n assert scc.x_lobster_abs_total_spilling[0] == approx(8.02)\n assert scc.x_lobster_abs_total_spilling[1] == approx(8.96)\n assert len(scc.x_lobster_abs_charge_spilling) == 2\n assert scc.x_lobster_abs_charge_spilling[0] == approx(2.97)\n assert scc.x_lobster_abs_charge_spilling[1] == approx(8.5)\n\n method = run.method\n assert len(method) == 1\n assert method[0].x_lobster_code == \"VASP\"\n assert method[0].basis_set[0].name == \"pbeVaspFit2015\"\n\n # ICOHPLIST.lobster\n cohp = scc.x_lobster_section_cohp\n assert cohp.x_lobster_number_of_cohp_pairs == 20\n assert len(cohp.x_lobster_cohp_atom1_labels) == 20\n assert cohp.x_lobster_cohp_atom1_labels[19] == \"Fe2\"\n assert len(cohp.x_lobster_cohp_atom2_labels) == 20\n assert cohp.x_lobster_cohp_atom1_labels[3] == \"Fe1\"\n assert len(cohp.x_lobster_cohp_distances) == 20\n assert cohp.x_lobster_cohp_distances[0].magnitude == approx(\n A_to_m(2.831775))\n assert cohp.x_lobster_cohp_distances[13].magnitude == approx(\n A_to_m(2.45239))\n assert cohp.x_lobster_cohp_distances[19].magnitude == approx(\n A_to_m(2.831775))\n assert np.shape(cohp.x_lobster_cohp_translations) == (20, 3)\n assert all([a == b for a, b in zip(\n cohp.x_lobster_cohp_translations[0], [0, 0, -1])])\n assert all([a == b for a, b in zip(\n cohp.x_lobster_cohp_translations[13], [0, 0, 0])])\n assert all([a == b for a, b in zip(\n cohp.x_lobster_cohp_translations[19], [0, 0, 1])])\n assert np.shape(cohp.x_lobster_integrated_cohp_at_fermi_level) == (2, 20)\n assert cohp.x_lobster_integrated_cohp_at_fermi_level[0, 0].magnitude == approx(\n eV_to_J(-0.08672))\n assert 
cohp.x_lobster_integrated_cohp_at_fermi_level[0, 19].magnitude == approx(\n eV_to_J(-0.08672))\n assert cohp.x_lobster_integrated_cohp_at_fermi_level[1, 19].magnitude == approx(\n eV_to_J(-0.16529))\n assert cohp.x_lobster_integrated_cohp_at_fermi_level[1, 7].magnitude == approx(\n eV_to_J(-0.48790))\n\n # COHPCAR.lobster\n assert len(cohp.x_lobster_cohp_energies) == 201\n assert cohp.x_lobster_cohp_energies[0].magnitude == approx(eV_to_J(-10.06030))\n assert cohp.x_lobster_cohp_energies[200].magnitude == approx(eV_to_J(3.00503))\n assert np.shape(cohp.x_lobster_average_cohp_values) == (2, 201)\n assert cohp.x_lobster_average_cohp_values[0][196] == approx(0.02406)\n assert cohp.x_lobster_average_cohp_values[1][200] == approx(0.01816)\n assert np.shape(cohp.x_lobster_average_integrated_cohp_values) == (2, 201)\n assert cohp.x_lobster_average_integrated_cohp_values[0][200].magnitude == approx(\n eV_to_J(-0.06616))\n assert cohp.x_lobster_average_integrated_cohp_values[1][200].magnitude == approx(\n eV_to_J(-0.02265))\n assert np.shape(cohp.x_lobster_cohp_values) == (20, 2, 201)\n assert cohp.x_lobster_cohp_values[10][1][200] == approx(0.02291)\n assert cohp.x_lobster_cohp_values[19][0][200] == approx(0.01439)\n assert np.shape(cohp.x_lobster_integrated_cohp_values) == (20, 2, 201)\n assert cohp.x_lobster_integrated_cohp_values[10][0][200].magnitude == approx(\n eV_to_J(-0.12881))\n assert cohp.x_lobster_integrated_cohp_values[19][1][200].magnitude == approx(\n eV_to_J(-0.06876))\n\n # ICOOPLIST.lobster\n coop = scc.x_lobster_section_coop\n assert coop.x_lobster_number_of_coop_pairs == 20\n assert len(coop.x_lobster_coop_atom1_labels) == 20\n assert coop.x_lobster_coop_atom1_labels[19] == \"Fe2\"\n assert len(coop.x_lobster_coop_atom2_labels) == 20\n assert coop.x_lobster_coop_atom1_labels[3] == \"Fe1\"\n assert len(coop.x_lobster_coop_distances) == 20\n assert coop.x_lobster_coop_distances[0].magnitude == approx(\n A_to_m(2.831775))\n assert coop.x_lobster_coop_distances[13].magnitude == approx(\n A_to_m(2.45239))\n assert coop.x_lobster_coop_distances[19].magnitude == approx(\n A_to_m(2.831775))\n assert np.shape(coop.x_lobster_coop_translations) == (20, 3)\n assert all([a == b for a, b in zip(\n coop.x_lobster_coop_translations[0], [0, 0, -1])])\n assert all([a == b for a, b in zip(\n coop.x_lobster_coop_translations[13], [0, 0, 0])])\n assert all([a == b for a, b in zip(\n coop.x_lobster_coop_translations[19], [0, 0, 1])])\n assert np.shape(coop.x_lobster_integrated_coop_at_fermi_level) == (2, 20)\n assert coop.x_lobster_integrated_coop_at_fermi_level[0, 0].magnitude == approx(\n eV_to_J(-0.06882))\n assert coop.x_lobster_integrated_coop_at_fermi_level[0, 19].magnitude == approx(\n eV_to_J(-0.06882))\n assert coop.x_lobster_integrated_coop_at_fermi_level[1, 19].magnitude == approx(\n eV_to_J(-0.11268))\n assert coop.x_lobster_integrated_coop_at_fermi_level[1, 7].magnitude == approx(\n eV_to_J(-0.05179))\n\n # COOPCAR.lobster\n assert len(coop.x_lobster_coop_energies) == 201\n assert coop.x_lobster_coop_energies[0].magnitude == approx(eV_to_J(-10.06030))\n assert coop.x_lobster_coop_energies[200].magnitude == approx(eV_to_J(3.00503))\n assert np.shape(coop.x_lobster_average_coop_values) == (2, 201)\n assert coop.x_lobster_average_coop_values[0][196] == approx(-0.04773)\n assert coop.x_lobster_average_coop_values[1][200] == approx(-0.04542)\n assert np.shape(coop.x_lobster_average_integrated_coop_values) == (2, 201)\n assert coop.x_lobster_average_integrated_coop_values[0][200].magnitude == 
approx(\n eV_to_J(-0.12265))\n assert coop.x_lobster_average_integrated_coop_values[1][200].magnitude == approx(\n eV_to_J(-0.14690))\n assert np.shape(coop.x_lobster_coop_values) == (20, 2, 201)\n assert coop.x_lobster_coop_values[3][1][200] == approx(-0.01346)\n assert coop.x_lobster_coop_values[0][0][200] == approx(-0.04542)\n assert np.shape(coop.x_lobster_integrated_coop_values) == (20, 2, 201)\n assert coop.x_lobster_integrated_coop_values[10][0][199].magnitude == approx(\n eV_to_J(-0.07360))\n assert coop.x_lobster_integrated_coop_values[19][1][200].magnitude == approx(\n eV_to_J(-0.13041))\n\n # CHARGE.lobster\n charges = scc.charges\n assert len(charges) == 2\n mulliken = charges[0]\n assert mulliken.analysis_method == \"mulliken\"\n assert np.shape(mulliken.value) == (2,)\n assert mulliken.value[0] == pytest.approx(0.0 * e, abs=1e-6)\n assert mulliken.value[1] == pytest.approx(0.0 * e, abs=1e-6)\n\n loewdin = charges[1]\n assert loewdin.analysis_method == \"loewdin\"\n assert np.shape(loewdin.value) == (2,)\n assert loewdin.value[0] == pytest.approx(0.0 * e, abs=1e-6)\n assert loewdin.value[1] == pytest.approx(0.0 * e, abs=1e-6)\n\n # DOSCAR.lobster total and integrated DOS\n assert len(scc.dos_electronic) == 1\n dos = scc.dos_electronic[0]\n assert dos.n_energies == 201\n assert len(dos.energies) == 201\n assert dos.energies[0].magnitude == approx(eV_to_J(-10.06030))\n assert dos.energies[16].magnitude == approx(eV_to_J(-9.01508))\n assert dos.energies[200].magnitude == approx(eV_to_J(3.00503))\n assert len(dos.total) == 2\n assert np.shape(dos.total[1].value) == (201,)\n assert dos.total[0].value[6] == pytest.approx(0.0, abs=1e-30)\n assert dos.total[0].value[200].magnitude == approx(0.26779 / eV)\n assert dos.total[1].value[195].magnitude == approx(0.37457 / eV)\n assert np.shape(dos.total[0].value_integrated) == (201,)\n assert dos.total[0].value_integrated[10] == approx(0.0 + 18)\n assert dos.total[0].value_integrated[188] == approx(11.07792 + 18)\n assert dos.total[1].value_integrated[200] == approx(10.75031 + 18)\n\n # DOSCAR.lobster atom and lm-projected dos\n assert len(dos.atom_projected) == 24\n dos.atom_projected[0].atom_index == 0\n dos.atom_projected[12].atom_index == 1\n assert dos.atom_projected[1].m_kind == 'real_orbital'\n assert (dos.atom_projected[7].lm == [2, 0]).all()\n assert np.shape(dos.atom_projected[11].value) == (201,)\n assert dos.atom_projected[5].value[190].magnitude == approx(0.21304 / eV)\n assert dos.atom_projected[22].value[200].magnitude == approx(0.00784 / eV)\n assert dos.atom_projected[13].value[35].magnitude == approx(0.01522 / eV)\n\n\ndef test_NaCl(parser):\n \"\"\"\n Test non-spin-polarized NaCl calculation with LOBSTER 3.2.0\n \"\"\"\n\n archive = EntryArchive()\n parser.parse('tests/data/lobster/NaCl/lobsterout', archive, logging)\n\n run = archive.run[0]\n assert run.program.name == \"LOBSTER\"\n assert run.clean_end is True\n assert run.program.version == \"3.2.0\"\n assert run.time_run.wall_start.magnitude == 1619713048\n\n assert len(run.calculation) == 1\n scc = run.calculation[0]\n assert len(scc.x_lobster_abs_total_spilling) == 1\n assert scc.x_lobster_abs_total_spilling[0] == approx(9.29)\n assert len(scc.x_lobster_abs_charge_spilling) == 1\n assert scc.x_lobster_abs_charge_spilling[0] == approx(0.58)\n\n method = run.method\n assert len(method) == 1\n assert method[0].x_lobster_code == \"VASP\"\n assert method[0].basis_set[0].name == \"pbeVaspFit2015\"\n\n # ICOHPLIST.lobster\n cohp = scc.x_lobster_section_cohp\n assert 
cohp.x_lobster_number_of_cohp_pairs == 72\n assert len(cohp.x_lobster_cohp_atom1_labels) == 72\n assert cohp.x_lobster_cohp_atom1_labels[71] == \"Cl7\"\n assert len(cohp.x_lobster_cohp_atom2_labels) == 72\n assert cohp.x_lobster_cohp_atom2_labels[43] == \"Cl6\"\n assert len(cohp.x_lobster_cohp_distances) == 72\n assert cohp.x_lobster_cohp_distances[0].magnitude == approx(A_to_m(3.99586))\n assert cohp.x_lobster_cohp_distances[47].magnitude == approx(A_to_m(2.82550))\n assert cohp.x_lobster_cohp_distances[71].magnitude == approx(A_to_m(3.99586))\n assert np.shape(cohp.x_lobster_cohp_translations) == (72, 3)\n assert all([a == b for a, b in zip(\n cohp.x_lobster_cohp_translations[0], [-1, 0, 0])])\n assert all([a == b for a, b in zip(\n cohp.x_lobster_cohp_translations[54], [0, -1, 0])])\n assert all([a == b for a, b in zip(\n cohp.x_lobster_cohp_translations[71], [0, 1, 0])])\n assert np.shape(cohp.x_lobster_integrated_cohp_at_fermi_level) == (1, 72)\n assert cohp.x_lobster_integrated_cohp_at_fermi_level[0, 0].magnitude == approx(\n eV_to_J(-0.02652))\n assert cohp.x_lobster_integrated_cohp_at_fermi_level[0, 71].magnitude == approx(\n eV_to_J(-0.02925))\n\n # COHPCAR.lobster\n assert len(cohp.x_lobster_cohp_energies) == 201\n assert cohp.x_lobster_cohp_energies[0].magnitude == approx(eV_to_J(-12.02261))\n assert cohp.x_lobster_cohp_energies[200].magnitude == approx(eV_to_J(2.55025))\n assert np.shape(cohp.x_lobster_average_cohp_values) == (1, 201)\n assert cohp.x_lobster_average_cohp_values[0][0] == pytest.approx(0.0)\n assert cohp.x_lobster_average_cohp_values[0][151] == approx(-0.03162)\n assert np.shape(cohp.x_lobster_average_integrated_cohp_values) == (1, 201)\n assert cohp.x_lobster_average_integrated_cohp_values[0][0].magnitude == approx(\n eV_to_J(-0.15834))\n assert cohp.x_lobster_average_integrated_cohp_values[0][200].magnitude == approx(\n eV_to_J(-0.24310))\n assert np.shape(cohp.x_lobster_cohp_values) == (72, 1, 201)\n assert cohp.x_lobster_cohp_values[1][0][200] == pytest.approx(0.0)\n assert cohp.x_lobster_cohp_values[71][0][140] == approx(-0.00403)\n assert np.shape(cohp.x_lobster_integrated_cohp_values) == (72, 1, 201)\n assert cohp.x_lobster_integrated_cohp_values[2][0][200].magnitude == approx(\n eV_to_J(-0.02652))\n assert cohp.x_lobster_integrated_cohp_values[67][0][199].magnitude == approx(\n eV_to_J(-0.04137))\n\n # ICOOPLIST.lobster\n coop = scc.x_lobster_section_coop\n assert coop.x_lobster_number_of_coop_pairs == 72\n assert len(coop.x_lobster_coop_atom1_labels) == 72\n assert coop.x_lobster_coop_atom1_labels[71] == \"Cl7\"\n assert len(coop.x_lobster_coop_atom2_labels) == 72\n assert coop.x_lobster_coop_atom2_labels[0] == \"Na2\"\n assert len(coop.x_lobster_coop_distances) == 72\n assert coop.x_lobster_coop_distances[0].magnitude == approx(A_to_m(3.99586))\n assert coop.x_lobster_coop_distances[12].magnitude == approx(A_to_m(2.82550))\n assert coop.x_lobster_coop_distances[71].magnitude == approx(A_to_m(3.99586))\n assert np.shape(coop.x_lobster_coop_translations) == (72, 3)\n assert all([a == b for a, b in zip(\n coop.x_lobster_coop_translations[0], [-1, 0, 0])])\n assert all([a == b for a, b in zip(\n coop.x_lobster_coop_translations[13], [0, 1, 0])])\n assert all([a == b for a, b in zip(\n coop.x_lobster_coop_translations[71], [0, 1, 0])])\n assert np.shape(coop.x_lobster_integrated_coop_at_fermi_level) == (1, 72)\n assert coop.x_lobster_integrated_coop_at_fermi_level[0, 0].magnitude == approx(\n eV_to_J(-0.00519))\n assert 
coop.x_lobster_integrated_coop_at_fermi_level[0, 71].magnitude == approx(\n eV_to_J(-0.00580))\n\n # COOPCAR.lobster\n assert len(coop.x_lobster_coop_energies) == 201\n assert coop.x_lobster_coop_energies[0].magnitude == approx(eV_to_J(-12.02261))\n assert coop.x_lobster_coop_energies[200].magnitude == approx(eV_to_J(2.55025))\n assert np.shape(coop.x_lobster_average_coop_values) == (1, 201)\n assert coop.x_lobster_average_coop_values[0][0] == pytest.approx(0.0)\n assert coop.x_lobster_average_coop_values[0][145] == approx(0.03178)\n assert np.shape(coop.x_lobster_average_integrated_coop_values) == (1, 201)\n assert coop.x_lobster_average_integrated_coop_values[0][0].magnitude == approx(\n eV_to_J(0.00368))\n assert coop.x_lobster_average_integrated_coop_values[0][200].magnitude == approx(\n eV_to_J(0.00682))\n assert np.shape(coop.x_lobster_coop_values) == (72, 1, 201)\n assert coop.x_lobster_coop_values[1][0][200] == pytest.approx(0.0)\n assert coop.x_lobster_coop_values[71][0][143] == approx(0.01862)\n assert np.shape(coop.x_lobster_integrated_coop_values) == (72, 1, 201)\n assert coop.x_lobster_integrated_coop_values[2][0][200].magnitude == approx(\n eV_to_J(-0.00519))\n assert coop.x_lobster_integrated_coop_values[71][0][199].magnitude == approx(\n eV_to_J(-0.00580))\n\n # CHARGE.lobster\n charges = scc.charges\n assert len(charges) == 2\n mulliken = charges[0]\n assert mulliken.analysis_method == \"mulliken\"\n # here the approx is not really working (changing the 0.78 to for example\n # 10 makes the test still pass)\n assert mulliken.value[0].magnitude == approx(0.78 * e)\n assert mulliken.value[7].magnitude == approx(-0.78 * e)\n\n loewdin = charges[1]\n assert loewdin.analysis_method == \"loewdin\"\n assert loewdin.value[0].magnitude == approx(0.67 * e)\n assert loewdin.value[7].magnitude == approx(-0.67 * e)\n\n # DOSCAR.lobster total and integrated DOS\n assert len(scc.dos_electronic) == 1\n dos = scc.dos_electronic[0]\n assert dos.n_energies == 201\n assert len(dos.energies) == 201\n assert dos.energies[0].magnitude == approx(eV_to_J(-12.02261))\n assert dos.energies[25].magnitude == approx(eV_to_J(-10.20101))\n assert dos.energies[200].magnitude == approx(eV_to_J(2.55025))\n assert np.shape(dos.total[0].value) == (201,)\n assert dos.total[0].value[6].magnitude == pytest.approx(0.0, abs=1e-30)\n assert dos.total[0].value[162].magnitude == approx(20.24722 / eV)\n assert dos.total[0].value[200].magnitude == pytest.approx(0.0, abs=1e-30)\n assert np.shape(dos.total[0].value_integrated) == (201,)\n assert dos.total[0].value_integrated[10] == approx(7.99998 + 80)\n assert dos.total[0].value_integrated[160] == approx(27.09225 + 80)\n assert dos.total[0].value_integrated[200] == approx(31.99992 + 80)\n\n # DOSCAR.lobster atom and lm-projected dos\n assert len(dos.atom_projected) == 20\n dos.atom_projected[0].atom_index == 0\n dos.atom_projected[19].atom_index == 7\n assert dos.atom_projected[5].m_kind == 'real_orbital'\n assert (dos.atom_projected[17].lm == [1, 2]).all()\n assert np.shape(dos.atom_projected[13].value) == (201,)\n assert np.shape(dos.atom_projected[8].value) == (201,)\n assert dos.atom_projected[0].value[190].magnitude == pytest.approx(0.0, abs=1e-30)\n assert dos.atom_projected[19].value[141].magnitude == approx(0.32251 / eV)\n assert dos.atom_projected[16].value[152].magnitude == approx(0.00337 / eV)\n\n\ndef test_HfV(parser):\n \"\"\"\n Test non-spin-polarized HfV2 calculation with LOBSTER 2.0.0,\n it has different ICOHPLIST.lobster and ICOOPLIST.lobster scheme.\n 
Also test backup structure parsing when no CONTCAR is present.\n \"\"\"\n\n archive = EntryArchive()\n parser.parse('tests/data/lobster/HfV2/lobsterout', archive, logging)\n\n run = archive.run[0]\n assert run.program.name == \"LOBSTER\"\n assert run.clean_end is True\n assert run.program.version == \"2.0.0\"\n\n assert len(run.calculation) == 1\n scc = run.calculation[0]\n assert len(scc.x_lobster_abs_total_spilling) == 1\n assert scc.x_lobster_abs_total_spilling[0] == approx(4.39)\n assert len(scc.x_lobster_abs_charge_spilling) == 1\n assert scc.x_lobster_abs_charge_spilling[0] == approx(2.21)\n\n # backup partial system parsing\n system = run.system\n assert len(system) == 1\n assert len(system[0].atoms.species) == 12\n assert all([a == b for a, b in zip(system[0].atoms.species,\n [72, 72, 72, 72, 23, 23, 23, 23, 23, 23, 23, 23])])\n assert all([a == b for a, b in zip(system[0].atoms.periodic,\n [True, True, True])])\n\n # method\n method = run.method\n assert method[0].basis_set[0].name == \"Koga\"\n\n # ICOHPLIST.lobster\n cohp = scc.x_lobster_section_cohp\n assert cohp.x_lobster_number_of_cohp_pairs == 56\n assert len(cohp.x_lobster_cohp_atom1_labels) == 56\n assert cohp.x_lobster_cohp_atom1_labels[41] == \"V6\"\n assert len(cohp.x_lobster_cohp_atom2_labels) == 56\n assert cohp.x_lobster_cohp_atom2_labels[16] == \"V9\"\n assert len(cohp.x_lobster_cohp_distances) == 56\n assert cohp.x_lobster_cohp_distances[0].magnitude == approx(A_to_m(3.17294))\n assert cohp.x_lobster_cohp_distances[47].magnitude == approx(A_to_m(2.60684))\n assert cohp.x_lobster_cohp_distances[55].magnitude == approx(A_to_m(2.55809))\n assert cohp.x_lobster_cohp_translations is None\n assert len(cohp.x_lobster_cohp_number_of_bonds) == 56\n assert cohp.x_lobster_cohp_number_of_bonds[0] == 2\n assert cohp.x_lobster_cohp_number_of_bonds[53] == 1\n assert np.shape(cohp.x_lobster_integrated_cohp_at_fermi_level) == (1, 56)\n assert cohp.x_lobster_integrated_cohp_at_fermi_level[0, 0].magnitude == approx(\n eV_to_J(-1.72125))\n assert cohp.x_lobster_integrated_cohp_at_fermi_level[0, 55].magnitude == approx(\n eV_to_J(-1.62412))\n\n # ICOOPLIST.lobster\n coop = scc.x_lobster_section_coop\n assert coop.x_lobster_number_of_coop_pairs == 56\n assert len(coop.x_lobster_coop_atom1_labels) == 56\n assert coop.x_lobster_coop_atom1_labels[41] == \"V6\"\n assert len(coop.x_lobster_coop_atom2_labels) == 56\n assert coop.x_lobster_coop_atom2_labels[11] == \"Hf4\"\n assert len(coop.x_lobster_coop_distances) == 56\n assert coop.x_lobster_coop_distances[0].magnitude == approx(A_to_m(3.17294))\n assert coop.x_lobster_coop_distances[47].magnitude == approx(A_to_m(2.60684))\n assert coop.x_lobster_coop_distances[55].magnitude == approx(A_to_m(2.55809))\n assert coop.x_lobster_coop_translations is None\n assert len(coop.x_lobster_coop_number_of_bonds) == 56\n assert coop.x_lobster_coop_number_of_bonds[0] == 2\n assert coop.x_lobster_coop_number_of_bonds[53] == 1\n assert np.shape(coop.x_lobster_integrated_coop_at_fermi_level) == (1, 56)\n assert coop.x_lobster_integrated_coop_at_fermi_level[0, 0].magnitude == approx(\n eV_to_J(-0.46493))\n assert coop.x_lobster_integrated_coop_at_fermi_level[0, 55].magnitude == approx(\n eV_to_J(-0.50035))\n\n\ndef test_failed_case(parser):\n \"\"\"\n Check that we also handle gracefully a case where the lobster ends very early.\n Here it is because of a wrong CONTCAR.\n \"\"\"\n\n archive = EntryArchive()\n parser.parse('tests/data/lobster/failed_case/lobsterout', archive, logging)\n\n run = 
archive.run[0]\n assert run.clean_end is False\n" ]
[ [ "numpy.shape" ] ]
tenokraat/pyarubacentral
[ "a8c04f1e55f0f7a6091d4bf66a8d1ad9970d97f8" ]
[ "nwseg_submodule_getGwDetails.py" ]
[ "\n# Import Aruba Central Base\nfrom pycentral.base import ArubaCentralBase\nfrom pycentral.configuration import Groups\nfrom pycentral.monitoring import Devices\nfrom pprint import pprint\nimport json\nimport csv\nimport pandas\nimport collections\n\n\n# Create the following files by refering to the samples.\ncentral_filename = \"input_credentials.yaml\"\n\n# Get instance of ArubaCentralBase from the central_filename\nfrom pycentral.workflows.workflows_utils import get_conn_from_file\ncentral = get_conn_from_file(filename=central_filename)\n\ng = Groups()\nd = Devices()\n\n#Import gateway export CSV to pandas DataFrame\n\ndf = pandas.read_csv('export_gateway_list_dev.csv', index_col='Device Name')\n\nprint ('Entire pandas DataFrame:')\nprint(df)\n\nfor index in df.index:\n\n uptime = df['Uptime'][index]\n group = df['Group'][index]\n\n if uptime != '0:00:00' and group == 'CH-HS-BNC_LAB':\n\n mac_addr = df['MAC'][index]\n print(mac_addr)\n serial = df['Serial'][index]\n print(serial)\n\n #device_details = d.get_device_details(central, 'mobility_controllers', f'{serial}')\n #print(device_details['msg'])\n #print(type(device_details))\n\n uplink_details = d.get_gateway_uplink_details(central, 'mobility_controllers', f'{serial}', '3H')\n\n uplinks = uplink_details['msg']['uplinks']\n print (type(uplinks))\n\n keys = uplink_details['msg'].keys()\n values = uplink_details['msg'].values()\n\n #print(keys)\n #print(values)\n\n values_list = list(values)\n\n #print(values_list[10])\n #print(type(values_list))\n\n #print(uplink_details['msg'][1])\n\n\n \n \n\n\n#pprint(gateway_dict)\n\n#with open('gw-details.json', 'w') as file1:\n# file1.write(json.dumps(device_details))\n\n" ]
[ [ "pandas.read_csv" ] ]
minhtcuet/creditscoring
[ "5a00f110b1936c7151a6e471cfb52b907b199f4d" ]
[ "credit_scoring/metrics/_ks.py" ]
[ "import numpy as np\r\nimport pandas as pd\r\nimport plotly.express as px\r\n\r\nfrom credit_scoring.metrics.credit_score import CreditScore\r\n\r\n\r\nclass CalculatedKS(CreditScore):\r\n\r\n\tdef ks(self) -> tuple:\r\n\t\t'''\r\n\t\tCalculate the KS table of each decile, KS value, the deicile at KS\r\n\t\t:return: KS Tabel, KS value, Decile\r\n\t\t'''\r\n\r\n\t\t# Create the dataframe contain target labels and probability values\r\n\t\tdata = pd.DataFrame({\"target\": self.target, \"prob\": self.pred})\r\n\r\n\t\t# Calculate the probability of nonvents\r\n\t\tdata['target0'] = 1 - data['target']\r\n\r\n\t\t# Find the suitable bucket\r\n\t\tself.bucket = min(self.bucket, data['prob'].nunique())\r\n\r\n\t\t# Cut the probability values to 'bucket' ranges by quantile\r\n\t\tdata['bucket'] = pd.qcut(data['prob'], self.bucket)\r\n\t\tgrouped = data.groupby('bucket', as_index=False)\r\n\r\n\t\tks_table = pd.DataFrame()\r\n\r\n\t\t# Calculate the min, max of probability,\r\n\t\tks_table['min_prob'] = grouped.min()['prob']\r\n\t\tks_table['max_prob'] = grouped.max()['prob']\r\n\r\n\t\t# Calculate the number of events, nonevents\r\n\t\tks_table['events'] = grouped.sum()['target']\r\n\t\tks_table['nonevents'] = grouped.sum()['target0']\r\n\r\n\t\t# Calculate the cumulative probability of event and non-events in each decile\r\n\t\tks_table = ks_table.sort_values(by=\"min_prob\", ascending=False).reset_index(drop=True)\r\n\t\tks_table['cum_eventrate'] = (ks_table.events / data['target'].sum()).cumsum()\r\n\t\tks_table['cum_noneventrate'] = (ks_table.nonevents / data['target0'].sum()).cumsum()\r\n\r\n\t\t# CAlculate the KS by formula\r\n\t\tks_table['KS'] = np.round(ks_table['cum_eventrate'] - ks_table['cum_noneventrate'], 3) * 100\r\n\t\tks_table.loc[0, 'cum_eventrate'] = ks_table.loc[0, 'cum_noneventrate'] = 0\r\n\t\tks_table.index = range(self.bucket)\r\n\t\tks_table.index.rename('Decile', inplace=True)\r\n\r\n\t\tks_val, decile = max(ks_table['KS']), ks_table.index[ks_table['KS'] == max(ks_table['KS'])][0]\r\n\t\treturn ks_table, ks_val, decile\r\n\r\n\tdef plot_ks_chart(self) -> None:\r\n\t\t\"\"\"\r\n\t\tPlot the KS chart, distribution of cdf of event and non-event\r\n\t\t:return: None\r\n\t\t\"\"\"\r\n\t\tdata, ks_val, decile = self.ks()\r\n\t\ttext = str(ks_val) + \"%\" + \" at decile \" + str(decile)\r\n\t\tfig = px.line(data, x=data.index, y=['cum_eventrate', 'cum_noneventrate'],\r\n\t\t title='Kolmogorov-Smirnov statistic: ' + text)\r\n\t\tfig.add_vline(x=decile, line_width=3, line_dash=\"dash\", line_color=\"green\")\r\n\t\tfig.show()\r\n" ]
[ [ "numpy.round", "pandas.qcut", "pandas.DataFrame" ] ]
Zilv1128/test1
[ "49fbb1392e69b5194c077df9847505ec995b4e3d" ]
[ "recipes/sota/2019/lm_analysis/generate_shuffle_dev_other_tts.py" ]
[ "import os\nimport sys\n\nimport numpy\n\n\nnumpy.random.seed(42)\n\n\nwith open(os.path.join(sys.argv[1], \"dev-other.lst\"), \"r\") as f:\n data = [line.strip() for line in f]\n\nfor n, seed_val in enumerate([0, 2, 3, 4, 5]):\n numpy.random.seed(42 + seed_val)\n data = numpy.random.permutation(data)\n\n with open(\"tts_shuffled_{}.txt\".format(n), \"w\") as fout:\n for line in data:\n line_new = line.split(\" \")\n new_tr = numpy.random.permutation(line_new[3:])\n fout.write(line + \"\\n\")\n fout.write(\"{}\\n\".format(\" \".join(new_tr)))\n" ]
[ [ "numpy.random.permutation", "numpy.random.seed" ] ]
ZircXes/uncmac854course
[ "0b8845889cabb07098a7186120ed56ee0ef392a7" ]
[ "Week 01/Week 01 - Examples.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # Week 01 Example Statistics in Python 3\n# This Notebook introduces basic statistical analysis from week 01 using Python 3.x.\n# \n# Jupyter notebooks blend Markdown (rich formatted text) with software code and output to ease the learning process.\n# \n\n# Version: 01\n# \n# Author: Chris Kennedy\n\n# In[1]:\n\n\nimport pandas as pd\nfrom scipy import stats\n\n\n# In[2]:\n\n\ndfWine = pd.read_excel(r'W1 - Wine Quality.xlsx')\n\n\n# In[3]:\n\n\ndfWine.describe()\n\n\n# ## Practice Question 1\n# What is the 99% confidence interval for the average alcohol level of a bottle of wine?\n\n# In[4]:\n\n\nn = dfWine['alcohol'].count()\nprint(\"# Wines: %6.2f\" % n)\n\n\n# In[5]:\n\n\navg = dfWine['alcohol'].mean()\nprint(\"Average Alcohol: %6.4f\" % avg)\n\n\n# In[6]:\n\n\nstderr = dfWine['alcohol'].sem()\nprint(\"Standard Error: %6.4f\" % stderr)\n\n\n# In[7]:\n\n\nt_CI = stats.t.ppf(0.995,df = n - 1)\nprint(\"T-statistic: %6.4f\" % t_CI)\n\n\n# In[8]:\n\n\nmoe = t_CI * stderr\nprint(\"Margin of error: %6.4f\" % moe)\n\n\n# In[9]:\n\n\nprint(\"Lower: %6.4f\" % (avg - moe))\nprint(\"Upper: %6.4f\" % (avg + moe))\n\n\n# In[10]:\n\n\n# Concise using stats package directly:\nstats.t.interval(0.99, loc=avg, scale=stderr, df = n-1)\n\n\n# ## Practice Question 2\n# What is the 90% confidence interval around the proportion of white wines that are rated very good quality (7 or higher)?\n# \n\n# In[11]:\n\n\nfilteredDF = dfWine[dfWine['type'] == 'white']\nn = filteredDF['type'].count()\n\n\n# In[12]:\n\n\nqfilteredDF = filteredDF[filteredDF['quality'] >= 7]\nnq = qfilteredDF['type'].count()\n\n\n# In[13]:\n\n\nproportion = nq / n\nprint(\"Proportion: %5.2f%%\" % (proportion*100))\n\n\n# In[14]:\n\n\nstderr = (proportion * (1 - proportion) / n)**0.50\nprint(\"Standard error: %5.2f%%\" % (stderr*100))\n\n\n# In[15]:\n\n\nz_CI = stats.norm.ppf(0.95)\n\n\n# In[16]:\n\n\nprint (\"Z for 90%%: %6.4f\" % z_CI)\n\n\n# In[17]:\n\n\nstats.norm.interval(0.90, loc=proportion*100, scale=stderr*100)\n\n\n# ## Question 3\n# Can you conclude (at the 5% significance level) that the average fixed acid level for all wines is above 7.2?\n\n# In[18]:\n\n\ndef getStatistic(x, H0, stderr):\n return (x - H0) / stderr\n\n\n# H$_0$: $\\mu \\le 7.2$\n# \n# H$_a$: $\\mu > 7.2$\n# \n\n# In[19]:\n\n\nH0 = 7.20\navg = dfWine['fixed acidity'].mean()\nstderr = dfWine['fixed acidity'].sem()\ntStatistic = getStatistic(avg, H0, stderr)\nnWines = dfWine['fixed acidity'].count()\nprint(\"T-stat: %7.4f\" %tStatistic)\ndfWine['fixed acidity'].describe()\n\n\n# Right tail test due to alternative hypothesis.\n\n# In[20]:\n\n\nprob_value = 1 - stats.t.cdf(tStatistic, df=nWines-1)\nprint(prob_value)\n\n\n# In[21]:\n\n\nstats.norm.interval(0.9, loc=5,scale=2\n )\n\n\n# ### End of Notebook!\n" ]
[ [ "scipy.stats.norm.ppf", "pandas.read_excel", "scipy.stats.norm.interval", "scipy.stats.t.ppf", "scipy.stats.t.cdf", "scipy.stats.t.interval" ] ]
stebranchi/Incremental-predictive-monitoring-python3
[ "114b080df4afa0653ce03d8eb0059ceda096752b" ]
[ "src/evaluation/prepare_data_resource.py" ]
[ "\"\"\"\nThis script prepares data in the format for the testing\nalgorithms to run\n\nThe script is expanded to the resource attribute\n\"\"\"\n\nfrom __future__ import division\n\nimport copy\nimport csv\nimport re\nimport time\nfrom queue import PriorityQueue\nfrom datetime import datetime\n\nimport numpy as np\n\nimport shared_variables\nfrom shared_variables import get_unicode_from_int\n\n\ndef prepare_testing_data(eventlog):\n csvfile = open(shared_variables.data_folder + '%s.csv' % eventlog, 'r')\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(spamreader, None) # skip the headers\n\n lastcase = ''\n line = ''\n line_group = ''\n first_line = True\n lines_id = []\n lines = []\n lines_group = []\n timeseqs = [] # relative time since previous event\n timeseqs2 = [] # relative time since case start\n timeseqs3 = [] # absolute time of previous event\n timeseqs4 = [] # absolute time of event as a string\n times = []\n times2 = []\n times3 = []\n times4 = []\n numlines = 0\n casestarttime = None\n lasteventtime = None\n\n for row in spamreader:\n t = time.strptime(row[2], \"%Y-%m-%d %H:%M:%S\")\n if row[0] != lastcase:\n casestarttime = t\n lasteventtime = t\n lastcase = row[0]\n if not first_line:\n lines.append(line)\n lines_group.append(line_group)\n timeseqs.append(times)\n timeseqs2.append(times2)\n timeseqs3.append(times3)\n timeseqs4.append(times4)\n lines_id.append(lastcase)\n line = ''\n line_group = ''\n times = []\n times2 = []\n times3 = []\n times4 = []\n numlines += 1\n line += get_unicode_from_int(row[1])\n line_group += get_unicode_from_int(row[3])\n if hasattr(time, 'tzset'):\n # Move the time zone info into os.environ. See ticket #2315 for why\n # we don't do this unconditionally (breaks Windows).\n time.tzset()\n timesincelastevent = datetime.fromtimestamp(time.mktime(t)) - datetime.fromtimestamp(time.mktime(lasteventtime))\n timesincecasestart = datetime.fromtimestamp(time.mktime(t)) - datetime.fromtimestamp(time.mktime(casestarttime))\n timediff = 86400 * timesincelastevent.days + timesincelastevent.seconds\n timediff2 = 86400 * timesincecasestart.days + timesincecasestart.seconds\n times.append(timediff)\n times2.append(timediff2)\n times3.append(datetime.fromtimestamp(time.mktime(t)))\n times4.append(row[2])\n lasteventtime = t\n first_line = False\n\n # add last case\n lines.append(line)\n lines_group.append(line_group)\n timeseqs.append(times)\n timeseqs2.append(times2)\n timeseqs3.append(times3)\n timeseqs4.append(times4)\n numlines += 1\n\n divisor = np.max([item for sublist in timeseqs for item in sublist])\n divisor2 = np.max([item for sublist in timeseqs2 for item in sublist])\n #divisor3 = np.max(map(lambda x: np.max(map(lambda y: x[len(x) - 1] - y, x)), timeseqs2))\n divisor3 = np.max([np.max([x[len(x) - 1] - y for y in x]) for x in timeseqs2])\n\n elems_per_fold = int(round(numlines / 3))\n\n fold1and2lines = lines[:2 * elems_per_fold]\n #fold1and2lines = map(lambda x: x + '!', fold1and2lines)\n #maxlen = max(map(lambda x: len(x), fold1and2lines))\n fold1and2lines = [x + '!' for x in fold1and2lines]\n maxlen = max([len(x) for x in fold1and2lines])\n chars = list(map(lambda x: set(x), fold1and2lines))\n chars = list(set().union(*chars))\n chars.sort()\n target_chars = copy.copy(chars)\n if '!' 
in chars:\n chars.remove('!')\n char_indices = dict((c, i) for i, c in enumerate(chars))\n target_char_indices = dict((c, i) for i, c in enumerate(target_chars))\n target_indices_char = dict((i, c) for i, c in enumerate(target_chars))\n\n fold1and2lines_group = lines_group[:2 * elems_per_fold]\n # fold1and2lines_group = map(lambda x: x + '!', fold1and2lines_group)\n chars_group = list(map(lambda x: set(x), fold1and2lines_group))\n chars_group = list(set().union(*chars_group))\n chars_group.sort()\n target_chars_group = copy.copy(chars_group)\n # chars_group.remove('!')\n char_indices_group = dict((c, i) for i, c in enumerate(chars_group))\n target_char_indices_group = dict((c, i) for i, c in enumerate(target_chars_group))\n target_indices_char_group = dict((i, c) for i, c in enumerate(target_chars_group))\n\n # we only need the third fold, because first two were used for training\n fold3 = lines[2 * elems_per_fold:]\n fold3_id = lines_id[2 * elems_per_fold:]\n fold3_group = lines_group[2 * elems_per_fold:]\n fold3_t = timeseqs[2 * elems_per_fold:]\n fold3_t2 = timeseqs2[2 * elems_per_fold:]\n fold3_t3 = timeseqs3[2 * elems_per_fold:]\n fold3_t4 = timeseqs4[2 * elems_per_fold:]\n\n lines = fold3\n lines_id = fold3_id\n lines_group = fold3_group\n lines_t = fold3_t\n lines_t2 = fold3_t2\n lines_t3 = fold3_t3\n lines_t4 = fold3_t4\n\n # set parameters\n predict_size = maxlen\n\n return lines, \\\n lines_id, \\\n lines_group, \\\n lines_t, \\\n lines_t2, \\\n lines_t3, \\\n lines_t4, \\\n maxlen, \\\n chars, \\\n chars_group, \\\n char_indices, \\\n char_indices_group, \\\n divisor, \\\n divisor2, \\\n divisor3, \\\n predict_size, \\\n target_indices_char, \\\n target_indices_char_group, \\\n target_char_indices, \\\n target_char_indices_group\n\n\n# selects traces verified by a declare model\ndef select_declare_verified_traces(server_replayer, path_to_declare_model_file, lines, lines_id, lines_group, lines_t, lines_t2,\n lines_t3, lines_t4, prefix=0):\n # select only lines with formula verified\n lines_v = []\n lines_id_v = []\n lines_group_v = []\n lines_t_v = []\n lines_t2_v = []\n lines_t3_v = []\n lines_t4_v = []\n for line, line_id, line_group, times, times2, times3, times4 in zip(lines,\n lines_id,\n lines_group,\n lines_t,\n lines_t2,\n lines_t3,\n lines_t4):\n\n if server_replayer.verify_with_data(path_to_declare_model_file, line_id, line, line_group, times4, prefix):\n lines_v.append(line)\n lines_id_v.append(line_id)\n lines_group_v.append(line_group)\n lines_t_v.append(times)\n lines_t2_v.append(times2)\n lines_t3_v.append(times3)\n lines_t4_v.append(times4)\n\n return lines_v, lines_id_v, lines_group_v, lines_t_v, lines_t2_v, lines_t3_v, lines_t4_v\n\n\n# selects traces verified by LTL formula\ndef select_formula_verified_traces(server_replayer, lines, lines_id, lines_group, lines_t, lines_t2, lines_t3,\n lines_t4, formula, prefix=0):\n # select only lines with formula verified\n lines_v = []\n lines_id_v = []\n lines_group_v = []\n lines_t_v = []\n lines_t2_v = []\n lines_t3_v = []\n lines_t4_v = []\n\n for line, line_id, line_group, times, times2, times3, times4 in zip(lines,\n lines_id,\n lines_group,\n lines_t,\n lines_t2,\n lines_t3,\n lines_t4):\n if server_replayer.verify_formula_as_compliant(line, formula, prefix):\n #if test_mp_checkers_traces(line, formula, prefix):\n lines_v.append(line)\n lines_id_v.append(line_id)\n lines_group_v.append(line_group)\n lines_t_v.append(times)\n lines_t2_v.append(times2)\n lines_t3_v.append(times3)\n lines_t4_v.append(times4)\n\n return 
lines_v, lines_id_v, lines_group_v, lines_t_v, lines_t2_v, lines_t3_v, lines_t4_v\n\n\n# define helper functions\n# this one encodes the current sentence into the onehot encoding\ndef encode(sentence, sentence_group, times, times3, maxlen, chars, chars_group,\n char_indices, char_indices_group, divisor, divisor2):\n num_features = len(chars) + len(chars_group) + 5\n x = np.zeros((1, maxlen, num_features), dtype=np.float32)\n leftpad = maxlen - len(sentence)\n times2 = np.cumsum(times)\n for t, char in enumerate(sentence):\n midnight = times3[t].replace(hour=0, minute=0, second=0, microsecond=0)\n timesincemidnight = times3[t] - midnight\n for c in chars:\n if c == char:\n x[0, t + leftpad, char_indices[c]] = 1\n for g in chars_group:\n if g == sentence_group[t]:\n x[0, t + leftpad, len(char_indices) + char_indices_group[g]] = 1\n x[0, t + leftpad, len(chars) + len(chars_group)] = t + 1\n x[0, t + leftpad, len(chars) + len(chars_group) + 1] = times[t] / divisor\n x[0, t + leftpad, len(chars) + len(chars_group) + 2] = times2[t] / divisor2\n x[0, t + leftpad, len(chars) + len(chars_group) + 3] = timesincemidnight.seconds / 86400\n x[0, t + leftpad, len(chars) + len(chars_group) + 4] = times3[t].weekday() / 7\n return x\n\n\n# modify to be able to get second best prediction\ndef get_symbol_ampl(predictions, target_indices_char, target_char_indices, start_of_the_cycle_symbol,\n stop_symbol_probability_amplifier_current, ith_best=0):\n a_pred = list(predictions)\n if start_of_the_cycle_symbol in target_char_indices:\n place_of_starting_symbol = target_char_indices[start_of_the_cycle_symbol]\n a_pred[place_of_starting_symbol] = a_pred[place_of_starting_symbol] / stop_symbol_probability_amplifier_current\n i = np.argsort(a_pred)[len(a_pred) - ith_best - 1]\n return target_indices_char[i]\n\n\n# modify to be able to get second best prediction\ndef adjust_probabilities(predictions, target_char_indices, start_of_the_cycle_symbol,\n stop_symbol_probability_amplifier_current):\n a_pred = list(predictions)\n if start_of_the_cycle_symbol in target_char_indices:\n place_of_starting_symbol = target_char_indices[start_of_the_cycle_symbol]\n a_pred[place_of_starting_symbol] = a_pred[place_of_starting_symbol] / stop_symbol_probability_amplifier_current\n return a_pred\n\n\n# find repetitions\ndef repetitions(s):\n r = re.compile(r\"(.+?)\\1+\")\n for match in r.finditer(s):\n yield (match.group(1), len(match.group(0)) / len(match.group(1)))\n\n\ndef amplify(s):\n list_of_rep = list(repetitions(s))\n if list_of_rep:\n str_rep = list_of_rep[-1][0]\n if s.endswith(str_rep):\n return np.math.exp(list_of_rep[-1][-1]), list_of_rep[-1][0][0]\n else:\n return 1, list_of_rep[-1][0][0]\n return 1, \" \"\n\n\ndef create_queue(activites, resources):\n queue = PriorityQueue()\n # resources_standardized = standardize_list(activites, resources)\n for activity_index in range(len(activites)):\n for resource_index in range(len(resources)):\n queue.put((-(np.log(activites[activity_index]) + np.log(resources[resource_index])),\n [activity_index, resource_index]))\n return queue\n\n\ndef standardize_list(list1, list2):\n len1 = float(len(list1))\n len2 = float(len(list2))\n weight = len2 / len1\n #standardized_list = map(lambda x: weight * x, list2)\n standardized_list = [weight * x for x in list2]\n return standardized_list\n" ]
[ [ "numpy.math.exp", "numpy.log", "numpy.cumsum", "numpy.max", "numpy.argsort", "numpy.zeros" ] ]
shingte/CarND-Vehicle-Detection
[ "38b0f9645f1300836f5877a33c612f004e0aaa5b" ]
[ "mrcnn/visualize.py" ]
[ "\"\"\"\nMask R-CNN\nDisplay and Visualization Functions.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\nAdd plot param to display_instances() - S.Li\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport random\nimport itertools\nimport colorsys\nimport cv2\n\nimport numpy as np\nfrom skimage.measure import find_contours\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches, lines\nfrom matplotlib.patches import Polygon\nimport IPython.display\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\n\n\n############################################################\n# Visualization\n############################################################\n\ndef display_images(images, titles=None, cols=4, cmap=None, norm=None,\n interpolation=None):\n \"\"\"Display the given set of images, optionally with titles.\n images: list or array of image tensors in HWC format.\n titles: optional. A list of titles to display with each image.\n cols: number of images per row\n cmap: Optional. Color map to use. For example, \"Blues\".\n norm: Optional. A Normalize instance to map values to colors.\n interpolation: Optional. Image interporlation to use for display.\n \"\"\"\n titles = titles if titles is not None else [\"\"] * len(images)\n rows = len(images) // cols + 1\n plt.figure(figsize=(14, 14 * rows // cols))\n i = 1\n for image, title in zip(images, titles):\n plt.subplot(rows, cols, i)\n plt.title(title, fontsize=9)\n plt.axis('off')\n plt.imshow(image.astype(np.uint8), cmap=cmap,\n norm=norm, interpolation=interpolation)\n i += 1\n plt.show()\n\n\ndef random_colors(N, bright=True):\n \"\"\"\n Generate random colors.\n To get visually distinct colors, generate them in HSV space then\n convert to RGB.\n \"\"\"\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors\n\n\ndef apply_mask(image, mask, color, alpha=0.5):\n \"\"\"Apply the given mask to the image.\n \"\"\"\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n return image\n\n\ndef display_instances(image, boxes, masks, class_ids, class_names,\n scores=None, title=\"\",\n figsize=(16, 16), ax=None,\n show_mask=True, show_bbox=True,\n colors=None, captions=None, plot=False, car_only=True):\n \"\"\"\n boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n masks: [height, width, num_instances]\n class_ids: [num_instances]\n class_names: list of class names of the dataset\n scores: (optional) confidence scores for each box\n title: (optional) Figure title\n show_mask, show_bbox: To show masks and bounding boxes or not\n figsize: (optional) the size of the image\n colors: (optional) An array or colors to use with each object\n captions: (optional) A list of strings to use as captions for each object\n plot: plot the instance\n car_only: for vehicle detection only\n \"\"\"\n # Number of instances\n N = boxes.shape[0]\n if not N:\n if plot:\n print(\"\\n*** No instances to display *** \\n\")\n else:\n return image\n else:\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n # If no axis is passed, create one and automatically call show()\n auto_show = False\n if plot and not ax:\n _, 
ax = plt.subplots(1, figsize=figsize)\n auto_show = True\n\n # Generate random colors\n colors = colors or random_colors(N)\n\n # Show area outside image boundaries.\n height, width = image.shape[:2]\n if plot:\n ax.set_ylim(height + 10, -10)\n ax.set_xlim(-10, width + 10)\n ax.axis('off')\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n if car_only and class_ids[i] != 3 and class_ids[i] != 6 and class_ids[i] != 8:\n continue\n\n color = colors[i]\n\n # Bounding box\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in image cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n if show_bbox:\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=0.7, linestyle=\"dashed\",\n edgecolor=color, facecolor='none')\n if plot:\n ax.add_patch(p)\n if car_only:\n masked_image = masked_image.astype(np.uint8)\n #int_color = (int(color[0]*255), int(color[1]*255), int(color[2]*255))\n cv2.rectangle(masked_image, (x1, y1), (x2, y2), (150,255,150), 2) \n masked_image = masked_image.astype(np.uint32)\n\n # Label\n if not captions:\n class_id = class_ids[i]\n score = scores[i] if scores is not None else None\n label = class_names[class_id]\n x = random.randint(x1, (x1 + x2) // 2)\n caption = \"{} {:.3f}\".format(label, score) if score else label\n else:\n caption = captions[i]\n if plot:\n ax.text(x1, y1 + 8, caption,\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n mask = masks[:, :, i]\n if show_mask:\n masked_image = apply_mask(masked_image, mask, color)\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n if car_only == False:\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n if plot:\n ax.add_patch(p)\n if plot:\n ax.imshow(masked_image.astype(np.uint8))\n if auto_show:\n plt.show()\n else: \n return masked_image.astype(np.uint8)\n\n\n\ndef display_differences(image,\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n class_names, title=\"\", ax=None,\n show_mask=True, show_box=True,\n iou_threshold=0.5, score_threshold=0.5):\n \"\"\"Display ground truth and prediction instances on the same image.\"\"\"\n # Match predictions to ground truth\n gt_match, pred_match, overlaps = utils.compute_matches(\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_threshold=iou_threshold, score_threshold=score_threshold)\n # Ground truth = green. 
Predictions = red\n colors = [(0, 1, 0, .8)] * len(gt_match)\\\n + [(1, 0, 0, 1)] * len(pred_match)\n # Concatenate GT and predictions\n class_ids = np.concatenate([gt_class_id, pred_class_id])\n scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])\n boxes = np.concatenate([gt_box, pred_box])\n masks = np.concatenate([gt_mask, pred_mask], axis=-1)\n # Captions per instance show score/IoU\n captions = [\"\" for m in gt_match] + [\"{:.2f} / {:.2f}\".format(\n pred_score[i],\n (overlaps[i, int(pred_match[i])]\n if pred_match[i] > -1 else overlaps[i].max()))\n for i in range(len(pred_match))]\n # Set title if not provided\n title = title or \"Ground Truth and Detections\\n GT=green, pred=red, captions: score/IoU\"\n # Display\n display_instances(\n image,\n boxes, masks, class_ids,\n class_names, scores, ax=ax,\n show_bbox=show_box, show_mask=show_mask,\n colors=colors, captions=captions,\n title=title)\n\n\ndef draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):\n \"\"\"\n anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.\n proposals: [n, 4] the same anchors but refined to fit objects better.\n \"\"\"\n masked_image = image.copy()\n\n # Pick random anchors in case there are too many.\n ids = np.arange(rois.shape[0], dtype=np.int32)\n ids = np.random.choice(\n ids, limit, replace=False) if ids.shape[0] > limit else ids\n\n fig, ax = plt.subplots(1, figsize=(12, 12))\n if rois.shape[0] > limit:\n plt.title(\"Showing {} random ROIs out of {}\".format(\n len(ids), rois.shape[0]))\n else:\n plt.title(\"{} ROIs\".format(len(ids)))\n\n # Show area outside image boundaries.\n ax.set_ylim(image.shape[0] + 20, -20)\n ax.set_xlim(-50, image.shape[1] + 20)\n ax.axis('off')\n\n for i, id in enumerate(ids):\n color = np.random.rand(3)\n class_id = class_ids[id]\n # ROI\n y1, x1, y2, x2 = rois[id]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n edgecolor=color if class_id else \"gray\",\n facecolor='none', linestyle=\"dashed\")\n ax.add_patch(p)\n # Refined ROI\n if class_id:\n ry1, rx1, ry2, rx2 = refined_rois[id]\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal for easy visualization\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Label\n label = class_names[class_id]\n ax.text(rx1, ry1 + 8, \"{}\".format(label),\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n m = utils.unmold_mask(mask[id], rois[id]\n [:4].astype(np.int32), image.shape)\n masked_image = apply_mask(masked_image, m, color)\n\n ax.imshow(masked_image)\n\n # Print stats\n print(\"Positive ROIs: \", class_ids[class_ids > 0].shape[0])\n print(\"Negative ROIs: \", class_ids[class_ids == 0].shape[0])\n print(\"Positive Ratio: {:.2f}\".format(\n class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))\n\n\n# TODO: Replace with matplotlib equivalent?\ndef draw_box(image, box, color):\n \"\"\"Draw 3-pixel width bounding boxes on the given image array.\n color: list of 3 int values for RGB.\n \"\"\"\n y1, x1, y2, x2 = box\n image[y1:y1 + 2, x1:x2] = color\n image[y2:y2 + 2, x1:x2] = color\n image[y1:y2, x1:x1 + 2] = color\n image[y1:y2, x2:x2 + 2] = color\n return image\n\n\ndef display_top_masks(image, mask, class_ids, class_names, limit=4):\n \"\"\"Display the given image and the top few class masks.\"\"\"\n to_display = []\n titles = []\n to_display.append(image)\n titles.append(\"H x 
W={}x{}\".format(image.shape[0], image.shape[1]))\n # Pick top prominent classes in this image\n unique_class_ids = np.unique(class_ids)\n mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])\n for i in unique_class_ids]\n top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),\n key=lambda r: r[1], reverse=True) if v[1] > 0]\n # Generate images and titles\n for i in range(limit):\n class_id = top_ids[i] if i < len(top_ids) else -1\n # Pull masks of instances belonging to the same class.\n m = mask[:, :, np.where(class_ids == class_id)[0]]\n m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)\n to_display.append(m)\n titles.append(class_names[class_id] if class_id != -1 else \"-\")\n display_images(to_display, titles=titles, cols=limit + 1, cmap=\"Blues_r\")\n\n\ndef plot_precision_recall(AP, precisions, recalls):\n \"\"\"Draw the precision-recall curve.\n\n AP: Average precision at IoU >= 0.5\n precisions: list of precision values\n recalls: list of recall values\n \"\"\"\n # Plot the Precision-Recall curve\n _, ax = plt.subplots(1)\n ax.set_title(\"Precision-Recall Curve. AP@50 = {:.3f}\".format(AP))\n ax.set_ylim(0, 1.1)\n ax.set_xlim(0, 1.1)\n _ = ax.plot(recalls, precisions)\n\n\ndef plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,\n overlaps, class_names, threshold=0.5):\n \"\"\"Draw a grid showing how ground truth objects are classified.\n gt_class_ids: [N] int. Ground truth class IDs\n pred_class_id: [N] int. Predicted class IDs\n pred_scores: [N] float. The probability scores of predicted classes\n overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictins and GT boxes.\n class_names: list of all class names in the dataset\n threshold: Float. The prediction probability required to predict a class\n \"\"\"\n gt_class_ids = gt_class_ids[gt_class_ids != 0]\n pred_class_ids = pred_class_ids[pred_class_ids != 0]\n\n plt.figure(figsize=(12, 10))\n plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)\n plt.yticks(np.arange(len(pred_class_ids)),\n [\"{} ({:.2f})\".format(class_names[int(id)], pred_scores[i])\n for i, id in enumerate(pred_class_ids)])\n plt.xticks(np.arange(len(gt_class_ids)),\n [class_names[int(id)] for id in gt_class_ids], rotation=90)\n\n thresh = overlaps.max() / 2.\n for i, j in itertools.product(range(overlaps.shape[0]),\n range(overlaps.shape[1])):\n text = \"\"\n if overlaps[i, j] > threshold:\n text = \"match\" if gt_class_ids[j] == pred_class_ids[i] else \"wrong\"\n color = (\"white\" if overlaps[i, j] > thresh\n else \"black\" if overlaps[i, j] > 0\n else \"grey\")\n plt.text(j, i, \"{:.3f}\\n{}\".format(overlaps[i, j], text),\n horizontalalignment=\"center\", verticalalignment=\"center\",\n fontsize=9, color=color)\n\n plt.tight_layout()\n plt.xlabel(\"Ground Truth\")\n plt.ylabel(\"Predictions\")\n\n\ndef draw_boxes(image, boxes=None, refined_boxes=None,\n masks=None, captions=None, visibilities=None,\n title=\"\", ax=None):\n \"\"\"Draw bounding boxes and segmentation masks with differnt\n customizations.\n\n boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.\n refined_boxes: Like boxes, but draw with solid lines to show\n that they're the result of refining 'boxes'.\n masks: [N, height, width]\n captions: List of N titles to display on each box\n visibilities: (optional) List of values of 0, 1, or 2. 
Determine how\n prominant each bounding box should be.\n title: An optional title to show over the image\n ax: (optional) Matplotlib axis to draw on.\n \"\"\"\n # Number of boxes\n assert boxes is not None or refined_boxes is not None\n N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]\n\n # Matplotlib Axis\n if not ax:\n _, ax = plt.subplots(1, figsize=(12, 12))\n\n # Generate random colors\n colors = random_colors(N)\n\n # Show area outside image boundaries.\n margin = image.shape[0] // 10\n ax.set_ylim(image.shape[0] + margin, -margin)\n ax.set_xlim(-margin, image.shape[1] + margin)\n ax.axis('off')\n\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n # Box visibility\n visibility = visibilities[i] if visibilities is not None else 1\n if visibility == 0:\n color = \"gray\"\n style = \"dotted\"\n alpha = 0.5\n elif visibility == 1:\n color = colors[i]\n style = \"dotted\"\n alpha = 1\n elif visibility == 2:\n color = colors[i]\n style = \"solid\"\n alpha = 1\n\n # Boxes\n if boxes is not None:\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=alpha, linestyle=style,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Refined boxes\n if refined_boxes is not None and visibility > 0:\n ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal\n if boxes is not None:\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Captions\n if captions is not None:\n caption = captions[i]\n # If there are refined boxes, display captions on them\n if refined_boxes is not None:\n y1, x1, y2, x2 = ry1, rx1, ry2, rx2\n x = random.randint(x1, (x1 + x2) // 2)\n ax.text(x1, y1, caption, size=11, verticalalignment='top',\n color='w', backgroundcolor=\"none\",\n bbox={'facecolor': color, 'alpha': 0.5,\n 'pad': 2, 'edgecolor': 'none'})\n\n # Masks\n if masks is not None:\n mask = masks[:, :, i]\n masked_image = apply_mask(masked_image, mask, color)\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n\n\ndef display_table(table):\n \"\"\"Display values in a table format.\n table: an iterable of rows, and each row is an iterable of values.\n \"\"\"\n html = \"\"\n for row in table:\n row_html = \"\"\n for col in row:\n row_html += \"<td>{:40}</td>\".format(str(col))\n html += \"<tr>\" + row_html + \"</tr>\"\n html = \"<table>\" + html + \"</table>\"\n IPython.display.display(IPython.display.HTML(html))\n\n\ndef display_weight_stats(model):\n \"\"\"Scans all the weights in the model and returns a list of tuples\n that contain stats about each weight.\n \"\"\"\n layers = model.get_trainable_layers()\n table = [[\"WEIGHT NAME\", \"SHAPE\", \"MIN\", \"MAX\", \"STD\"]]\n for l in layers:\n weight_values = l.get_weights() # list of Numpy arrays\n weight_tensors = l.weights # list of TF 
tensors\n for i, w in enumerate(weight_values):\n weight_name = weight_tensors[i].name\n # Detect problematic layers. Exclude biases of conv layers.\n alert = \"\"\n if w.min() == w.max() and not (l.__class__.__name__ == \"Conv2D\" and i == 1):\n alert += \"<span style='color:red'>*** dead?</span>\"\n if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:\n alert += \"<span style='color:red'>*** Overflow?</span>\"\n # Add row\n table.append([\n weight_name + alert,\n str(w.shape),\n \"{:+9.4f}\".format(w.min()),\n \"{:+10.4f}\".format(w.max()),\n \"{:+9.4f}\".format(w.std()),\n ])\n display_table(table)\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.concatenate", "numpy.any", "numpy.where", "matplotlib.patches.Polygon", "matplotlib.pyplot.tight_layout", "numpy.unique", "numpy.fliplr", "numpy.arange", "matplotlib.pyplot.subplot", "matplotlib.pyplot.axis", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.random.choice", "matplotlib.patches.Rectangle", "numpy.random.rand", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.lines.Line2D", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlabel" ] ]
pramenku/tensorflow-upstream
[ "1464c9b4823d88c19e2969170c6149a58d4ec6cd" ]
[ "tensorflow/python/keras/backend_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras backend.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gc\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport scipy.sparse\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import combinations\nfrom tensorflow.python.keras.engine import input_layer\nfrom tensorflow.python.keras.layers import advanced_activations\nfrom tensorflow.python.keras.layers import normalization\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import tf_inspect\n\n\ndef compare_single_input_op_to_numpy(keras_op,\n np_op,\n input_shape,\n dtype='float32',\n negative_values=True,\n keras_args=None,\n keras_kwargs=None,\n np_args=None,\n np_kwargs=None):\n keras_args = keras_args or []\n keras_kwargs = keras_kwargs or {}\n np_args = np_args or []\n np_kwargs = np_kwargs or {}\n inputs = 2. 
* np.random.random(input_shape)\n if negative_values:\n inputs -= 1.\n keras_output = keras_op(\n backend.variable(inputs, dtype=dtype), *keras_args, **keras_kwargs)\n keras_output = backend.eval(keras_output)\n np_output = np_op(inputs.astype(dtype), *np_args, **np_kwargs)\n try:\n np.testing.assert_allclose(keras_output, np_output, atol=1e-4)\n except AssertionError:\n raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '\n 'Expected ' + str(np_output) + ' but got ' +\n str(keras_output))\n\n\ndef compare_two_inputs_op_to_numpy(keras_op,\n np_op,\n input_shape_a,\n input_shape_b,\n dtype='float32',\n keras_args=None,\n keras_kwargs=None,\n np_args=None,\n np_kwargs=None):\n keras_args = keras_args or []\n keras_kwargs = keras_kwargs or {}\n np_args = np_args or []\n np_kwargs = np_kwargs or {}\n input_a = np.random.random(input_shape_a)\n input_b = np.random.random(input_shape_b)\n keras_output = keras_op(\n backend.variable(input_a, dtype=dtype),\n backend.variable(input_b, dtype=dtype), *keras_args, **keras_kwargs)\n keras_output = backend.eval(keras_output)\n np_output = np_op(input_a.astype(dtype), input_b.astype(dtype),\n *np_args, **np_kwargs)\n try:\n np.testing.assert_allclose(keras_output, np_output, atol=1e-4)\n except AssertionError:\n raise AssertionError('Test for op `' + str(keras_op.__name__) + '` failed; '\n 'Expected ' + str(np_output) + ' but got ' +\n str(keras_output))\n\n\nclass BackendResetTest(test.TestCase, parameterized.TestCase):\n\n def test_new_config(self):\n # User defined jit setting\n config.set_optimizer_jit(False)\n sess = backend.get_session()\n default_config = context.context().config\n self.assertEqual(\n sess._config.graph_options.optimizer_options.global_jit_level,\n default_config.graph_options.optimizer_options.global_jit_level)\n backend.clear_session()\n\n # New session has the same jit setting\n sess = backend.get_session()\n default_config = context.context().config\n self.assertEqual(\n sess._config.graph_options.optimizer_options.global_jit_level,\n default_config.graph_options.optimizer_options.global_jit_level)\n backend.clear_session()\n\n # Change respected\n config.set_optimizer_jit(True)\n sess = backend.get_session()\n default_config = context.context().config\n self.assertEqual(\n sess._config.graph_options.optimizer_options.global_jit_level,\n default_config.graph_options.optimizer_options.global_jit_level)\n backend.clear_session()\n\n # We can't use the normal parameterized decorator because the test session\n # will block graph clearing.\n @parameterized.named_parameters(('_v1', context.graph_mode),\n ('_v2', context.eager_mode))\n def test_new_graph(self, test_context):\n with test_context():\n g_old = backend.get_graph()\n backend.clear_session()\n g = backend.get_graph()\n\n assert g_old is not g\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass BackendUtilsTest(test.TestCase):\n\n def test_backend(self):\n self.assertEqual(backend.backend(), 'tensorflow')\n\n def test_get_reset_uids(self):\n self.assertEqual(backend.get_uid('foo'), 1)\n self.assertEqual(backend.get_uid('foo'), 2)\n\n backend.reset_uids()\n self.assertEqual(backend.get_uid('foo'), 1)\n\n def test_learning_phase(self):\n with self.cached_session() as sess:\n with self.assertRaises(ValueError):\n backend.set_learning_phase(2)\n\n # Test running with a learning-phase-consuming layer\n with backend.learning_phase_scope(0):\n x = input_layer.Input((3,))\n y = normalization.BatchNormalization()(x)\n if not 
context.executing_eagerly():\n self.evaluate(variables.global_variables_initializer())\n sess.run(y, feed_dict={x: np.random.random((2, 3))})\n\n def test_learning_phase_name(self):\n with ops.name_scope('test_scope'):\n # Test that outer name scopes do not affect the learning phase's name.\n lp = backend.symbolic_learning_phase()\n self.assertEqual(lp.name, 'keras_learning_phase:0')\n\n def test_learning_phase_scope(self):\n initial_learning_phase = backend.learning_phase()\n with backend.learning_phase_scope(1):\n self.assertEqual(backend.learning_phase(), 1)\n self.assertEqual(backend.learning_phase(), initial_learning_phase)\n with backend.learning_phase_scope(0):\n self.assertEqual(backend.learning_phase(), 0)\n self.assertEqual(backend.learning_phase(), initial_learning_phase)\n with self.assertRaises(ValueError):\n with backend.learning_phase_scope(None):\n pass\n self.assertEqual(backend.learning_phase(), initial_learning_phase)\n\n new_learning_phase = 0\n backend.set_learning_phase(new_learning_phase)\n self.assertEqual(backend.learning_phase(), new_learning_phase)\n with backend.learning_phase_scope(1):\n self.assertEqual(backend.learning_phase(), 1)\n self.assertEqual(backend.learning_phase(), new_learning_phase)\n\n def test_learning_phase_scope_in_graph(self):\n initial_learning_phase_outside_graph = backend.learning_phase()\n with backend.get_graph().as_default():\n initial_learning_phase_in_graph = backend.learning_phase()\n\n self.assertEqual(backend.learning_phase(),\n initial_learning_phase_outside_graph)\n with backend.learning_phase_scope(1):\n self.assertEqual(backend.learning_phase(), 1)\n self.assertEqual(backend.learning_phase(),\n initial_learning_phase_outside_graph)\n\n with backend.get_graph().as_default():\n self.assertIs(backend.learning_phase(), initial_learning_phase_in_graph)\n\n self.assertEqual(backend.learning_phase(),\n initial_learning_phase_outside_graph)\n\n def test_int_shape(self):\n x = backend.ones(shape=(3, 4))\n self.assertEqual(backend.int_shape(x), (3, 4))\n\n if not context.executing_eagerly():\n x = backend.placeholder(shape=(None, 4))\n self.assertEqual(backend.int_shape(x), (None, 4))\n\n def test_in_train_phase(self):\n y1 = backend.variable(1)\n y2 = backend.variable(2)\n if context.executing_eagerly():\n with backend.learning_phase_scope(0):\n y_val_test = backend.in_train_phase(y1, y2).numpy()\n with backend.learning_phase_scope(1):\n y_val_train = backend.in_train_phase(y1, y2).numpy()\n else:\n y = backend.in_train_phase(y1, y2)\n f = backend.function([backend.learning_phase()], [y])\n y_val_test = f([0])[0]\n y_val_train = f([1])[0]\n self.assertAllClose(y_val_test, 2)\n self.assertAllClose(y_val_train, 1)\n\n def test_is_keras_tensor(self):\n x = backend.variable(1)\n self.assertEqual(backend.is_keras_tensor(x), False)\n x = input_layer.Input(shape=(1,))\n self.assertEqual(backend.is_keras_tensor(x), True)\n x = input_layer.Input(shape=(None,), ragged=True)\n self.assertEqual(backend.is_keras_tensor(x), True)\n x = input_layer.Input(shape=(None, None), sparse=True)\n self.assertEqual(backend.is_keras_tensor(x), True)\n with self.assertRaises(ValueError):\n backend.is_keras_tensor(0)\n\n def test_stop_gradient(self):\n x = backend.variable(1)\n y = backend.stop_gradient(x)\n if not context.executing_eagerly():\n self.assertEqual(y.op.name[:12], 'StopGradient')\n\n xs = [backend.variable(1) for _ in range(3)]\n ys = backend.stop_gradient(xs)\n if not context.executing_eagerly():\n for y in ys:\n self.assertEqual(y.op.name[:12], 
'StopGradient')\n\n def test_placeholder(self):\n x = backend.placeholder(shape=(3, 4))\n self.assertEqual(x.shape.as_list(), [3, 4])\n x = backend.placeholder(shape=(3, 4), sparse=True)\n self.assertEqual(x.shape.as_list(), [3, 4])\n\n def test_is_placeholder(self):\n x = backend.placeholder(shape=(1,))\n self.assertEqual(backend.is_placeholder(x), True)\n x = backend.variable(1)\n self.assertEqual(backend.is_placeholder(x), False)\n\n def test_print_tensor(self):\n # Unfortunately it seems impossible to use `mock` (or any other method)\n # to capture stdout when used inside a graph or graph function, thus\n # we cannot test correctness.\n # The message gets correctly printed in practice.\n x = backend.placeholder(shape=())\n y = backend.print_tensor(x, 'eager=%s' % context.executing_eagerly())\n f = backend.function(x, y)\n f(0)\n\n def test_cast_to_floatx(self):\n x = backend.variable(1, dtype='float64')\n x = backend.cast_to_floatx(x)\n self.assertEqual(x.dtype.name, 'float32')\n x = backend.cast_to_floatx(2)\n self.assertEqual(x.dtype.name, 'float32')\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass BackendVariableTest(test.TestCase):\n\n def test_zeros(self):\n x = backend.zeros((3, 4))\n val = backend.eval(x)\n self.assertAllClose(val, np.zeros((3, 4)))\n\n def test_ones(self):\n x = backend.ones((3, 4))\n val = backend.eval(x)\n self.assertAllClose(val, np.ones((3, 4)))\n\n def test_eye(self):\n x = backend.eye(4)\n val = backend.eval(x)\n self.assertAllClose(val, np.eye(4))\n\n def test_zeros_like(self):\n x = backend.zeros((3, 4))\n y = backend.zeros_like(x)\n val = backend.eval(y)\n self.assertAllClose(val, np.zeros((3, 4)))\n\n def test_ones_like(self):\n x = backend.zeros((3, 4))\n y = backend.ones_like(x)\n val = backend.eval(y)\n self.assertAllClose(val, np.ones((3, 4)))\n\n def test_random_uniform_variable(self):\n x = backend.random_uniform_variable((30, 20), low=1, high=2, seed=0)\n val = backend.eval(x)\n self.assertAllClose(val.mean(), 1.5, atol=1e-1)\n self.assertAllClose(val.max(), 2., atol=1e-1)\n self.assertAllClose(val.min(), 1., atol=1e-1)\n\n def test_random_normal_variable(self):\n x = backend.random_normal_variable((30, 20), 1., 0.5, seed=0)\n val = backend.eval(x)\n self.assertAllClose(val.mean(), 1., atol=1e-1)\n self.assertAllClose(val.std(), 0.5, atol=1e-1)\n\n def test_count_params(self):\n x = backend.zeros((4, 5))\n val = backend.count_params(x)\n self.assertAllClose(val, 20)\n\n def test_constant(self):\n ref_val = np.random.random((3, 4)).astype('float32')\n x = backend.constant(ref_val)\n val = backend.eval(x)\n self.assertAllClose(val, ref_val)\n\n def test_sparse_variable(self):\n val = scipy.sparse.eye(10)\n x = backend.variable(val)\n self.assertTrue(isinstance(x, sparse_tensor.SparseTensor))\n\n y = backend.to_dense(x)\n self.assertFalse(backend.is_sparse(y))\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass BackendLinearAlgebraTest(test.TestCase, parameterized.TestCase):\n\n def test_dot(self):\n x = backend.ones(shape=(2, 3))\n y = backend.ones(shape=(3, 4))\n xy = backend.dot(x, y)\n self.assertEqual(xy.shape.as_list(), [2, 4])\n\n x = backend.ones(shape=(32, 28, 3))\n y = backend.ones(shape=(3, 4))\n xy = backend.dot(x, y)\n self.assertEqual(xy.shape.as_list(), [32, 28, 4])\n\n @parameterized.parameters(\n [(2, 3, 4, 5), (2, 5, 6, 7), (2, 3, 4, 6, 7), (3, 1)],\n [(2, 20, 1), (2, 30, 20), (2, 1, 30), (1, 2)],\n [(4, 2, 3), (4, 5, 3), (4, 2, 5), (2, 2)],\n [(4, 2), (4, 2, 3), (4, 
3), (1, 1)],\n [(4, 2), (4, 2, 3), (4, 3), 1],\n [(4, 2, 3), (4, 3), (4, 2), (2, 1)],\n )\n def test_batch_dot(self, x_shape, y_shape, output_shape, axes):\n x_val = np.random.random(x_shape)\n y_val = np.random.random(y_shape)\n x = backend.variable(x_val)\n y = backend.variable(y_val)\n xy = backend.batch_dot(x, y, axes=axes)\n self.assertEqual(tuple(xy.shape.as_list()), output_shape)\n xy_val = backend.eval(xy)\n ref_val = self._reference_batch_dot(x_val, y_val, axes)\n self.assertAllClose(xy_val, ref_val, atol=1e-5)\n\n def _reference_batch_dot(self, x, y, axes):\n if isinstance(axes, int):\n axes = [axes, axes]\n elif isinstance(axes, tuple):\n axes = list(axes)\n if axes is None:\n if y.ndim == 2:\n axes = [x.ndim - 1, y.ndim - 1]\n else:\n axes = [x.ndim - 1, y.ndim - 2]\n if axes[0] < 0:\n axes[0] += x.ndim\n if axes[1] < 0:\n axes[1] += y.ndim\n result = []\n axes = [axes[0] - 1, axes[1] - 1]\n for xi, yi in zip(x, y):\n result.append(np.tensordot(xi, yi, axes))\n result = np.array(result)\n if result.ndim == 1:\n result = np.expand_dims(result, -1)\n return result\n\n def test_reduction_ops(self):\n ops_to_test = [\n (backend.max, np.max),\n (backend.min, np.min),\n (backend.sum, np.sum),\n (backend.prod, np.prod),\n (backend.var, np.var),\n (backend.std, np.std),\n (backend.mean, np.mean),\n (backend.argmin, np.argmin),\n (backend.argmax, np.argmax),\n ]\n for keras_op, np_op in ops_to_test:\n compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),\n keras_kwargs={'axis': 1},\n np_kwargs={'axis': 1})\n compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7, 5),\n keras_kwargs={'axis': -1},\n np_kwargs={'axis': -1})\n if 'keepdims' in tf_inspect.getargspec(keras_op).args:\n compare_single_input_op_to_numpy(keras_op, np_op,\n input_shape=(4, 7, 5),\n keras_kwargs={'axis': 1,\n 'keepdims': True},\n np_kwargs={'axis': 1,\n 'keepdims': True})\n\n def test_elementwise_ops(self):\n ops_to_test = [\n (backend.square, np.square),\n (backend.abs, np.abs),\n (backend.round, np.round),\n (backend.sign, np.sign),\n (backend.sin, np.sin),\n (backend.cos, np.cos),\n (backend.exp, np.exp),\n ]\n for keras_op, np_op in ops_to_test:\n compare_single_input_op_to_numpy(keras_op, np_op, input_shape=(4, 7))\n\n ops_to_test = [\n (backend.sqrt, np.sqrt),\n (backend.log, np.log),\n ]\n for keras_op, np_op in ops_to_test:\n compare_single_input_op_to_numpy(keras_op, np_op,\n input_shape=(4, 7),\n negative_values=False)\n\n compare_single_input_op_to_numpy(\n backend.clip,\n np.clip,\n input_shape=(6, 4),\n keras_kwargs={\n 'min_value': 0.1,\n 'max_value': 2.4\n },\n np_kwargs={\n 'a_min': 0.1,\n 'a_max': 1.4\n })\n\n compare_single_input_op_to_numpy(\n backend.pow, np.power, input_shape=(6, 4), keras_args=[3], np_args=[3])\n\n def test_two_tensor_ops(self):\n ops_to_test = [\n (backend.equal, np.equal),\n (backend.not_equal, np.not_equal),\n (backend.greater, np.greater),\n (backend.greater_equal, np.greater_equal),\n (backend.less, np.less),\n (backend.less_equal, np.less_equal),\n (backend.maximum, np.maximum),\n (backend.minimum, np.minimum),\n ]\n for keras_op, np_op in ops_to_test:\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 7),\n input_shape_b=(4, 7))\n\n def test_relu(self):\n x = ops.convert_to_tensor_v2([[-4, 0], [2, 7]], 'float32')\n\n # standard relu\n relu_op = backend.relu(x)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 7]])\n\n # alpha (leaky relu used)\n relu_op = backend.relu(x, alpha=0.5)\n if not 
context.executing_eagerly():\n self.assertTrue('LeakyRelu' in relu_op.name)\n self.assertAllClose(backend.eval(relu_op), [[-2, 0], [2, 7]])\n\n # max_value < some elements\n relu_op = backend.relu(x, max_value=5)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 5]])\n\n # nn.relu6 used\n relu_op = backend.relu(x, max_value=6)\n if not context.executing_eagerly():\n self.assertTrue('Relu6' in relu_op.name) # uses tf.nn.relu6\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 6]])\n\n # max value > 6\n relu_op = backend.relu(x, max_value=10)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 7]])\n\n # max value is float\n relu_op = backend.relu(x, max_value=4.3)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 4.3]])\n\n # max value == 0\n relu_op = backend.relu(x, max_value=0)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [0, 0]])\n\n # alpha and max_value\n relu_op = backend.relu(x, alpha=0.25, max_value=3)\n self.assertAllClose(backend.eval(relu_op), [[-1, 0], [2, 3]])\n\n # threshold\n relu_op = backend.relu(x, threshold=3)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [0, 7]])\n\n # threshold is float\n relu_op = backend.relu(x, threshold=1.5)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [2, 7]])\n\n # threshold is negative\n relu_op = backend.relu(x, threshold=-5)\n self.assertAllClose(backend.eval(relu_op), [[-4, 0], [2, 7]])\n\n # threshold and max_value\n relu_op = backend.relu(x, threshold=3, max_value=5)\n self.assertAllClose(backend.eval(relu_op), [[0, 0], [0, 5]])\n\n # threshold and alpha\n relu_op = backend.relu(x, alpha=0.25, threshold=4)\n self.assertAllClose(backend.eval(relu_op), [[-2, -1], [-0.5, 7]])\n\n # threshold, alpha, and max_value\n relu_op = backend.relu(x, alpha=0.25, threshold=4, max_value=5)\n self.assertAllClose(backend.eval(relu_op), [[-2, -1], [-0.5, 5]])\n\n # Test case for GitHub issue 35430, with integer dtype\n x = input_layer.Input(shape=(), name='x', dtype='int64')\n _ = advanced_activations.ReLU(max_value=100, dtype='int64')(x)\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass BackendShapeOpsTest(test.TestCase):\n\n def test_reshape(self):\n compare_single_input_op_to_numpy(\n backend.reshape,\n np.reshape,\n input_shape=(4, 7),\n keras_args=[(2, 14)],\n np_args=[(2, 14)])\n\n def test_concatenate(self):\n a = backend.variable(np.ones((1, 2, 3)))\n b = backend.variable(np.ones((1, 2, 2)))\n y = backend.concatenate([a, b], axis=-1)\n self.assertEqual(y.shape.as_list(), [1, 2, 5])\n\n def test_permute_dimensions(self):\n compare_single_input_op_to_numpy(\n backend.permute_dimensions,\n np.transpose,\n input_shape=(4, 7),\n keras_args=[(1, 0)],\n np_args=[(1, 0)])\n\n def test_resize_images(self):\n height_factor = 2\n width_factor = 2\n data_format = 'channels_last'\n x = backend.variable(np.ones((1, 2, 2, 3)))\n y = backend.resize_images(x, height_factor, width_factor, data_format)\n self.assertEqual(y.shape.as_list(), [1, 4, 4, 3])\n\n data_format = 'channels_first'\n x = backend.variable(np.ones((1, 3, 2, 2)))\n y = backend.resize_images(x, height_factor, width_factor, data_format)\n self.assertEqual(y.shape.as_list(), [1, 3, 4, 4])\n\n # Invalid use:\n with self.assertRaises(ValueError):\n backend.resize_images(\n x, height_factor, width_factor, data_format='unknown')\n\n def test_resize_volumes(self):\n height_factor = 2\n width_factor = 2\n depth_factor = 2\n data_format = 'channels_last'\n x = backend.variable(np.ones((1, 2, 2, 2, 3)))\n y = 
backend.resize_volumes(x, depth_factor, height_factor, width_factor,\n data_format)\n self.assertEqual(y.shape.as_list(), [1, 4, 4, 4, 3])\n\n data_format = 'channels_first'\n x = backend.variable(np.ones((1, 3, 2, 2, 2)))\n y = backend.resize_volumes(x, depth_factor, height_factor, width_factor,\n data_format)\n self.assertEqual(y.shape.as_list(), [1, 3, 4, 4, 4])\n\n # Invalid use:\n with self.assertRaises(ValueError):\n backend.resize_volumes(\n x, depth_factor, height_factor, width_factor, data_format='unknown')\n\n def test_repeat_elements(self):\n x = backend.variable(np.ones((1, 3, 2)))\n y = backend.repeat_elements(x, 3, axis=1)\n self.assertEqual(y.shape.as_list(), [1, 9, 2])\n\n # Use with a dynamic axis:\n if not context.executing_eagerly():\n x = backend.placeholder(shape=(2, None, 2))\n y = backend.repeat_elements(x, 3, axis=1)\n self.assertEqual(y.shape.as_list(), [2, None, 2])\n\n def test_repeat(self):\n x = backend.variable(np.ones((1, 3)))\n y = backend.repeat(x, 2)\n self.assertEqual(y.shape.as_list(), [1, 2, 3])\n\n def test_flatten(self):\n compare_single_input_op_to_numpy(\n backend.flatten,\n np.reshape,\n input_shape=(4, 7, 6),\n np_args=[(4 * 7 * 6,)])\n\n def test_batch_flatten(self):\n compare_single_input_op_to_numpy(\n backend.batch_flatten,\n np.reshape,\n input_shape=(4, 7, 6),\n np_args=[(4, 7 * 6)])\n\n def test_temporal_padding(self):\n\n def ref_op(x, padding):\n shape = list(x.shape)\n shape[1] += padding[0] + padding[1]\n y = np.zeros(tuple(shape))\n y[:, padding[0]:-padding[1], :] = x\n return y\n\n compare_single_input_op_to_numpy(\n backend.temporal_padding,\n ref_op,\n input_shape=(4, 7, 6),\n keras_args=[(2, 3)],\n np_args=[(2, 3)])\n\n def test_spatial_2d_padding(self):\n\n def ref_op(x, padding, data_format='channels_last'):\n shape = list(x.shape)\n if data_format == 'channels_last':\n shape[1] += padding[0][0] + padding[0][1]\n shape[2] += padding[1][0] + padding[1][1]\n y = np.zeros(tuple(shape))\n y[:, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1], :] = x\n else:\n shape[2] += padding[0][0] + padding[0][1]\n shape[3] += padding[1][0] + padding[1][1]\n y = np.zeros(tuple(shape))\n y[:, :, padding[0][0]:-padding[0][1], padding[1][0]:-padding[1][1]] = x\n return y\n\n compare_single_input_op_to_numpy(\n backend.spatial_2d_padding,\n ref_op,\n input_shape=(2, 3, 2, 3),\n keras_args=[((2, 3), (1, 2))],\n keras_kwargs={'data_format': 'channels_last'},\n np_args=[((2, 3), (1, 2))],\n np_kwargs={'data_format': 'channels_last'})\n compare_single_input_op_to_numpy(\n backend.spatial_2d_padding,\n ref_op,\n input_shape=(2, 3, 2, 3),\n keras_args=[((2, 3), (1, 2))],\n keras_kwargs={'data_format': 'channels_first'},\n np_args=[((2, 3), (1, 2))],\n np_kwargs={'data_format': 'channels_first'})\n\n def test_spatial_3d_padding(self):\n\n def ref_op(x, padding, data_format='channels_last'):\n shape = list(x.shape)\n if data_format == 'channels_last':\n shape[1] += padding[0][0] + padding[0][1]\n shape[2] += padding[1][0] + padding[1][1]\n shape[3] += padding[2][0] + padding[2][1]\n y = np.zeros(tuple(shape))\n y[:,\n padding[0][0]:-padding[0][1],\n padding[1][0]:-padding[1][1],\n padding[2][0]:-padding[2][1],\n :] = x\n else:\n shape[2] += padding[0][0] + padding[0][1]\n shape[3] += padding[1][0] + padding[1][1]\n shape[4] += padding[2][0] + padding[2][1]\n y = np.zeros(tuple(shape))\n y[:, :,\n padding[0][0]:-padding[0][1],\n padding[1][0]:-padding[1][1],\n padding[2][0]:-padding[2][1]] = x\n return y\n\n compare_single_input_op_to_numpy(\n 
backend.spatial_3d_padding,\n ref_op,\n input_shape=(2, 3, 2, 3, 2),\n keras_args=[((2, 3), (1, 2), (2, 3))],\n keras_kwargs={'data_format': 'channels_last'},\n np_args=[((2, 3), (1, 2), (2, 3))],\n np_kwargs={'data_format': 'channels_last'})\n compare_single_input_op_to_numpy(\n backend.spatial_3d_padding,\n ref_op,\n input_shape=(2, 3, 2, 3, 2),\n keras_args=[((2, 3), (1, 2), (2, 3))],\n keras_kwargs={'data_format': 'channels_first'},\n np_args=[((2, 3), (1, 2), (2, 3))],\n np_kwargs={'data_format': 'channels_first'})\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass BackendNNOpsTest(test.TestCase, parameterized.TestCase):\n\n def test_bias_add(self):\n keras_op = backend.bias_add\n np_op = np.add\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 7),\n input_shape_b=(7,))\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 7),\n input_shape_b=(7,))\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 5, 7),\n input_shape_b=(7,))\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 5, 2, 7),\n input_shape_b=(7,))\n\n with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):\n x = backend.variable((3, 4))\n b = backend.variable((3, 4))\n backend.bias_add(x, b)\n with self.assertRaises(ValueError):\n x = backend.variable((3, 4))\n b = backend.variable((4,))\n backend.bias_add(x, b, data_format='unknown')\n\n def test_bias_add_channels_first(self):\n\n def keras_op(x, b):\n return backend.bias_add(x, b, data_format='channels_first')\n\n def np_op(x, b):\n if x.ndim == 3:\n b = b.reshape((1, b.shape[0], 1))\n if x.ndim == 4:\n b = b.reshape((1, b.shape[0], 1, 1))\n return x + b\n\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 7),\n input_shape_b=(3,))\n compare_two_inputs_op_to_numpy(keras_op, np_op,\n input_shape_a=(4, 3, 5, 7),\n input_shape_b=(3,))\n\n def test_pool2d(self):\n val = np.random.random((10, 3, 10, 10))\n x = backend.variable(val)\n y = backend.pool2d(\n x, (2, 2),\n strides=(1, 1),\n padding='valid',\n data_format='channels_first',\n pool_mode='max')\n self.assertEqual(y.shape.as_list(), [10, 3, 9, 9])\n\n y = backend.pool2d(\n x, (2, 2),\n strides=(1, 1),\n padding='valid',\n data_format='channels_first',\n pool_mode='avg')\n self.assertEqual(y.shape.as_list(), [10, 3, 9, 9])\n\n val = np.random.random((10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool2d(\n x, (2, 2), strides=(1, 1), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 9, 9, 3])\n\n val = np.random.random((10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool2d(\n x, (2, 2), strides=(1, 1), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 3])\n\n val = np.random.random((10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool2d(\n x, (2, 2), strides=(2, 2), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5, 3])\n\n with self.assertRaises(ValueError):\n y = backend.pool2d(\n x, (2, 2),\n strides=(2, 2),\n padding='other',\n data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.pool2d(x, (2, 2), strides=(2, 2), data_format='other')\n with self.assertRaises(ValueError):\n y = backend.pool2d(x, (2, 2, 2), strides=(2, 2))\n with self.assertRaises(ValueError):\n y = backend.pool2d(x, (2, 2), strides=(2, 2, 2))\n with self.assertRaises(ValueError):\n y = backend.pool2d(x, (2, 2), 
strides=(2, 2), pool_mode='other')\n\n def test_pool3d(self):\n if test.is_built_with_rocm():\n self.skipTest('Pooling with 3D tensors is not supported in ROCm')\n val = np.random.random((10, 3, 10, 10, 10))\n x = backend.variable(val)\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(1, 1, 1),\n padding='valid',\n data_format='channels_first',\n pool_mode='max')\n self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9])\n\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(1, 1, 1),\n padding='valid',\n data_format='channels_first',\n pool_mode='avg')\n self.assertEqual(y.shape.as_list(), [10, 3, 9, 9, 9])\n\n val = np.random.random((10, 10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(1, 1, 1),\n padding='valid',\n data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 9, 9, 9, 3])\n\n val = np.random.random((10, 10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(1, 1, 1),\n padding='same',\n data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 3])\n\n val = np.random.random((10, 10, 10, 10, 3))\n x = backend.variable(val)\n y = backend.pool3d(\n x, (2, 2, 2),\n strides=(2, 2, 2),\n padding='same',\n data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 3])\n\n def test_conv1d(self):\n val = np.random.random((10, 4, 10))\n x = backend.variable(val)\n kernel_val = np.random.random((3, 4, 5))\n k = backend.variable(kernel_val)\n y = backend.conv1d(\n x, k, strides=(1,), padding='valid', data_format='channels_first')\n self.assertEqual(y.shape.as_list(), [10, 5, 8])\n\n val = np.random.random((10, 10, 4))\n x = backend.variable(val)\n y = backend.conv1d(\n x, k, strides=(1,), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 8, 5])\n\n val = np.random.random((10, 10, 4))\n x = backend.variable(val)\n y = backend.conv1d(\n x, k, strides=(1,), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 5])\n\n val = np.random.random((10, 10, 4))\n x = backend.variable(val)\n y = backend.conv1d(\n x, k, strides=(2,), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5])\n\n def test_local_conv_channels_dim(self):\n filters = 3\n batch_size = 2\n\n for input_shape in [(3, 5), (2, 3, 5), (2, 5, 3, 4)]:\n channels_in = input_shape[0]\n input_spatial_shape = input_shape[1:]\n dim = len(input_spatial_shape)\n\n inputs = np.random.normal(0, 1, (batch_size,) + input_shape)\n inputs_cf = backend.variable(inputs)\n\n for kernel_size in [1, 2]:\n for stride in [1, 2]:\n kernel_sizes = (kernel_size,) * dim\n strides = (stride,) * dim\n\n output_shape = tuple([(i - kernel_size + stride) // stride\n for i in input_spatial_shape])\n\n kernel_shape = (np.prod(output_shape),\n np.prod(kernel_sizes) * channels_in,\n filters)\n\n kernel = np.random.normal(\n 0,\n 1,\n output_shape + (channels_in, np.prod(kernel_sizes), filters)\n )\n\n kernel_cf = np.reshape(kernel, kernel_shape)\n kernel_cf = backend.variable(kernel_cf)\n\n conv_cf = backend.local_conv(inputs_cf, kernel_cf, kernel_sizes,\n strides, output_shape, 'channels_first')\n\n inputs_cl = np.transpose(inputs, [0, 2] + list(range(3, dim + 2)) +\n [1])\n inputs_cl = backend.variable(inputs_cl)\n\n kernel_cl = np.reshape(\n np.transpose(kernel, list(range(dim)) + [dim + 1, dim, dim + 2]),\n kernel_shape\n )\n kernel_cl = backend.variable(kernel_cl)\n\n conv_cl = backend.local_conv(inputs_cl, 
kernel_cl, kernel_sizes,\n strides, output_shape, 'channels_last')\n\n conv_cf = backend.eval(conv_cf)\n conv_cl = backend.eval(conv_cl)\n\n self.assertAllCloseAccordingToType(\n conv_cf,\n np.transpose(conv_cl,\n [0, dim + 1] + list(range(1, dim + 1))),\n atol=1e-5\n )\n\n @parameterized.named_parameters(\n ('local_conv1d', (5, 6), (3,), (1,), (3,)),\n ('local_conv2d', (4, 5, 6), (3, 3), (1, 1), (2, 3)))\n def test_local_conv_1d_and_2d(self,\n input_shape,\n kernel_sizes,\n strides,\n output_shape):\n filters = 3\n batch_size = 2\n\n inputs = np.random.normal(0, 1, (batch_size,) + input_shape)\n inputs = backend.variable(inputs)\n\n kernel = np.random.normal(0, 1, (np.prod(output_shape),\n np.prod(kernel_sizes) * input_shape[-1],\n filters))\n kernel = backend.variable(kernel)\n\n local_conv = backend.local_conv(inputs, kernel, kernel_sizes, strides,\n output_shape, 'channels_last')\n if len(output_shape) == 1:\n local_conv_dim = backend.local_conv1d(inputs, kernel, kernel_sizes,\n strides, 'channels_last')\n else:\n local_conv_dim = backend.local_conv2d(inputs, kernel, kernel_sizes,\n strides, output_shape,\n 'channels_last')\n\n local_conv = backend.eval(local_conv)\n local_conv_dim = backend.eval(local_conv_dim)\n\n self.assertAllCloseAccordingToType(local_conv, local_conv_dim)\n\n def test_conv2d(self):\n kernel_val = np.random.random((3, 3, 4, 5))\n k = backend.variable(kernel_val)\n\n # Test channels_first\n val = np.random.random((10, 4, 10, 10))\n x = backend.variable(val)\n y = backend.conv2d(x, k, padding='valid', data_format='channels_first')\n self.assertEqual(y.shape.as_list(), [10, 5, 8, 8])\n\n # Test channels_last\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv2d(\n x, k, strides=(1, 1), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 8, 8, 5])\n\n # Test same padding\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv2d(x, k, padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])\n\n # Test dilation_rate\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv2d(\n x, k, dilation_rate=(2, 2), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])\n\n # Test strides\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv2d(\n x, k, strides=(2, 2), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5, 5])\n\n # Test invalid arguments\n with self.assertRaises(ValueError):\n y = backend.conv2d(\n x, k, (2, 2), padding='other', data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.conv2d(x, k, (2, 2), data_format='other')\n with self.assertRaises(ValueError):\n y = backend.conv2d(x, k, (2, 2, 2))\n\n def test_conv2d_transpose(self):\n input_size = (7, 8)\n kernel_size = (3, 3)\n input_depth = 6\n filters = 6\n batch_size = 2\n\n kernel_val = np.random.random(kernel_size + (input_depth, filters))\n k = backend.variable(kernel_val)\n\n # Test channels_first\n input_val = np.random.random((batch_size, input_depth) + input_size)\n x = backend.variable(input_val)\n y = backend.conv2d_transpose(\n x,\n k, (batch_size, filters) + input_size,\n padding='same',\n data_format='channels_first')\n self.assertEqual(\n tuple(y.shape.as_list()), (batch_size, filters) + input_size)\n\n # Test channels_last\n input_val = np.random.random((batch_size,) + 
input_size + (input_depth,))\n x = backend.variable(input_val)\n y = backend.conv2d_transpose(\n x,\n k, (batch_size,) + input_size + (filters,),\n padding='same',\n data_format='channels_last')\n self.assertEqual(\n tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))\n\n # Test dilation_rate\n y = backend.conv2d_transpose(\n x,\n k, (batch_size,) + input_size + (filters,),\n padding='same',\n data_format='channels_last',\n dilation_rate=(2, 2))\n self.assertEqual(\n tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))\n\n # Test batch size of None in output_shape\n y = backend.conv2d_transpose(\n x,\n k, (None,) + input_size + (filters,),\n padding='same',\n data_format='channels_last')\n self.assertEqual(\n tuple(y.shape.as_list()), (batch_size,) + input_size + (filters,))\n\n # Test invalid values\n with self.assertRaises(ValueError):\n y = backend.conv2d_transpose(\n x, k, (2, 2, 8, 9), padding='other', data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.conv2d_transpose(x, k, (2, 2, 8, 9), data_format='other')\n\n def test_separable_conv2d(self):\n val = np.random.random((10, 4, 10, 10))\n x = backend.variable(val)\n depthwise_kernel_val = np.random.random((3, 3, 4, 1))\n pointwise_kernel_val = np.random.random((1, 1, 4, 5))\n dk = backend.variable(depthwise_kernel_val)\n pk = backend.variable(pointwise_kernel_val)\n y = backend.separable_conv2d(\n x, dk, pk, padding='valid', data_format='channels_first')\n self.assertEqual(y.shape.as_list(), [10, 5, 8, 8])\n\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.separable_conv2d(\n x, dk, pk, strides=(1, 1), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 8, 8, 5])\n\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.separable_conv2d(\n x, dk, pk, strides=(1, 1), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 5])\n\n val = np.random.random((10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.separable_conv2d(\n x, dk, pk, strides=(2, 2), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 5, 5, 5])\n with self.assertRaises(ValueError):\n y = backend.separable_conv2d(\n x, dk, pk, (2, 2), padding='other', data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.separable_conv2d(x, dk, pk, (2, 2), data_format='other')\n with self.assertRaises(ValueError):\n y = backend.separable_conv2d(x, dk, pk, (2, 2, 2))\n\n def test_conv3d(self):\n val = np.random.random((10, 4, 10, 10, 10))\n x = backend.variable(val)\n kernel_val = np.random.random((3, 3, 3, 4, 5))\n k = backend.variable(kernel_val)\n y = backend.conv3d(x, k, padding='valid', data_format='channels_first')\n self.assertEqual(y.shape.as_list(), [10, 5, 8, 8, 8])\n\n val = np.random.random((10, 10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv3d(\n x, k, strides=(1, 1, 1), padding='valid', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 8, 8, 8, 5])\n\n val = np.random.random((10, 10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv3d(\n x, k, strides=(1, 1, 1), padding='same', data_format='channels_last')\n self.assertEqual(y.shape.as_list(), [10, 10, 10, 10, 5])\n\n val = np.random.random((10, 10, 10, 10, 4))\n x = backend.variable(val)\n y = backend.conv3d(\n x, k, strides=(2, 2, 2), padding='same', data_format='channels_last')\n 
self.assertEqual(y.shape.as_list(), [10, 5, 5, 5, 5])\n with self.assertRaises(ValueError):\n y = backend.conv3d(\n x, k, (2, 2, 2), padding='other', data_format='channels_last')\n with self.assertRaises(ValueError):\n y = backend.conv3d(x, k, (2, 2, 2), data_format='other')\n with self.assertRaises(ValueError):\n y = backend.conv3d(x, k, (2, 2))\n\n def test_rnn(self):\n # implement a simple RNN\n num_samples = 4\n input_dim = 5\n output_dim = 3\n timesteps = 6\n\n input_val = np.random.random(\n (num_samples, timesteps, input_dim)).astype(np.float32)\n init_state_val = np.random.random(\n (num_samples, output_dim)).astype(np.float32)\n w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)\n w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)\n np_mask = np.random.randint(2, size=(num_samples, timesteps))\n\n def rnn_step_fn():\n w_i = backend.variable(w_i_val)\n w_o = backend.variable(w_o_val)\n\n def step_function(x, states):\n assert len(states) == 1\n prev_output = states[0]\n output = backend.dot(x, w_i) + backend.dot(prev_output, w_o)\n return output, [output]\n\n return step_function\n\n # test default setup\n last_output_list = [[], [], [], [], [], []]\n outputs_list = [[], [], [], [], [], []]\n state_list = [[], [], [], [], [], []]\n\n rnn_fn = rnn_step_fn()\n inputs = backend.variable(input_val)\n initial_states = [backend.variable(init_state_val)]\n mask = backend.variable(np_mask)\n\n kwargs_list = [\n {'go_backwards': False, 'mask': None},\n {'go_backwards': False, 'mask': None, 'unroll': True},\n {'go_backwards': True, 'mask': None},\n {'go_backwards': True, 'mask': None, 'unroll': True},\n {'go_backwards': False, 'mask': mask},\n {'go_backwards': False, 'mask': mask, 'unroll': True},\n ]\n for i, kwargs in enumerate(kwargs_list):\n last_output, outputs, new_states = backend.rnn(rnn_fn, inputs,\n initial_states, **kwargs)\n # check static shape inference\n self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim])\n self.assertEqual(outputs.shape.as_list(),\n [num_samples, timesteps, output_dim])\n for state in new_states:\n self.assertEqual(state.shape.as_list(), [num_samples, output_dim])\n\n last_output_list[i].append(backend.eval(last_output))\n outputs_list[i].append(backend.eval(outputs))\n self.assertLen(new_states, 1)\n state_list[i].append(backend.eval(new_states[0]))\n\n def assert_list_pairwise(z_list, atol=1e-05):\n for (z1, z2) in zip(z_list[1:], z_list[:-1]):\n self.assertAllClose(z1, z2, atol=atol)\n\n assert_list_pairwise(last_output_list[0], atol=1e-04)\n assert_list_pairwise(outputs_list[0], atol=1e-04)\n assert_list_pairwise(state_list[0], atol=1e-04)\n assert_list_pairwise(last_output_list[2], atol=1e-04)\n assert_list_pairwise(outputs_list[2], atol=1e-04)\n assert_list_pairwise(state_list[2], atol=1e-04)\n\n for l, u_l in zip(last_output_list[0], last_output_list[1]):\n self.assertAllClose(l, u_l, atol=1e-04)\n\n for o, u_o in zip(outputs_list[0], outputs_list[1]):\n self.assertAllClose(o, u_o, atol=1e-04)\n\n for s, u_s in zip(state_list[0], state_list[1]):\n self.assertAllClose(s, u_s, atol=1e-04)\n\n for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):\n self.assertAllClose(b_l, b_u_l, atol=1e-04)\n\n for b_o, b_u_o in zip(outputs_list[2], outputs_list[3]):\n self.assertAllClose(b_o, b_u_o, atol=1e-04)\n\n for b_s, b_u_s in zip(state_list[2], state_list[3]):\n self.assertAllClose(b_s, b_u_s, atol=1e-04)\n\n def test_rnn_additional_states(self):\n # implement a simple RNN\n num_samples = 
4\n input_dim = 5\n output_dim = 3\n timesteps = 6\n\n input_val = np.random.random(\n (num_samples, timesteps, input_dim)).astype(np.float32)\n init_state_val = np.random.random(\n (num_samples, output_dim)).astype(np.float32)\n w_i_val = np.random.random((input_dim, output_dim)).astype(np.float32)\n w_o_val = np.random.random((output_dim, output_dim)).astype(np.float32)\n np_mask = np.random.randint(2, size=(num_samples, timesteps))\n\n def rnn_step_fn():\n w_i = backend.variable(w_i_val)\n w_o = backend.variable(w_o_val)\n\n def step_function(x, states):\n assert len(states) == 2\n prev_output = states[0]\n output = backend.dot(x, w_i) + backend.dot(prev_output, w_o)\n return output, [output, backend.concatenate([output, output], axis=-1)]\n\n return step_function\n\n # test default setup\n last_output_list = [[], [], [], [], [], []]\n outputs_list = [[], [], [], [], [], []]\n state_list = [[], [], [], [], [], []]\n additional_state_list = [[], [], [], [], [], []]\n\n rnn_fn = rnn_step_fn()\n inputs = backend.variable(input_val)\n initial_states = [\n backend.variable(init_state_val),\n ops.convert_to_tensor_v2(\n np.concatenate([init_state_val, init_state_val], axis=-1))\n ]\n mask = backend.variable(np_mask)\n\n kwargs_list = [\n {'go_backwards': False, 'mask': None},\n {'go_backwards': False, 'mask': None, 'unroll': True},\n {'go_backwards': True, 'mask': None},\n {'go_backwards': True, 'mask': None, 'unroll': True},\n {'go_backwards': False, 'mask': mask},\n {'go_backwards': False, 'mask': mask, 'unroll': True},\n ]\n for i, kwargs in enumerate(kwargs_list):\n last_output, outputs, new_states = backend.rnn(rnn_fn, inputs,\n initial_states, **kwargs)\n # check static shape inference\n self.assertEqual(last_output.shape.as_list(), [num_samples, output_dim])\n self.assertEqual(outputs.shape.as_list(),\n [num_samples, timesteps, output_dim])\n # for state in new_states:\n # self.assertEqual(state.shape.as_list(),\n # [num_samples, output_dim])\n self.assertEqual(new_states[0].shape.as_list(), [num_samples, output_dim])\n self.assertEqual(new_states[1].shape.as_list(),\n [num_samples, 2 * output_dim])\n\n last_output_list[i].append(backend.eval(last_output))\n outputs_list[i].append(backend.eval(outputs))\n self.assertLen(new_states, 2)\n state_list[i].append(backend.eval(new_states[0]))\n additional_state_list[i].append(backend.eval(new_states[1]))\n\n def assert_list_pairwise(z_list, atol=1e-05):\n for (z1, z2) in zip(z_list[1:], z_list[:-1]):\n self.assertAllClose(z1, z2, atol=atol)\n\n assert_list_pairwise(last_output_list[0], atol=1e-04)\n assert_list_pairwise(outputs_list[0], atol=1e-04)\n assert_list_pairwise(state_list[0], atol=1e-04)\n assert_list_pairwise(additional_state_list[0], atol=1e-04)\n assert_list_pairwise(last_output_list[2], atol=1e-04)\n assert_list_pairwise(outputs_list[2], atol=1e-04)\n assert_list_pairwise(state_list[2], atol=1e-04)\n assert_list_pairwise(additional_state_list[2], atol=1e-04)\n\n for l, u_l in zip(last_output_list[0], last_output_list[1]):\n self.assertAllClose(l, u_l, atol=1e-04)\n\n for o, u_o in zip(outputs_list[0], outputs_list[1]):\n self.assertAllClose(o, u_o, atol=1e-04)\n\n for s, u_s in zip(state_list[0], state_list[1]):\n self.assertAllClose(s, u_s, atol=1e-04)\n\n for s, u_s in zip(additional_state_list[0], additional_state_list[1]):\n self.assertAllClose(s, u_s, atol=1e-04)\n\n for b_l, b_u_l in zip(last_output_list[2], last_output_list[3]):\n self.assertAllClose(b_l, b_u_l, atol=1e-04)\n\n for b_o, b_u_o in zip(outputs_list[2], 
outputs_list[3]):\n self.assertAllClose(b_o, b_u_o, atol=1e-04)\n\n for b_s, b_u_s in zip(state_list[2], state_list[3]):\n self.assertAllClose(b_s, b_u_s, atol=1e-04)\n\n for s, u_s in zip(additional_state_list[2], additional_state_list[3]):\n self.assertAllClose(s, u_s, atol=1e-04)\n\n def test_rnn_output_and_state_masking_independent(self):\n num_samples = 2\n num_timesteps = 4\n state_and_io_size = 2\n mask_last_num_timesteps = 2 # for second sample only\n\n # a step function that just outputs inputs,\n # but increments states +1 per timestep\n def step_function(inputs, states):\n return inputs, [s + 1 for s in states]\n\n inputs_vals = np.random.random((num_samples, num_timesteps,\n state_and_io_size))\n initial_state_vals = np.random.random((num_samples, state_and_io_size))\n # masking of two last timesteps for second sample only\n mask_vals = np.ones((num_samples, num_timesteps))\n mask_vals[1, -mask_last_num_timesteps:] = 0\n\n # outputs expected to be same as inputs for the first sample\n expected_outputs = inputs_vals.copy()\n # but for the second sample all outputs in masked region should be the same\n # as last output before masked region\n expected_outputs[1, -mask_last_num_timesteps:] = \\\n expected_outputs[1, -(mask_last_num_timesteps + 1)]\n\n expected_last_state = initial_state_vals.copy()\n # first state should be incremented for every timestep (no masking)\n expected_last_state[0] += num_timesteps\n # second state should not be incremented for last two timesteps\n expected_last_state[1] += (num_timesteps - mask_last_num_timesteps)\n\n # verify same expected output for `unroll=true/false`\n inputs = backend.variable(inputs_vals)\n initial_states = [backend.variable(initial_state_vals)]\n mask = backend.variable(mask_vals)\n for unroll in [True, False]:\n _, outputs, last_states = backend.rnn(\n step_function,\n inputs,\n initial_states,\n mask=mask,\n unroll=unroll,\n input_length=num_timesteps if unroll else None)\n\n self.assertAllClose(backend.eval(outputs), expected_outputs)\n self.assertAllClose(backend.eval(last_states[0]), expected_last_state)\n\n def test_rnn_output_num_dim_larger_than_2_masking(self):\n num_samples = 3\n num_timesteps = 4\n num_features = 5\n\n def step_function(inputs, states):\n outputs = backend.tile(backend.expand_dims(inputs), [1, 1, 2])\n return outputs, [backend.identity(s) for s in states]\n # Note: cannot just return states (which can be a problem) ->\n # tensorflow/python/ops/resource_variable_ops.py\", line 824, in set_shape\n # NotImplementedError: ResourceVariable does not implement set_shape()\n\n inputs_vals = np.random.random((num_samples, num_timesteps, num_features))\n initial_state_vals = np.random.random((num_samples, 6))\n mask_vals = np.ones((num_samples, num_timesteps))\n mask_vals[-1, -1] = 0 # final timestep masked for last sample\n\n expected_outputs = np.repeat(inputs_vals[..., None], repeats=2, axis=-1)\n # for the last sample, the final timestep (in masked region) should be the\n # same as the second to final output (before masked region)\n expected_outputs[-1, -1] = expected_outputs[-1, -2]\n\n inputs = backend.variable(inputs_vals)\n initial_states = [backend.variable(initial_state_vals)]\n mask = backend.variable(mask_vals)\n for unroll in [True, False]:\n _, outputs, _ = backend.rnn(\n step_function,\n inputs,\n initial_states,\n mask=mask,\n unroll=unroll,\n input_length=num_timesteps if unroll else None)\n\n self.assertAllClose(backend.eval(outputs), expected_outputs)\n\n def 
test_rnn_state_num_dim_larger_than_2_masking(self):\n num_samples = 3\n num_timesteps = 4\n\n def step_function(inputs, states):\n return inputs, [s + 1 for s in states]\n\n inputs_vals = np.random.random((num_samples, num_timesteps, 5))\n initial_state_vals = np.random.random((num_samples, 6, 7))\n mask_vals = np.ones((num_samples, num_timesteps))\n mask_vals[0, -2:] = 0 # final two timesteps masked for first sample\n\n expected_last_state = initial_state_vals.copy()\n expected_last_state[0] += (num_timesteps - 2)\n expected_last_state[1:] += num_timesteps\n\n inputs = backend.variable(inputs_vals)\n initial_states = [backend.variable(initial_state_vals)]\n mask = backend.variable(mask_vals)\n for unroll in [True, False]:\n _, _, last_states = backend.rnn(\n step_function,\n inputs,\n initial_states,\n mask=mask,\n unroll=unroll,\n input_length=num_timesteps if unroll else None)\n\n self.assertAllClose(backend.eval(last_states[0]), expected_last_state)\n\n def test_batch_normalization(self):\n g_val = np.random.random((3,))\n b_val = np.random.random((3,))\n gamma = backend.variable(g_val)\n beta = backend.variable(b_val)\n\n # 3D NHC case\n val = np.random.random((10, 5, 3))\n x = backend.variable(val)\n mean, var = nn.moments(x, (0, 1), None, None, False)\n normed = backend.batch_normalization(\n x, mean, var, beta, gamma, axis=-1, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 5, 3])\n\n # 4D NHWC case\n val = np.random.random((10, 5, 5, 3))\n x = backend.variable(val)\n mean, var = nn.moments(x, (0, 1, 2), None, None, False)\n normed = backend.batch_normalization(\n x, mean, var, beta, gamma, axis=-1, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 5, 5, 3])\n\n # 4D NCHW case\n if not context.executing_eagerly():\n # Eager CPU kernel for NCHW does not exist.\n val = np.random.random((10, 3, 5, 5))\n x = backend.variable(val)\n mean, var = nn.moments(x, (0, 2, 3), None, None, False)\n normed = backend.batch_normalization(\n x, mean, var, beta, gamma, axis=1, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 3, 5, 5])\n\n def test_normalize_batch_in_training(self):\n val = np.random.random((10, 3, 10, 10))\n x = backend.variable(val)\n reduction_axes = (0, 2, 3)\n\n g_val = np.random.random((3,))\n b_val = np.random.random((3,))\n gamma = backend.variable(g_val)\n beta = backend.variable(b_val)\n normed, mean, var = backend.normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])\n self.assertEqual(mean.shape.as_list(), [\n 3,\n ])\n self.assertEqual(var.shape.as_list(), [\n 3,\n ])\n\n # case: gamma=None\n gamma = None\n normed, mean, var = backend.normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])\n self.assertEqual(mean.shape.as_list(), [\n 3,\n ])\n self.assertEqual(var.shape.as_list(), [\n 3,\n ])\n\n # case: beta=None\n beta = None\n normed, mean, var = backend.normalize_batch_in_training(\n x, gamma, beta, reduction_axes, epsilon=1e-3)\n self.assertEqual(normed.shape.as_list(), [10, 3, 10, 10])\n self.assertEqual(mean.shape.as_list(), [\n 3,\n ])\n self.assertEqual(var.shape.as_list(), [\n 3,\n ])\n\n def test_dropout(self):\n inputs = array_ops.ones((200, 200))\n outputs = backend.dropout(inputs, 0.2)\n outputs_val = backend.eval(outputs)\n self.assertEqual(np.min(outputs_val), 0)\n self.assertAllClose(np.count_nonzero(outputs_val), 32000, atol=1000)\n # Test noise shape\n # 
MIOpen do not support noise shape feature yet, skip on ROCm\n if not test.is_built_with_rocm():\n outputs = backend.dropout(inputs, 0.2, noise_shape=(200, 1))\n outputs_val = backend.eval(outputs)\n self.assertAllClose(outputs_val[2, :], outputs_val[3, :], atol=1e-5)\n\n\nclass BackendCrossEntropyLossesTest(test.TestCase, parameterized.TestCase):\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_binary_crossentropy_with_sigmoid(self):\n t = backend.constant([[0, 1, 0]])\n logits = backend.constant([[8., 1., 1.]])\n p = backend.sigmoid(logits)\n p = array_ops.identity(array_ops.identity(p))\n result = self.evaluate(backend.binary_crossentropy(t, p))\n self.assertArrayNear(result[0], [8., 0.313, 1.313], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_categorical_crossentropy_loss(self):\n t = backend.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n\n p = backend.constant([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]])\n result = backend.categorical_crossentropy(t, p)\n self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)\n\n p = backend.constant([[.9, .05, .05], [.05, .89, .01], [.05, .06, .94]])\n result = backend.categorical_crossentropy(t, p, axis=0)\n self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)\n\n p = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n result = backend.categorical_crossentropy(t, p, from_logits=True),\n self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)\n\n p = backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]])\n result = backend.categorical_crossentropy(t, p, from_logits=True, axis=0),\n self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self):\n t = backend.placeholder()\n p = backend.placeholder()\n o = backend.categorical_crossentropy(t, p)\n\n t_val = ops.convert_to_tensor_v2([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])\n p_val = ops.convert_to_tensor_v2([[.9, .05, .05], [.05, .89, .06],\n [.05, .01, .94]])\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.105, .116, .062], 1e-3)\n\n # With axis set\n o = backend.categorical_crossentropy(t, p, axis=0)\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.105, .065, .111], 1e-3)\n\n # from logits\n p_val = ops.convert_to_tensor_v2([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n o = backend.categorical_crossentropy(t, p, from_logits=True)\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.002, 0, .17], 1e-3)\n\n # from logits and axis set\n o = backend.categorical_crossentropy(t, p, from_logits=True, axis=0)\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.002, .003, .036], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_categorical_crossentropy_with_softmax(self):\n t = backend.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n logits = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n p = backend.softmax(logits)\n p = array_ops.identity(array_ops.identity(p))\n result = self.evaluate(backend.categorical_crossentropy(t, p))\n self.assertArrayNear(result, [0.002, 0.0005, 0.17], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def 
test_sparse_categorical_crossentropy_loss(self):\n t = backend.constant([0, 1, 2])\n\n p = backend.constant([[.9, .05, .05], [.05, .89, .06], [.05, .01, .94]])\n result = backend.sparse_categorical_crossentropy(t, p)\n self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)\n\n p = backend.constant([[.9, .05, .05], [.05, .89, .01], [.05, .06, .94]])\n result = backend.sparse_categorical_crossentropy(t, p, axis=0)\n self.assertArrayNear(self.evaluate(result), [.105, .116, .062], 1e-3)\n\n p = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n result = backend.sparse_categorical_crossentropy(t, p, from_logits=True),\n self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)\n\n p = backend.constant([[8., 0., 2.], [1., 9., 3.], [1., 1., 5.]])\n result = backend.sparse_categorical_crossentropy(\n t, p, from_logits=True, axis=0),\n self.assertArrayNear(self.evaluate(result)[0], [.002, 0, .17], 1e-3)\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(self):\n t = backend.placeholder()\n p = backend.placeholder()\n o = backend.sparse_categorical_crossentropy(t, p)\n\n t_val = ops.convert_to_tensor_v2([0, 1, 2])\n p_val = ops.convert_to_tensor_v2([[.9, .05, .05], [.05, .89, .06],\n [.05, .01, .94]])\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.105, .116, .062], 1e-3)\n\n # With axis set\n with self.assertRaisesRegex(\n ValueError,\n 'Cannot compute sparse categorical crossentropy with `axis=0`'):\n o = backend.sparse_categorical_crossentropy(t, p, axis=0)\n f = backend.function([t, p], o)\n\n _ = f([t_val, p_val])\n\n # from logits\n p_val = ops.convert_to_tensor_v2([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n o = backend.sparse_categorical_crossentropy(t, p, from_logits=True)\n f = backend.function([t, p], o)\n\n result = f([t_val, p_val])\n self.assertArrayNear(result, [.002, 0, .17], 1e-3)\n\n # from logits and axis set\n with self.assertRaisesRegex(\n ValueError,\n 'Cannot compute sparse categorical crossentropy with `axis=0`'):\n o = backend.sparse_categorical_crossentropy(\n t, p, from_logits=True, axis=0)\n f = backend.function([t, p], o)\n\n _ = f([t_val, p_val])\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_sparse_categorical_crossentropy_with_softmax(self):\n t = backend.constant([0, 1, 2])\n logits = backend.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])\n p = backend.softmax(logits)\n p = array_ops.identity(array_ops.identity(p))\n result = self.evaluate(backend.sparse_categorical_crossentropy(t, p))\n self.assertArrayNear(result, [0.002, 0.0005, 0.17], 1e-3)\n\n\n@test_util.with_control_flow_v2\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass TestCTC(test.TestCase):\n\n def test_ctc_decode(self):\n depth = 6\n seq_len_0 = 5\n input_prob_matrix_0 = np.asarray(\n [[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],\n [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],\n [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],\n [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],\n [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],\n # Random entry added in at time=5\n [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],\n dtype=np.float32)\n\n # len max_time_steps array of batch_size x depth matrices\n inputs = ([input_prob_matrix_0[t, :][np.newaxis, :]\n for t 
in range(seq_len_0)] + # Pad to max_time_steps = 8\n 2 * [np.zeros((1, depth), dtype=np.float32)])\n\n inputs = backend.variable(np.asarray(inputs).transpose((1, 0, 2)))\n\n # batch_size length vector of sequence_lengths\n input_length = backend.variable(np.array([seq_len_0], dtype=np.int32))\n # batch_size length vector of negative log probabilities\n log_prob_truth = np.array([\n -3.5821197, # output beam 0\n -3.777835 # output beam 1\n ], np.float32)[np.newaxis, :]\n\n decode_truth = [np.array([1, 0]), np.array([0, 1, 0])]\n beam_width = 2\n top_paths = 2\n\n decode_pred_tf, log_prob_pred_tf = backend.ctc_decode(\n inputs,\n input_length,\n greedy=False,\n beam_width=beam_width,\n top_paths=top_paths)\n\n self.assertEqual(len(decode_pred_tf), top_paths)\n log_prob_pred = backend.eval(log_prob_pred_tf)\n for i in range(top_paths):\n self.assertTrue(\n np.alltrue(decode_truth[i] == backend.eval(decode_pred_tf[i])))\n self.assertAllClose(log_prob_truth, log_prob_pred)\n\n def test_ctc_batch_cost(self):\n with self.cached_session():\n label_lens = np.expand_dims(np.asarray([5, 4]), 1)\n input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps\n loss_log_probs = [3.34211, 5.42262]\n\n # dimensions are batch x time x categories\n labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])\n inputs = np.asarray(\n [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],\n [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],\n [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],\n [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],\n [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],\n [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],\n [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],\n [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],\n [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],\n [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]],\n dtype=np.float32)\n\n labels = backend.variable(labels, dtype='int32')\n inputs = backend.variable(inputs, dtype='float32')\n input_lens = backend.variable(input_lens, dtype='int32')\n label_lens = backend.variable(label_lens, dtype='int32')\n res = backend.eval(\n backend.ctc_batch_cost(labels, inputs, input_lens, label_lens))\n self.assertAllClose(res[:, 0], loss_log_probs, atol=1e-05)\n\n # test when batch_size = 1, that is, one sample only\n ref = [3.34211]\n input_lens = np.expand_dims(np.asarray([5]), 1)\n label_lens = np.expand_dims(np.asarray([5]), 1)\n\n labels = np.asarray([[0, 1, 2, 1, 0]])\n inputs = np.asarray(\n [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553], [\n 0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436\n ], [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],\n [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],\n [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]]\n ],\n dtype=np.float32)\n\n k_labels = backend.variable(labels, dtype='int32')\n k_inputs = backend.variable(inputs, dtype='float32')\n k_input_lens = backend.variable(input_lens, dtype='int32')\n k_label_lens = backend.variable(label_lens, dtype='int32')\n res = backend.eval(\n backend.ctc_batch_cost(k_labels, k_inputs, k_input_lens,\n k_label_lens))\n self.assertAllClose(res[:, 0], ref, atol=1e-05)\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass TestRandomOps(test.TestCase):\n\n def 
test_random_normal(self):\n np.random.seed(123)\n x = backend.random_normal((500, 500))\n val = backend.eval(x)\n self.assertAllClose(np.mean(val), 0., atol=0.01)\n self.assertAllClose(np.std(val), 1., atol=0.01)\n\n def test_random_uniform(self):\n np.random.seed(123)\n x = backend.random_uniform((500, 500))\n val = backend.eval(x)\n self.assertAllClose(np.mean(val), 0.5, atol=0.01)\n self.assertAllClose(np.max(val), 1., atol=0.01)\n self.assertAllClose(np.min(val), 0., atol=0.01)\n\n def test_random_binomial(self):\n np.random.seed(123)\n x = backend.random_binomial((500, 500), p=0.5)\n self.assertAllClose(np.mean(backend.eval(x)), 0.5, atol=0.01)\n\n def test_truncated_normal(self):\n np.random.seed(123)\n x = backend.truncated_normal((500, 500), mean=0.0, stddev=1.0)\n x = backend.truncated_normal((1000, 1000), mean=0.0, stddev=1.0)\n y = backend.eval(x)\n self.assertAllClose(np.mean(y), 0., atol=0.01)\n self.assertAllClose(np.std(y), 0.88, atol=0.01)\n self.assertAllClose(np.max(y), 2., atol=0.01)\n self.assertAllClose(np.min(y), -2., atol=0.01)\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass FunctionTest(test.TestCase):\n\n def test_function_basics(self):\n x1 = backend.placeholder(shape=(), dtype='float32')\n x2 = backend.placeholder(shape=(), dtype='int32')\n v = backend.variable(10.)\n\n y1 = x1 + backend.cast(x2, 'float32') + v\n y2 = x1 * backend.cast(x2, 'float32')\n\n with ops.control_dependencies([y1]):\n u = backend.update(v, x1)\n\n f = backend.function([x1, x2], [y1, y2], updates=[u])\n output_values = f([2, 3])\n self.assertEqual(output_values, [15., 6.])\n self.assertEqual(backend.eval(v), 2.)\n\n def test_function_dict_outputs(self):\n x_ph = backend.placeholder(shape=(), name='x')\n y_ph = backend.placeholder(shape=(), name='y')\n outputs = {'x*y': y_ph * x_ph, 'x*x': x_ph * x_ph}\n\n f = backend.function(inputs=[x_ph, y_ph], outputs=outputs)\n x, y = 2., 5.\n results = f([x, y])\n\n self.assertEqual(results['x*y'], 10.)\n self.assertEqual(results['x*x'], 4)\n\n def test_function_dict_inputs(self):\n placeholders = {\n 'x': backend.placeholder(shape=()),\n 'y': backend.placeholder(shape=())\n }\n outputs = [placeholders['x'] * placeholders['y']]\n\n f = backend.function(inputs=placeholders, outputs=outputs)\n results = f({'x': 2., 'y': 3.})\n self.assertEqual(results[0], 6.)\n\n def test_function_single_input_output(self):\n x_ph = backend.placeholder(shape=(), name='x')\n output = x_ph * x_ph\n f = backend.function(x_ph, output)\n result = f(2.)\n self.assertEqual(result, 4.)\n\n def test_tuple_updates(self):\n x_ph = backend.placeholder(ndim=2)\n v = backend.variable(np.ones((4, 2)))\n output = x_ph ** 2 + v\n new_v = v + x_ph\n f = backend.function(x_ph, output, updates=[(v, new_v)])\n input_val = np.random.random((4, 2))\n result = f(input_val)\n self.assertAllClose(result, input_val ** 2 + 1)\n self.assertAllClose(backend.get_value(v), np.ones((4, 2)) + input_val)\n\n\nclass BackendGraphTests(test.TestCase, parameterized.TestCase):\n\n @combinations.generate(combinations.combine(mode=['graph', 'eager']))\n def test_function_placeholder_with_default(self):\n with backend.get_graph().as_default():\n x1 = array_ops.placeholder_with_default(\n np.array(2., dtype='float32'), shape=())\n x2 = array_ops.placeholder_with_default(\n np.array(3, dtype='int32'), shape=())\n y1 = x1 + backend.cast(x2, 'float32')\n y2 = x1 * backend.cast(x2, 'float32')\n f = backend.function([x1, x2], [y1, y2])\n output_values = f([4, 5])\n 
self.assertEqual(output_values, [9., 20.])\n output_values = f([None, None])\n self.assertEqual(output_values, [5., 6.])\n\n def test_function_tf_feed_symbols(self):\n # Test Keras backend functions with TF tensor inputs.\n with ops.Graph().as_default(), self.cached_session():\n # Test feeding a resource variable to `function`.\n x1 = backend.placeholder(shape=())\n x2 = backend.placeholder(shape=())\n lr = backend.learning_phase() # Include a placeholder_with_default.\n\n y1 = backend.variable(10.)\n y2 = 3\n\n f = backend.function(\n inputs=[x1, x2, lr],\n outputs=[x1 + 1, backend.in_train_phase(x2 + 2, x2 - 1)])\n outs = f([y1, y2, None]) # Use default learning_phase value.\n self.assertEqual(outs, [11., 2.])\n outs = f([y1, y2, 1]) # Set learning phase value.\n self.assertEqual(outs, [11., 5.])\n\n # Test triggering a callable refresh by changing the input.\n y3 = backend.constant(20.) # Test with tensor\n outs = f([y3, y2, None])\n self.assertEqual(outs, [21., 2.])\n\n y4 = 4 # Test with non-symbol\n outs = f([y4, y2, None])\n self.assertEqual(outs, [5., 2.])\n\n # Test with a different dtype\n y5 = backend.constant(10., dtype='float64')\n outs = f([y5, y2, None])\n self.assertEqual(outs, [11., 2.])\n\n def test_function_tf_fetches(self):\n # Additional operations can be passed to tf.compat.v1.Session().run() via\n # its `fetches` arguments. In contrast to `updates` argument of\n # backend.function() these do not have control dependency on `outputs`\n # so they can run in parallel. Also they should not contribute to output of\n # backend.function().\n with ops.Graph().as_default(), self.cached_session():\n x = backend.variable(0.)\n y = backend.variable(0.)\n x_placeholder = backend.placeholder(shape=())\n y_placeholder = backend.placeholder(shape=())\n\n f = backend.function(\n inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n updates=[(x, x_placeholder + 1.)],\n fetches=[backend.update(y, 5.)])\n output = f([10., 20.])\n self.assertEqual(output, [30.])\n self.assertEqual(backend.get_session().run(fetches=[x, y]), [11., 5.])\n\n def test_function_tf_feed_dict(self):\n # Additional substitutions can be passed to `tf.compat.v1.Session().run()`\n # via its `feed_dict` arguments. Note that the feed_dict is passed once in\n # the constructor but we can modify the values in the dictionary. 
Through\n # this feed_dict we can provide additional substitutions besides Keras\n # inputs.\n with ops.Graph().as_default(), self.cached_session():\n x = backend.variable(0.)\n y = backend.variable(0.)\n x_placeholder = backend.placeholder(shape=())\n y_placeholder = backend.placeholder(shape=())\n\n feed_dict = {y_placeholder: 3.}\n fetches = [backend.update(y, y_placeholder * 10.)]\n f = backend.function(\n inputs=[x_placeholder],\n outputs=[x_placeholder + 1.],\n updates=[(x, x_placeholder + 10.)],\n feed_dict=feed_dict,\n fetches=fetches)\n output = f([10.])\n self.assertEqual(output, [11.])\n self.assertEqual(backend.get_session().run(fetches=[x, y]), [20., 30.])\n\n # updated value in feed_dict will be modified within the K.function()\n feed_dict[y_placeholder] = 4.\n output = f([20.])\n self.assertEqual(output, [21.])\n self.assertEqual(backend.get_session().run(fetches=[x, y]), [30., 40.])\n\n def test_function_tf_run_options_with_run_metadata(self):\n with ops.Graph().as_default(), self.cached_session():\n x_placeholder = backend.placeholder(shape=())\n y_placeholder = backend.placeholder(shape=())\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n run_metadata = config_pb2.RunMetadata()\n # enable run_options.\n f = backend.function(\n inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n options=run_options,\n run_metadata=run_metadata)\n output = f([10., 20.])\n self.assertEqual(output, [30.])\n self.assertNotEmpty(run_metadata.partition_graphs)\n # disable run_options.\n f1 = backend.function(\n inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n run_metadata=run_metadata)\n output1 = f1([10., 20.])\n self.assertEqual(output1, [30.])\n self.assertEmpty(run_metadata.partition_graphs)\n\n def test_function_fetch_callbacks(self):\n\n class CallbackStub(object):\n\n def __init__(self):\n self.times_called = 0\n self.callback_result = 0\n\n def _fetch_callback(self, result):\n self.times_called += 1\n self.callback_result = result\n\n with ops.Graph().as_default(), self.cached_session():\n callback = CallbackStub()\n x_placeholder = backend.placeholder(shape=())\n y_placeholder = backend.placeholder(shape=())\n\n callback_op = x_placeholder * y_placeholder\n\n f = backend.function(\n inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder])\n f.fetches.append(callback_op)\n f.fetch_callbacks[callback_op] = callback._fetch_callback\n\n _ = f([10., 20.])\n\n self.assertEqual(callback.times_called, 1)\n self.assertEqual(callback.callback_result, 200)\n\n def test_get_session_different_graphs(self):\n with ops.Graph().as_default():\n x = backend.constant(1)\n session = backend.get_session()\n self.assertIs(session, backend.get_session((x,)))\n self.assertIs(session, backend.get_session())\n with ops.Graph().as_default():\n self.assertIs(session, backend.get_session((x,)))\n self.assertIsNot(session, backend.get_session())\n\n\n@combinations.generate(combinations.combine(mode=['graph', 'eager']))\nclass ControlOpsTests(test.TestCase):\n\n def test_function_switch_basics(self):\n x = array_ops.constant(2.0)\n y = array_ops.constant(3.0)\n\n def xpowy():\n return backend.pow(x, y)\n\n def ypowx():\n return backend.pow(y, x)\n\n tensor = backend.switch(backend.less(x, y), xpowy, ypowx)\n self.assertEqual(backend.eval(tensor), [8.0])\n\n tensor = backend.switch(backend.greater(x, y), xpowy, ypowx)\n self.assertEqual(backend.eval(tensor), [9.0])\n\n def test_unequal_rank(self):\n x = 
ops.convert_to_tensor_v2(\n np.array([[1, 2, 3], [4, 5, 6]]), dtype='float32')\n y = ops.convert_to_tensor_v2(np.array([1, 2, 3]), dtype='float32')\n\n def true_func():\n return x\n\n def false_func():\n return y\n\n with self.assertRaisesRegexp(ValueError,\n 'Rank of `condition` should be less than'):\n backend.switch(backend.equal(x, x), false_func, true_func)\n\n\nclass ContextValueCacheTest(test.TestCase):\n\n def test_cache(self):\n cache = backend.ContextValueCache(list)\n graph1 = ops.Graph()\n graph2 = ops.Graph()\n\n cache[graph1].append(1)\n with graph1.as_default():\n cache[None].append(2)\n\n with graph2.as_default():\n cache[None].append(3)\n cache[graph2].append(4)\n\n self.assertAllEqual(cache[graph1], [1, 2])\n self.assertAllEqual(cache[graph2], [3, 4])\n\n with context.eager_mode():\n cache[None].append(5)\n cache[None].append(6)\n self.assertAllEqual(cache[None], [5, 6])\n\n self.assertLen(cache, 3)\n\n del graph1\n gc.collect()\n self.assertLen(cache, 2)\n\n def test_cache_in_parent_graph(self):\n cache = backend.ContextValueCache(int)\n cache.setdefault(None, backend.constant(5))\n\n with ops.Graph().as_default() as g:\n # g is not a child graph of the default test context, so the recursive\n # lookup will create a new default value.\n self.assertAllEqual(cache[g], 0)\n\n @def_function.function\n def fn():\n # The function graph is a child of the default test context, so\n # __getitem__ will return the previously saved value.\n return cache[ops.get_default_graph()]\n\n self.assertEqual(self.evaluate(fn()), 5)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.python.keras.backend.zeros", "tensorflow.python.keras.backend.softmax", "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.keras.backend.reset_uids", "tensorflow.python.keras.backend.variable", "tensorflow.python.keras.backend.function", "tensorflow.python.keras.backend.symbolic_learning_phase", "tensorflow.python.eager.context.context", "tensorflow.python.keras.backend.sigmoid", "tensorflow.python.keras.backend.categorical_crossentropy", "tensorflow.python.keras.backend.greater", "numpy.zeros", "tensorflow.python.eager.context.eager_mode", "tensorflow.python.keras.backend.normalize_batch_in_training", "numpy.array", "tensorflow.python.keras.combinations.combine", "tensorflow.python.ops.nn.moments", "tensorflow.python.keras.backend.clear_session", "tensorflow.python.keras.backend.cast_to_floatx", "tensorflow.python.keras.backend.eval", "tensorflow.python.keras.backend.is_sparse", "tensorflow.python.keras.backend.zeros_like", "tensorflow.python.util.tf_inspect.getargspec", "tensorflow.python.keras.backend.resize_images", "tensorflow.python.keras.backend.get_graph", "tensorflow.python.keras.backend.batch_dot", "numpy.expand_dims", "numpy.asarray", "tensorflow.python.framework.ops.convert_to_tensor_v2", "numpy.concatenate", "tensorflow.python.keras.backend.less", "tensorflow.python.platform.test.is_built_with_rocm", "tensorflow.python.keras.backend.pool2d", "numpy.reshape", "tensorflow.python.keras.backend.is_keras_tensor", "tensorflow.python.keras.backend.conv3d", "numpy.std", "tensorflow.python.keras.backend.cast", "tensorflow.python.platform.test.main", "tensorflow.python.keras.backend.batch_normalization", "tensorflow.python.keras.backend.separable_conv2d", "numpy.min", "tensorflow.python.keras.engine.input_layer.Input", "tensorflow.python.keras.backend.get_value", "tensorflow.python.keras.backend.equal", "tensorflow.python.keras.backend.random_normal_variable", "tensorflow.python.keras.backend.conv2d_transpose", "tensorflow.python.keras.backend.dropout", "tensorflow.python.keras.backend.ones_like", "numpy.ones", "tensorflow.python.keras.backend.sparse_categorical_crossentropy", "tensorflow.python.keras.backend.backend", "tensorflow.python.keras.backend.conv2d", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.keras.backend.constant", "tensorflow.python.keras.backend.placeholder", "tensorflow.python.keras.backend.relu", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.keras.layers.normalization.BatchNormalization", "numpy.mean", "tensorflow.python.keras.backend.identity", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "numpy.random.randint", "tensorflow.python.keras.backend.truncated_normal", "tensorflow.python.keras.backend.learning_phase_scope", "numpy.eye", "tensorflow.python.keras.backend.count_params", "tensorflow.python.keras.backend.is_placeholder", "tensorflow.python.ops.array_ops.ones", "numpy.count_nonzero", "tensorflow.python.keras.backend.binary_crossentropy", "numpy.repeat", "tensorflow.python.keras.backend.local_conv2d", "tensorflow.python.keras.backend.update", "tensorflow.python.keras.backend.pool3d", "tensorflow.python.keras.backend.random_normal", "tensorflow.python.keras.backend.concatenate", "tensorflow.python.keras.backend.ContextValueCache", "tensorflow.python.keras.backend.stop_gradient", "numpy.testing.assert_allclose", "tensorflow.python.keras.backend.repeat", "tensorflow.python.framework.ops.Graph", 
"tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.keras.backend.eye", "tensorflow.python.keras.backend.learning_phase", "tensorflow.python.keras.backend.set_learning_phase", "tensorflow.python.keras.backend.local_conv", "tensorflow.python.keras.backend.int_shape", "tensorflow.python.keras.backend.get_uid", "tensorflow.python.keras.backend.ctc_decode", "numpy.max", "tensorflow.python.keras.backend.dot", "tensorflow.python.keras.backend.random_binomial", "tensorflow.python.keras.backend.repeat_elements", "tensorflow.python.keras.backend.bias_add", "tensorflow.python.keras.backend.to_dense", "numpy.tensordot", "tensorflow.python.keras.backend.get_session", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.keras.backend.ctc_batch_cost", "tensorflow.python.keras.backend.ones", "tensorflow.python.keras.backend.pow", "tensorflow.python.keras.backend.in_train_phase", "tensorflow.python.keras.backend.expand_dims", "tensorflow.python.keras.layers.advanced_activations.ReLU", "tensorflow.python.keras.backend.rnn", "numpy.random.random", "tensorflow.python.framework.config.set_optimizer_jit", "tensorflow.python.keras.backend.resize_volumes", "numpy.random.seed", "tensorflow.python.keras.backend.local_conv1d", "numpy.random.normal", "numpy.prod", "tensorflow.python.keras.backend.conv1d", "tensorflow.python.keras.backend.random_uniform_variable", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.keras.backend.random_uniform" ] ]
jamienoss/hcipy
[ "49604bd0dab5653cea2b934f7c1b327c4b5d4a67" ]
[ "hcipy/plotting/error_bars.py" ]
[ "import numpy as np\nimport matplotlib as mpl\n\ndef errorfill(x, y, y_err_pos, y_err_neg=None, color=None, alpha_fill=0.25, ax=None, **kwargs):\n\t\"\"\"\n\tPlot a line with filled errorbars.\n\t\"\"\"\n\timport matplotlib.pyplot as plt\n\n\tif ax is None:\n\t\tax = plt.gca()\n\n\tif y_err_neg is None:\n\t\ty_err_neg = y_err_pos\n\n\ty_min = np.array(y) - np.array(y_err_neg)\n\ty_max = np.array(y) + np.array(y_err_pos)\n\tif color is None:\n\t\tl = ax.plot(x, y, **kwargs)\n\t\tcolor = l[0].get_color()\n\telse:\n\t\tl = ax.plot(x, y, color=color, **kwargs)\n\n\tfacecolor = mpl.colors.colorConverter.to_rgba(color, alpha=alpha_fill)\n\tedgecolor = (0,0,0,0)\n\tax.fill_between(x, y_max, y_min, edgecolor=edgecolor, facecolor=facecolor)\n\n\treturn l" ]
[ [ "matplotlib.colors.colorConverter.to_rgba", "matplotlib.pyplot.gca", "numpy.array" ] ]
feifeibear/ColossalAI
[ "4c4388c46ed1cee9e535bf9ea5a5e5fb61d9a769" ]
[ "tests/test_zero/test_zero_engine.py" ]
[ "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom functools import partial\n\nimport colossalai\nimport pytest\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom colossalai.core import global_context as gpc\nfrom colossalai.testing import rerun_if_address_is_in_use\nfrom colossalai.utils import free_port\nfrom colossalai.zero.init_ctx import ZeroInitContext\nfrom colossalai.zero.sharded_model.utils import col_model_deepcopy\nfrom colossalai.zero.sharded_optim._utils import has_inf_or_nan\nfrom tests.components_to_test.registry import non_distributed_component_funcs\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom common import (MP_PARALLEL_CONFIG, ZERO_PARALLEL_CONFIG, check_params, check_sharded_model_params)\n\n\ndef run_dist(rank, world_size, port, parallel_config):\n colossalai.launch(config=parallel_config,\n rank=rank,\n world_size=world_size,\n host='localhost',\n port=port,\n backend='nccl')\n\n test_models = ['repeated_computed_layers', 'resnet18', 'bert']\n for model_name in test_models:\n get_components_func = non_distributed_component_funcs.get_callable(model_name)\n model_builder, train_dataloader, _, optimizer_class, criterion = get_components_func()\n with ZeroInitContext(target_device=torch.cuda.current_device(),\n shard_strategy=gpc.config.zero.model_config.shard_strategy,\n shard_param=True):\n colo_model = model_builder(checkpoint=True)\n\n colo_optimizer = optimizer_class(colo_model.parameters(), lr=1e-3)\n engine, train_dataloader, _, _ = colossalai.initialize(colo_model,\n optimizer=colo_optimizer,\n criterion=criterion,\n train_dataloader=train_dataloader)\n torch_model = model_builder(checkpoint=True).half()\n col_model_deepcopy(engine.model, torch_model)\n torch_model = torch_model.cuda().float()\n\n engine.train()\n torch_optimizer = optimizer_class(torch_model.parameters(), lr=1e-3)\n\n if dist.get_world_size() > 1:\n torch_model = DDP(torch_model, device_ids=[torch.cuda.current_device()])\n\n i = 0\n for data, label in train_dataloader:\n if i > 4:\n break\n\n data, label = data.cuda(), label.cuda()\n\n engine.zero_grad()\n torch_optimizer.zero_grad()\n\n if criterion:\n output = engine(data)\n loss = engine.criterion(output, label)\n\n torch_output = torch_model(data)\n torch_loss = engine.criterion(torch_output, label)\n else:\n loss = engine(data, label)\n torch_loss = torch_model(data, label)\n\n engine.backward(loss)\n engine.step()\n\n torch_loss.backward()\n\n for param in torch_model.parameters():\n if param.grad is not None:\n assert not has_inf_or_nan(param.grad)\n\n torch_optimizer.step()\n i += 1\n\n if parallel_config == MP_PARALLEL_CONFIG:\n check_params(torch_model, colo_model, loose=True)\n elif parallel_config == ZERO_PARALLEL_CONFIG:\n check_sharded_model_params(torch_model, colo_model, loose=True)\n\n\n# FIXME: enable this test in next PR\n\n\n@pytest.mark.skip\n@pytest.mark.dist\n@pytest.mark.parametrize(\"world_size\", [2, 4])\n@rerun_if_address_is_in_use()\ndef test_mp_engine(world_size):\n run_func = partial(run_dist, world_size=world_size, port=free_port(), parallel_config=MP_PARALLEL_CONFIG)\n mp.spawn(run_func, nprocs=world_size)\n\n\n@pytest.mark.dist\n@pytest.mark.parametrize(\"world_size\", [1, 2])\n@rerun_if_address_is_in_use()\ndef test_zero_engine(world_size):\n run_func = partial(run_dist, world_size=world_size, port=free_port(), parallel_config=ZERO_PARALLEL_CONFIG)\n mp.spawn(run_func, nprocs=world_size)\n\n\nif __name__ == '__main__':\n 
test_zero_engine(world_size=4)\n" ]
[ [ "torch.distributed.get_world_size", "torch.multiprocessing.spawn", "torch.cuda.current_device" ] ]
jorgepiloto/lamberthub
[ "fedf6aaa939ee08e3bccb754b519397352cb5a84" ]
[ "src/lamberthub/universal_solvers/izzo.py" ]
[ "\"\"\" A module hosting all algorithms devised by Izzo \"\"\"\n\nimport time\n\nimport numpy as np\nfrom numpy import cross, pi\nfrom numpy.linalg import norm\nfrom scipy.special import hyp2f1\n\nfrom lamberthub.utils.assertions import assert_parameters_are_valid\n\n\ndef izzo2015(\n mu,\n r1,\n r2,\n tof,\n M=0,\n prograde=True,\n low_path=True,\n maxiter=35,\n atol=1e-5,\n rtol=1e-7,\n full_output=False,\n):\n r\"\"\"\n Solves Lambert problem using Izzo's devised algorithm.\n\n Parameters\n ----------\n mu: float\n Gravitational parameter, equivalent to :math:`GM` of attractor body.\n r1: numpy.array\n Initial position vector.\n r2: numpy.array\n Final position vector.\n M: int\n Number of revolutions. Must be equal or greater than 0 value.\n prograde: bool\n If `True`, specifies prograde motion. Otherwise, retrograde motion is imposed.\n low_path: bool\n If two solutions are available, it selects between high or low path.\n maxiter: int\n Maximum number of iterations.\n atol: float\n Absolute tolerance.\n rtol: float\n Relative tolerance.\n full_output: bool\n If True, the number of iterations is also returned.\n\n Returns\n -------\n v1: numpy.array\n Initial velocity vector.\n v2: numpy.array\n Final velocity vector.\n numiter: list\n Number of iterations.\n\n Notes\n -----\n This is the algorithm devised by Dario Izzo[1] in 2015. It inherits from\n the one developed by Lancaster[2] during the 60s, following the universal\n formulae approach. It is one of the most modern solvers, being a complete\n Lambert's problem solver (zero and Multiple-revolution solutions). It shows\n high performance and robustness while requiring no more than four iterations\n to reach a solution.\n\n All credits of the implementation go to Juan Luis Cano Rodríguez and the\n poliastro development team, from which this routine inherits. Some changes\n were made to adapt it to `lamberthub` API. In addition, the hypergeometric\n function is the one from SciPy.\n\n Copyright (c) 2012-2021 Juan Luis Cano Rodríguez and the poliastro development team\n\n References\n ----------\n [1] Izzo, D. (2015). Revisiting Lambert’s problem. Celestial Mechanics\n and Dynamical Astronomy, 121(1), 1-15.\n\n [2] Lancaster, E. R., & Blanchard, R. C. (1969). A unified form of\n Lambert's theorem (Vol. 5368). 
National Aeronautics and Space\n Administration.\n\n \"\"\"\n\n # Check that input parameters are safe\n assert_parameters_are_valid(mu, r1, r2, tof, M)\n\n # Chord\n c = r2 - r1\n c_norm, r1_norm, r2_norm = norm(c), norm(r1), norm(r2)\n\n # Semiperimeter\n s = (r1_norm + r2_norm + c_norm) * 0.5\n\n # Versors\n i_r1, i_r2 = r1 / r1_norm, r2 / r2_norm\n i_h = cross(i_r1, i_r2)\n i_h = i_h / norm(i_h)\n\n # Geometry of the problem\n ll = np.sqrt(1 - min(1.0, c_norm / s))\n\n # Compute the fundamental tangential directions\n if i_h[2] < 0:\n ll = -ll\n i_t1, i_t2 = cross(i_r1, i_h), cross(i_r2, i_h)\n else:\n i_t1, i_t2 = cross(i_h, i_r1), cross(i_h, i_r2)\n\n # Correct transfer angle parameter and tangential vectors regarding orbit's\n # inclination\n ll, i_t1, i_t2 = (-ll, -i_t1, -i_t2) if prograde is False else (ll, i_t1, i_t2)\n\n # Non dimensional time of flight\n T = np.sqrt(2 * mu / s ** 3) * tof\n\n # Find solutions and filter them\n x, y, numiter, tpi = _find_xy(ll, T, M, maxiter, atol, rtol, low_path)\n\n # Reconstruct\n gamma = np.sqrt(mu * s / 2)\n rho = (r1_norm - r2_norm) / c_norm\n sigma = np.sqrt(1 - rho ** 2)\n\n # Compute the radial and tangential components at initial and final\n # position vectors\n V_r1, V_r2, V_t1, V_t2 = _reconstruct(x, y, r1_norm, r2_norm, ll, gamma, rho, sigma)\n\n # Solve for the initial and final velocity\n v1 = V_r1 * (r1 / r1_norm) + V_t1 * i_t1\n v2 = V_r2 * (r2 / r2_norm) + V_t2 * i_t2\n\n return (v1, v2, numiter, tpi) if full_output is True else (v1, v2)\n\n\ndef _reconstruct(x, y, r1, r2, ll, gamma, rho, sigma):\n \"\"\"Reconstruct solution velocity vectors.\"\"\"\n V_r1 = gamma * ((ll * y - x) - rho * (ll * y + x)) / r1\n V_r2 = -gamma * ((ll * y - x) + rho * (ll * y + x)) / r2\n V_t1 = gamma * sigma * (y + ll * x) / r1\n V_t2 = gamma * sigma * (y + ll * x) / r2\n return [V_r1, V_r2, V_t1, V_t2]\n\n\ndef _find_xy(ll, T, M, maxiter, atol, rtol, low_path):\n \"\"\"Computes all x, y for given number of revolutions.\"\"\"\n # For abs(ll) == 1 the derivative is not continuous\n assert abs(ll) < 1\n\n M_max = np.floor(T / pi)\n T_00 = np.arccos(ll) + ll * np.sqrt(1 - ll ** 2) # T_xM\n\n # Refine maximum number of revolutions if necessary\n if T < T_00 + M_max * pi and M_max > 0:\n _, T_min = _compute_T_min(ll, M_max, maxiter, atol, rtol)\n if T < T_min:\n M_max -= 1\n\n # Check if a feasible solution exist for the given number of revolutions\n # This departs from the original paper in that we do not compute all solutions\n if M > M_max:\n raise ValueError(\"No feasible solution, try lower M!\")\n\n # Initial guess\n x_0 = _initial_guess(T, ll, M, low_path)\n\n # Start Householder iterations from x_0 and find x, y\n x, numiter, tpi = _householder(x_0, T, ll, M, atol, rtol, maxiter)\n y = _compute_y(x, ll)\n\n return x, y, numiter, tpi\n\n\ndef _compute_y(x, ll):\n \"\"\"Computes y.\"\"\"\n return np.sqrt(1 - ll ** 2 * (1 - x ** 2))\n\n\ndef _compute_psi(x, y, ll):\n \"\"\"Computes psi.\n\n \"The auxiliary angle psi is computed using Eq.(17) by the appropriate\n inverse function\"\n\n \"\"\"\n if -1 <= x < 1:\n # Elliptic motion\n # Use arc cosine to avoid numerical errors\n return np.arccos(x * y + ll * (1 - x ** 2))\n elif x > 1:\n # Hyperbolic motion\n # The hyperbolic sine is bijective\n return np.arcsinh((y - x * ll) * np.sqrt(x ** 2 - 1))\n else:\n # Parabolic motion\n return 0.0\n\n\ndef _tof_equation(x, T0, ll, M):\n \"\"\"Time of flight equation.\"\"\"\n return _tof_equation_y(x, _compute_y(x, ll), T0, ll, M)\n\n\ndef _tof_equation_y(x, y, 
T0, ll, M):\n \"\"\"Time of flight equation with externally computated y.\"\"\"\n if M == 0 and np.sqrt(0.6) < x < np.sqrt(1.4):\n eta = y - ll * x\n S_1 = (1 - ll - x * eta) * 0.5\n Q = 4 / 3 * hyp2f1(3, 1, 5 / 2, S_1)\n T_ = (eta ** 3 * Q + 4 * ll * eta) * 0.5\n else:\n psi = _compute_psi(x, y, ll)\n T_ = np.divide(\n np.divide(psi + M * pi, np.sqrt(np.abs(1 - x ** 2))) - x + ll * y,\n (1 - x ** 2),\n )\n\n return T_ - T0\n\n\ndef _tof_equation_p(x, y, T, ll):\n # TODO: What about derivatives when x approaches 1?\n return (3 * T * x - 2 + 2 * ll ** 3 * x / y) / (1 - x ** 2)\n\n\ndef _tof_equation_p2(x, y, T, dT, ll):\n return (3 * T + 5 * x * dT + 2 * (1 - ll ** 2) * ll ** 3 / y ** 3) / (1 - x ** 2)\n\n\ndef _tof_equation_p3(x, y, _, dT, ddT, ll):\n return (7 * x * ddT + 8 * dT - 6 * (1 - ll ** 2) * ll ** 5 * x / y ** 5) / (\n 1 - x ** 2\n )\n\n\ndef _compute_T_min(ll, M, maxiter, atol, rtol):\n \"\"\"Compute minimum T.\"\"\"\n if ll == 1:\n x_T_min = 0.0\n T_min = _tof_equation(x_T_min, 0.0, ll, M)\n else:\n if M == 0:\n x_T_min = np.inf\n T_min = 0.0\n else:\n # Set x_i > 0 to avoid problems at ll = -1\n x_i = 0.1\n T_i = _tof_equation(x_i, 0.0, ll, M)\n x_T_min = _halley(x_i, T_i, ll, atol, rtol, maxiter)\n T_min = _tof_equation(x_T_min, 0.0, ll, M)\n\n return [x_T_min, T_min]\n\n\ndef _initial_guess(T, ll, M, low_path):\n \"\"\"Initial guess.\"\"\"\n if M == 0:\n # Single revolution\n T_0 = np.arccos(ll) + ll * np.sqrt(1 - ll ** 2) + M * pi # Equation 19\n T_1 = 2 * (1 - ll ** 3) / 3 # Equation 21\n if T >= T_0:\n x_0 = (T_0 / T) ** (2 / 3) - 1\n elif T < T_1:\n x_0 = 5 / 2 * T_1 / T * (T_1 - T) / (1 - ll ** 5) + 1\n else:\n # This is the real condition, which is not exactly equivalent\n # elif T_1 < T < T_0\n x_0 = (T_0 / T) ** (np.log2(T_1 / T_0)) - 1\n\n return x_0\n else:\n # Multiple revolution\n x_0l = (((M * pi + pi) / (8 * T)) ** (2 / 3) - 1) / (\n ((M * pi + pi) / (8 * T)) ** (2 / 3) + 1\n )\n x_0r = (((8 * T) / (M * pi)) ** (2 / 3) - 1) / (\n ((8 * T) / (M * pi)) ** (2 / 3) + 1\n )\n\n # Filter out the solution\n x_0 = np.max([x_0l, x_0r]) if low_path is True else np.min([x_0l, x_0r])\n\n return x_0\n\n\ndef _halley(p0, T0, ll, atol, rtol, maxiter):\n \"\"\"Find a minimum of time of flight equation using the Halley method.\n\n Note\n ----\n This function is private because it assumes a calling convention specific to\n this module and is not really reusable.\n\n \"\"\"\n for ii in range(1, maxiter + 1):\n y = _compute_y(p0, ll)\n fder = _tof_equation_p(p0, y, T0, ll)\n fder2 = _tof_equation_p2(p0, y, T0, fder, ll)\n if fder2 == 0:\n raise RuntimeError(\"Derivative was zero\")\n fder3 = _tof_equation_p3(p0, y, T0, fder, fder2, ll)\n\n # Halley step (cubic)\n p = p0 - 2 * fder * fder2 / (2 * fder2 ** 2 - fder * fder3)\n\n if abs(p - p0) < rtol * np.abs(p0) + atol:\n return p\n p0 = p\n\n raise RuntimeError(\"Failed to converge\")\n\n\ndef _householder(p0, T0, ll, M, atol, rtol, maxiter):\n \"\"\"Find a zero of time of flight equation using the Householder method.\n\n Note\n ----\n This function is private because it assumes a calling convention specific to\n this module and is not really reusable.\n\n \"\"\"\n\n # The clock starts together with the iteration\n tic = time.perf_counter()\n for numiter in range(1, maxiter + 1):\n y = _compute_y(p0, ll)\n fval = _tof_equation_y(p0, y, T0, ll, M)\n T = fval + T0\n fder = _tof_equation_p(p0, y, T, ll)\n fder2 = _tof_equation_p2(p0, y, T, fder, ll)\n fder3 = _tof_equation_p3(p0, y, T, fder, fder2, ll)\n\n # Householder step 
(quartic)\n p = p0 - fval * (\n (fder ** 2 - fval * fder2 / 2)\n / (fder * (fder ** 2 - fval * fder2) + fder3 * fval ** 2 / 6)\n )\n\n if abs(p - p0) < rtol * np.abs(p0) + atol:\n # Stop the clock and compute the time per iteration\n tac = time.perf_counter()\n tpi = (tac - tic) / numiter\n\n return p, numiter, tpi\n p0 = p\n\n raise RuntimeError(\"Failed to converge\")\n" ]
[ [ "numpy.log2", "numpy.sqrt", "numpy.abs", "numpy.min", "numpy.arccos", "numpy.linalg.norm", "numpy.max", "scipy.special.hyp2f1", "numpy.floor", "numpy.cross" ] ]
dsosnoski/morepork
[ "669e53827052549c402b8f5b0ad24a374a7b3559" ]
[ "latest/loop_trainer_rnn.py" ]
[ "\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint\n\nimport morepork_support\nfrom sampler_dataset import DataSampler\nfrom resnet_rnn import ResnetRNNBuilder\nimport training_parameters\n\ntrain_fraction = .8\ntrainings_count = 20\nresnet_size = 34\nconv_size = (7,7)\nconv_strides = (2,2)\nbatch_size = 128\nmax_pooling = True\nmax_noise = None\nif max_pooling:\n pooling = 'max'\nelse:\n pooling = 'avg'\n\nclass Checkpointer(ModelCheckpoint):\n\n def __init__(self, path, start, decay, rate):\n super().__init__(filepath=path, monitor='val_binary_accuracy', mode='max', save_best_only=True)\n self.start_epoch = start\n self.decay_time = decay\n self.decay_rate = rate\n self.save_active = False\n self.last_decay = 0\n self.monitor_op = self._check\n self.last_save = 0.0\n\n def _check(self, current, best):\n if self._current_epoch >= self.start_epoch:\n self.save_active = True\n if current > best:\n self.best = current\n self.last_save = current\n return self.save_active\n elif self.save_active and self.epochs_since_last_save >= self.decay_time:\n if self.epochs_since_last_save % self.decay_time == 0:\n self.best *= self.decay_rate\n\n def last_save_accuracy(self):\n return self.last_save\n\ndef build_model(conv_size, conv_strides, input_dims):\n model = ResnetRNNBuilder.build(input_dims, 1, conv_size, conv_strides, repetitions=[3, 4])\n optimizer = tf.keras.optimizers.Adam(lr=0.002, epsilon=0.002)\n model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['binary_accuracy'])\n return model\n\ndef first_time_model(model):\n print(model.summary())\n with open(f'{save_directory}/model.txt', 'w') as f:\n def summary_print(s):\n print(s, file=f)\n\n print(model.summary(print_fn=summary_print))\n model_json = model.to_json()\n with open(f'{save_directory}/model.json', 'w') as f:\n f.write(model_json)\n\ndef compute_scores(tp, fp, fn):\n if tp != 0:\n precision = tp / (tp + fp)\n recall = tp / (tp + fn)\n fscore = 2. 
* precision * recall / (precision + recall)\n return precision, recall, fscore\n else:\n return 0.0, 0.0, 0.0\n\npositive_segments, negative_segments = morepork_support.load_samples()\nsegments = list(positive_segments.values()) + list(negative_segments.values())\nactuals = [1.0] * len(positive_segments) + [0.0] * len(negative_segments)\nvalidation_count = int(len(segments) * (1 - train_fraction) // batch_size) * batch_size\ntrain_count = len(segments) - validation_count\nsample_dims = (training_parameters.num_buckets, training_parameters.slices_per_sample)\nrandom_generator = np.random.default_rng()\nsave_directory = f'{training_parameters.base_path}/morepork-resnet{resnet_size}-rnn-3-4-{conv_size[0]}-{conv_size[1]}-{conv_strides[0]}-{conv_strides[1]}-unigrurandom'\nif not os.path.exists(save_directory):\n os.mkdir(save_directory)\nprint(f'training with {train_count} samples, validating with {validation_count} samples, saving to {save_directory}')\nsum_validation_accuracies = 0.0\nfor i in range(trainings_count):\n permuted = np.random.permutation(len(segments))\n permuted_segments = [segments[i] for i in permuted]\n permuted_actuals = [actuals[i] for i in permuted]\n training_sampler = DataSampler(permuted_segments[:train_count], permuted_actuals[:train_count], sample_dims)\n training_ds = training_sampler.to_dataset()\n training_ds = training_ds.shuffle(buffer_size=train_count).batch(batch_size).repeat()\n validation_sampler = DataSampler(permuted_segments[train_count:], permuted_actuals[train_count:], sample_dims)\n validation_ds = validation_sampler.to_dataset()\n validation_ds = validation_ds.shuffle(buffer_size=validation_count).batch(\n batch_size).repeat()\n training_steps = train_count // batch_size\n validation_steps = validation_count // batch_size\n reduce_lr_callback = ReduceLROnPlateau(monitor='val_loss', factor=0.65, patience=25, min_lr=0.0002, cooldown=25, verbose=1)\n save_path = f'{save_directory}/weights{i}'\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n checkpoint_callback = Checkpointer(save_path + '/model-{epoch:02d}-{val_binary_accuracy:.4f}', 50, 100, .9999)\n input_dims = (training_parameters.num_buckets, training_parameters.slices_per_sample, 1)\n model = build_model(conv_size, conv_strides, input_dims)\n if i == 0:\n first_time_model(model)\n history = model.fit(training_ds, steps_per_epoch=training_steps,\n validation_data=validation_ds, validation_steps=validation_steps,\n callbacks=[reduce_lr_callback, checkpoint_callback], epochs=1000)\n sum_validation_accuracies += checkpoint_callback.last_save_accuracy()\n plt.figure(figsize=(15,5))\n plt.subplot(121)\n plt.plot(history.history['binary_accuracy'])\n plt.plot(history.history['val_binary_accuracy'])\n plt.title('Accuracy vs. epochs')\n plt.ylabel('Binary Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Training', 'Validation'], loc='lower right')\n\n plt.subplot(122)\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Loss vs. epochs')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Training', 'Validation'], loc='upper right')\n plt.savefig(f'{save_path}/history.png')\n plt.close()\n\n tf.keras.backend.clear_session()\n\nprint(f'Average best validation accuracy {sum_validation_accuracies/trainings_count:04f}')" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "tensorflow.keras.callbacks.ReduceLROnPlateau", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "tensorflow.keras.optimizers.Adam", "matplotlib.pyplot.subplot", "tensorflow.keras.backend.clear_session", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "numpy.random.default_rng", "matplotlib.pyplot.figure" ] ]
kbaseapps/mags_mash
[ "40f66e6b6ca7d96adf2f1cb6f83718c3dae19c4f" ]
[ "lib/mags_mash/utils/mags_filter.py" ]
[ "from installed_clients.WorkspaceClient import Workspace\nfrom installed_clients.DataFileUtilClient import DataFileUtil\n\nimport numpy as np\nimport pandas as pd\nimport os\nfrom collections import defaultdict\nfrom scipy.cluster.hierarchy import linkage, leaves_list\n\n\ndef create_tree(GOLD, tree_cols, dist_compl, source_order=None):\n \"\"\"\n \"\"\"\n tree = []\n if len(tree_cols) == 0:\n return tree\n col = tree_cols[0]\n type_count = GOLD[col].value_counts().to_dict()\n\n for t in type_count:\n # if len(t) > name_max_len:\n # name = t[:name_max_len] + '...'\n # else:\n # name = t\n count = \"({})\".format(type_count[t])\n leaf = create_tree(GOLD[GOLD[col]==t], tree_cols[1:], dist_compl, source_order=source_order)\n if leaf == []:\n if col == \"Project / Study Name\":\n mag_dict = dist_compl[t]\n dist = {mag:val[0] for mag, val in mag_dict.items()}\n compl = {mag:val[1] for mag, val in mag_dict.items()}\n cont = {mag:val[2] for mag, val in mag_dict.items()}\n else:\n dist, compl, cont = \"\", \"\", \"\"\n print(\"-\"*90)\n print('project name:',t)\n print(\"gold stuff:\",GOLD[GOLD[\"Project / Study Name\"]==t].iloc[0])\n print(\"-\"*90)\n trunc_name = GOLD[GOLD[\"Project / Study Name\"] == t].iloc[0]['IMG Genome ID ']\n # is terminal node/actually a leaf\n # here we change the terminal nodes to have dists as a dict\n # of IMG_id -> distance,\n # and we include the list of img_id's for each \n\n tree.append({\n 'truncated_name': str(trunc_name),\n 'name' : t,\n 'count': \"({})\".format(len(dist))\n })\n if source_order!=None:\n tree[-1]['dist'] = dist\n tree[-1]['compl'] = compl\n tree[-1]['cont'] = cont\n else:\n children = []\n for key, val in dist.items():\n child = {}\n child['truncated_name'] = key\n child['count'] = ''\n child['dist'] = val\n children.append(child)\n\n tree[-1]['children'] = children\n else: \n tree.append({\n 'truncated_name':t,\n 'count':count,\n 'children':leaf\n })\n if source_order!=None:\n\n sources = []\n if leaf == []:\n g = GOLD[GOLD[col]==t][['upa','mag_id']]\n upas = g['upa'].tolist()\n ss = {}\n for upa in upas:\n mag_ids = g[g['upa']==upa]['mag_id'].tolist()\n ss[upa] = mag_ids\n\n for i, s in enumerate(source_order):\n if s in ss:\n sources.append(ss[upa])\n # sources[i] = ss[upa]\n else:\n sources.append([])\n else:\n source_count = GOLD[GOLD[col]==t]['upa'].value_counts().to_dict()\n for i, s in enumerate(source_order):\n if s in source_count:\n sources.append(source_count[s])\n # sources[i] = source_count[s]\n else:\n sources.append(0)\n\n tree[-1]['sources'] = sources\n\n return tree\n\ndef get_location_markers(ids, source=None):\n '''\n For now this simply returns 1 marker with\n the location of LBL. 
Returns list of markers\n\n ids: list of ids\n\n marker format:\n {\n 'name': name of marker\n 'lat': latitude as a float\n 'lng': longitude as a float\n 'details': pop up details\n\n }\n '''\n markers = [\n {'name':\"LBL\", \"lat\":37.877344, \"lng\":-122.250694, \"details\":\"This is Lawrence Berkeley National Laboratory.\"},\n {'name':\"Golden Gate Bridge\", \"lat\": 37.817060, \"lng\": -122.478206, \"details\":\"This is the Golden Gate Bridge.\"},\n {'name':\"SFO Airport\", 'lat':37.616310, 'lng': -122.386793, 'details':\"This is San Francisco International Airport.\"},\n {'name':\"Mount Diablo\", \"lat\": 37.881523, \"lng\": -121.914325, \"details\":\"This is Mount Diablo.\"}\n ]\n if source!= None:\n for m in markers:\n m['source'] = \"Input source:\"+source\n\n return markers\n\n\ndef unwind_tree(X, tree):\n \"\"\"\n \"\"\"\n if tree.get('children'):\n for t in tree['children']:\n if 'compl' in t:\n X.append(np.array([len(mag_ids) for mag_ids in t['sources']]))\n else:\n X.append(np.array(t['sources']))\n X = unwind_tree(X, t)\n return X\n\n\ndef remap_sources(sources, upa_order):\n new_sources = {}\n for j, i in enumerate(upa_order):\n val = sources[i]\n if val != 0 and val != []:\n new_sources[j] = val\n\n return new_sources\n\n\ndef rewind_tree(tree, upa_order):\n \"\"\"\n \"\"\"\n for t_ix, t in enumerate(tree['children']):\n new_sources = remap_sources(t['sources'], upa_order)\n t['sources'] = new_sources\n if t.get('children'):\n t = rewind_tree(t, upa_order)\n tree['children'][t_ix] = t\n return tree\n\n\ndef get_source_order(tree, upa_names):\n \"\"\"\n stats:\n \"\"\"\n X = unwind_tree([tree['sources']], tree)\n print(\"-\"*80)\n print(\"je suis here first:\",X) \n X = np.transpose(np.array(X))\n print(\"je suis here:\",X)\n print('-'*80)\n z = linkage(X, 'ward')\n upa_order = leaves_list(z)\n return upa_order\n\n\ndef filter_results(ws_url, cb_url, query_results, n_max_results, max_distance, min_completeness, max_contamination):\n \"\"\"\n Here we do a combiantion of getting all the relevant statistics from the data csv, filtering\n the outputs according to the provided inputs, and staging some of the outputs for the templates.\n \"\"\"\n if len(query_results) > 1:\n upa_to_name = get_upa_names(ws_url, cb_url, list(query_results.keys()))\n else:\n upa_to_name = {list(query_results.keys())[0]:\"\"}\n\n\n currdir = os.path.dirname(__file__)\n gold_path = os.path.join(currdir,'data','GOLD-metadata.csv')\n GOLD = pd.read_csv(gold_path)\n upa_names = []\n upas = []\n dist_compl = {}\n\n all_GOLD = []\n\n # id_to_inputs = defaultdict(lambda:[])\n\n stats = []\n\n for upa in query_results:\n upas.append(upa)\n upa_name = upa_to_name[upa]\n curr_GOLD = GOLD[GOLD['GOLD Analysis Project ID'].isin([val[2]['GOLD_Analysis_ID'] for key, val in query_results[upa].items()])]\n tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\\n 'Ecosystem Type','Specific Ecosystem','Project / Study Name']\n\n print(\"curr gold cols 1:\",curr_GOLD.columns)\n curr_GOLD = curr_GOLD.fillna({col:\"Unknown\" for col in tree_cols})\n print(\"curr gold cols 2:\",curr_GOLD.columns)\n\n curr_stats = get_statistics(query_results[upa], curr_GOLD, upa_name=upa_name)\n curr_stats, curr_dist_compl = filter_stats(curr_stats, n_max_results, max_distance, min_completeness, max_contamination)\n\n curr_GOLD = curr_GOLD[curr_GOLD['GOLD Analysis Project ID'].isin([s['GOLD_Analysis_ID'] for s in curr_stats])]\n curr_GOLD['upa'] = upa\n print(\"curr gold cols 3:\",curr_GOLD.columns)\n\n # We want to get a row 
for each mag id in curr_GOLD,\n # right now we only have a row for each img id\n stats += curr_stats\n\n # group them by img_ids\n curr_GOLD.set_index('IMG Genome ID ', inplace=True)\n print(\"curr gold cols 4:\",curr_GOLD.columns)\n\n new_gold = defaultdict(lambda: [])\n\n for i, cs in enumerate(curr_stats):\n img_id = cs['IMG_Genome_ID']\n mag_id = cs['mag_id']\n gold_info = curr_GOLD.loc[int(img_id),:]\n new_gold['mag_id'].append(mag_id)\n new_gold['IMG Genome ID '].append(img_id)\n for key, val in gold_info.iteritems():\n new_gold[key].append(val)\n \n new_gold = pd.DataFrame.from_dict(new_gold)\n\n all_GOLD.append(new_gold)\n\n for key in curr_dist_compl:\n if key in dist_compl:\n for mag_key in curr_dist_compl[key]:\n dist_compl[key][mag_key] = curr_dist_compl[key][mag_key]\n else:\n dist_compl[key] = curr_dist_compl[key]\n\n # dist_1, compl_1, cont_1 = dist_compl[key]\n # dist_2, compl_2, cont_2 = curr_dist_compl[key]\n # if compl_1 == compl_2 and cont_1 == cont_2:\n # # check to see distance dictionary\n # unincluded_keys = list(set(list(dist_2.keys())) - set(list(dist_1.keys())))\n # for uinc_key in unincluded_keys:\n # dist_1[uinc_key] = dist_2[uinc_key]\n # dist_compl[key] = [dist_1, compl_1, cont_1]\n # else:\n # raise ValueError('Same project ids but contamination and/or completeness do not match')\n # # id_to_inputs[key].append(upa_name)\n # else:\n # dist_compl[key] = curr_dist_compl[key]\n\n upa_names.append(upa_name)\n\n all_GOLD = pd.concat(all_GOLD, ignore_index=True)\n\n tree_cols = ['Ecosystem','Ecosystem Category','Ecosystem Subtype',\\\n 'Ecosystem Type','Specific Ecosystem','Project / Study Name']\n if len(upas) == 1:\n tree = create_tree(all_GOLD, tree_cols, dist_compl)\n count = sum([ int(t['count'][1:-1]) for t in tree]) #len(query_results[upas[0]])\n tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(count)), \"count_num\":count, \"children\":tree}\n else:\n tree = create_tree(all_GOLD, tree_cols, dist_compl, source_order=upas)\n sources = [0 for _ in range(len(upa_names))]\n for i in range(len(upa_names)):\n for t in tree:\n sources[i]+=t['sources'][i]\n\n\n\n total_num = sum(sources)\n tree = {\"truncated_name\":\"\", \"count\":\"({})\".format(str(total_num)), 'count_num':total_num, 'sources':sources, \"children\":tree}\n\n upa_order = get_source_order(tree, upa_names)\n tree['sources'] = remap_sources(tree['sources'], upa_order)\n tree = rewind_tree(tree, upa_order)\n new_upa_names = []\n for i in upa_order:\n new_upa_names.append(upa_names[i])\n upa_names = new_upa_names\n\n # TEMPORARY MARKER SET UP\n markers = get_location_markers(set([s['mag_id'] for s in stats]))\n\n return stats, upa_names, tree, markers\n\n\ndef filter_stats(stats, n_max_results, max_distance, min_completeness, max_contamination):\n if max_distance:\n stats = [s for s in stats if s['dist'] <= max_distance]\n if min_completeness:\n stats = [s for s in stats if s['completeness'] >= min_completeness]\n if max_contamination:\n stats = [s for s in stats if s['contamination'] <= max_contamination]\n stats = sorted(stats, key=lambda s: s['dist'])\n if len(stats) > n_max_results:\n stats = stats[:n_max_results]\n\n dist_compl = {}\n for s in stats:\n if s['project'] not in dist_compl:\n dist_compl[s['project']] = {}\n dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)]\n\n # dist_compl[s['project']] = [{s['mag_id']:round(s['dist'], 3)}, round(s['completeness'],2), round(s['contamination'],2)]\n else:\n 
dist_compl[s['project']][s['mag_id']] = [round(s['dist'], 3), round(s['completeness'],2), round(s['contamination'],2)]\n\n # print(\"mapping the items:\",s, dist_compl[s['project']])\n # if round(s['completeness'],2) == dist_compl[s['project']][1] and round(s['contamination'],2) == dist_compl[s['project']][2]:\n # dist_compl[s['project']][0][s['mag_id']] = (round(s['dist'], 3))\n # else:\n # raise ValueError('same project ids but contamination and/or completeness do not match',\\\n # round(s['completeness'],2), dist_compl[s['project']][1],\n # round(s['contamination'],2), dist_compl[s['project']][2])\n\n # dist_compl = {s['project']:(round(s['dist'], 3), round(s['completeness'], 2), round(s['contamination'], 2)) for s in stats}\n return stats, dist_compl\n\ndef get_upa_names(ws_url, cb_url, upas):\n \"\"\"\n \"\"\"\n ws = Workspace(ws_url)\n objs = ws.get_object_info3({\n 'objects': [{'ref':upa} for upa in upas]\n })\n\n upa_to_name = {'/'.join([str(info[6]), str(info[0]), str(info[4])]):info[1] for info in objs['infos']}\n if len(upa_to_name)==len(upas):\n return upa_to_name\n\n missing_upas = list(set(upas) - set(list(upa_to_name.keys())))\n\n dfu = DataFileUtil(cb_url)\n objs = dfu.get_objects({'object_refs':missing_upas})['data']\n if len(objs) != len(missing_upas):\n raise ValueError(\"Could not find all input names. len upas: %s len objs: %s\"%(len(upas), len(objs)), upas, [obj['info'] for obj in objs])\n for obj in objs:\n info = obj['info']\n upa = '/'.join([str(info[6]), str(info[0]), str(info[4])])\n upa_to_name[upa] = info[1]\n return upa_to_name\n\n\ndef get_statistics(ids, GOLD, upa_name=None):\n '''\n get statistics from the GOLD and statitics csvs\n\n ids:\n GOLD: \n '''\n output = []\n currdir = os.path.dirname(__file__)\n stats_path = os.path.join(currdir, 'data', 'Stats-taxonomy.csv')\n Stats = pd.read_csv(stats_path)\n curr_stats = Stats[Stats['binid'].isin(ids.keys())]\n curr_stats = curr_stats.fillna('Unknown')\n for id_ in ids:\n curr = {}\n dist, kb_id, relatedids = ids[id_]\n if upa_name != None:\n curr['input_name'] = upa_name\n curr['dist'] = dist\n # if kb_id:\n # curr['kb_id'] = kb_id\n # else:\n # curr['kb_id'] = ''\n id_stats = curr_stats[curr_stats.binid == id_]\n curr['completeness'] = id_stats.iloc[0]['completeness']\n curr['contamination'] = id_stats.iloc[0]['contamination']\n curr['MIMAG'] = id_stats.iloc[0]['MIMAG']\n curr['mag_id'] = id_\n curr['IMG_Genome_ID'] = id_.split('_')[0]\n\n img_link = \"https://img.jgi.doe.gov/cgi-bin/m/main.cgi?section=MetaDetail&page=metagenomeBinScaffolds&taxon_oid=%s&bin_name=%s\"%(id_.split('_')[0], id_)\n curr['IMG_link'] = img_link\n if relatedids:\n for key in relatedids:\n if relatedids[key]:\n curr[key] = relatedids[key]\n else:\n curr[key] = 'Unknown'\n if relatedids['GOLD_Analysis_ID']:\n curr['project'] = GOLD[GOLD['GOLD Analysis Project ID'] == relatedids['GOLD_Analysis_ID']].iloc[0]['Project / Study Name']\n else:\n curr['project'] = 'Unknown'\n\n output.append(curr)\n\n return output\n" ]
[ [ "pandas.concat", "scipy.cluster.hierarchy.leaves_list", "pandas.read_csv", "pandas.DataFrame.from_dict", "scipy.cluster.hierarchy.linkage", "numpy.array" ] ]
beukueb/leopard
[ "0b1c8d267397ee3daa7b7e713a765d24a6c645f6" ]
[ "leopard/utils.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Leopard utility functions and classes\n\"\"\"\n\nimport pandas as pd, re\nfrom collections import OrderedDict\n\ndef makeFigFromFile(filename,*args,**kwargs):\n \"\"\"\n Renders an image in a matplotlib figure, so it can be added to reports \n args and kwargs are passed to plt.subplots\n \"\"\"\n import matplotlib.pyplot as plt\n img = plt.imread(filename)\n fig,ax = plt.subplots(*args,**kwargs)\n ax.axis('off')\n ax.imshow(img)\n return fig\n\ndef pdSeriesToFrame(pdseries,colname='value'):\n \"Returns a series as a pd dataframe\"\n return pd.DataFrame(pdseries,columns=[colname])\n\ndef renewliner(text):\n newline = re.compile(r'(\\w)\\n(\\w)')\n return newline.subn(r'\\g<1> \\g<2>',text)[0]\n\nclass LeopardDict(OrderedDict):\n \"\"\"Leopard Dictionary\n\n Ordered dictionary that only allows string keys.\n Integers can be used to query the dict instead of the string key.\n \"\"\"\n def __setitem__(self, key, item, **kwargs):\n if not isinstance(key, str):\n raise KeyError(\"Only string keys are allowed in LeopardDict\")\n super().__setitem__(key,item,**kwargs)\n\n def __getitem__(self, key):\n if isinstance(key, str):\n return super().__getitem__(key)\n elif isinstance(key, int):\n return super().__getitem__(list(self.keys())[key])\n else:\n raise KeyError(\"LeopardDict item retrieval is only possible with str or int\")\n\nclass FigureDict(LeopardDict):\n \"\"\"\n Connects the report section to the dict\n Currently not used!\n \"\"\"\n def __init__(self, *args, section=None, **kwds):\n super().__init__(*args, **kwds)\n self._section = section \n \n def __setitem__(self, key, figure, **kwargs):\n super().__setitem__(key,figure,**kwargs)\n\ndef print2report(report):\n \"\"\"Redirecting print to report\n Original print function saved as builtins._print\n \"\"\"\n import builtins\n if not '_print' in vars(builtins):\n builtins._print = builtins.print\n builtins._print('Redirecting print to print method', report)\n builtins.print = report.print\n\ndef open_file(filename):\n import os, sys, subprocess\n if sys.platform == \"win32\":\n os.startfile(filename)\n else:\n opener =\"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, filename])\n" ]
[ [ "matplotlib.pyplot.imread", "matplotlib.pyplot.subplots", "pandas.DataFrame" ] ]
caomw/pydro
[ "00286cb99a58e4c49fe79b08d8ae041cf8ee173c", "00286cb99a58e4c49fe79b08d8ae041cf8ee173c" ]
[ "tests/features_test.py", "src/pydro/features.py" ]
[ "import numpy.linalg\nimport scipy.io\nimport scipy.misc\nimport numpy\nimport itertools\n\nfrom pydro.features import *\nfrom pydro.io import *\n\ndef resize_test():\n image = scipy.misc.imread('tests/lenna.png').astype(numpy.float32)\n\n image_resized = ResizeImage(image, image.shape[0]/2, image.shape[1]/2)\n \n\ndef features_test():\n lenna_image = scipy.misc.imread('tests/lenna.png').astype(numpy.float32)\n lenna_features = ComputeFeatures(lenna_image, 8, 0, 0)\n\n correct_features = scipy.io.loadmat('tests/lenna_features.mat')\n\n assert (numpy.fabs(correct_features['features']-lenna_features) < 1e-6).all()\n\ndef build_pyramid_test():\n image = scipy.misc.imread('tests/lenna.png')\n\n sbin = 8\n interval = 10\n\n pyramid = BuildPyramid(image, sbin=sbin, interval=interval, extra_octave=True, padx=16, pady=7)\n\n for level in pyramid.levels:\n\n assert math.floor(image.shape[0]*level.scale/sbin) - level.features.shape[0] - 30 <= 2\n assert math.floor(image.shape[1]*level.scale/sbin) - level.features.shape[1] - 6 <= 2\n assert level.features.shape[2] == 32\n\n assert (numpy.fabs(level.features[:7,:,:-1]) < 1e-6).all()\n assert (numpy.fabs(level.features[:7,:,-1] - 1) < 1e-6).all()\n\n assert (numpy.fabs(level.features[-7:,:,:-1]) < 1e-6).all()\n assert (numpy.fabs(level.features[-7:,:,-1] - 1) < 1e-6).all()\n\n assert (numpy.fabs(level.features[:,:16,:-1]) < 1e-6).all()\n assert (numpy.fabs(level.features[:,:16,-1] - 1) < 1e-6).all()\n\n assert (numpy.fabs(level.features[:,-16:,:-1]) < 1e-6).all()\n assert (numpy.fabs(level.features[:,-16:,-1] - 1) < 1e-6).all()\n\n sc = 2**(1.0/interval)\n scale = pyramid.levels[0].scale\n for level in pyramid.levels[1:]:\n new_scale = level.scale\n assert math.fabs(new_scale/scale - 1/sc) < 1e-6\n scale = new_scale\n\ndef compare_pyramid_test():\n image = scipy.misc.imread('tests/000034.jpg')\n\n model = LoadModel('tests/example.dpm')\n pyramid = BuildPyramid(image, sbin=model.features.sbin, interval=model.interval, extra_octave=False, padx=15, pady=6)\n\n correct = scipy.io.loadmat('tests/pyramid.mat')\n\n pyra = correct['pyra']\n\n assert (numpy.fabs(pyra[0][0][1].flatten() - numpy.array([l.scale for l in pyramid.levels])) < 1e-6).all()\n\n for level, given in itertools.izip(pyramid.levels, pyra[0][0][0]):\n given = given[0]\n assert level.features.shape == given.shape\n diff = level.features/given\n diff = diff[numpy.logical_not(numpy.isnan(diff))]\n diff = diff[diff != numpy.inf]\n diff = diff[diff != -numpy.inf]\n if level.scale > 1:\n assert numpy.fabs(numpy.median(numpy.fabs(diff))-1) < 1e-2\n else:\n print(numpy.median(numpy.fabs(diff)))\n assert numpy.fabs(numpy.median(numpy.fabs(diff))-1) < 1e-1\n\ndef resize_test():\n correct = scipy.io.loadmat('tests/resize_test.mat')\n\n im = correct['im']\n im_small_correct = correct['im_small']\n\n im = numpy.array(im, dtype=numpy.float32, order='C')\n im_small_mine = ResizeImage(im.astype(numpy.float32), im_small_correct.shape[0], im_small_correct.shape[1])\n\n assert numpy.fabs(im_small_correct - im_small_mine).max() < 1e-2\n", "from pydro._features import *\n\nimport numpy\nimport math\nfrom collections import namedtuple\nimport sys\n\nLevel = namedtuple('Level', 'features,scale')\nPyramid = namedtuple('Pyramid', 'levels,image,pady,padx,sbin,interval')\n\n\ndef BuildPyramid(image, model=None, sbin=None, interval=None, extra_octave=None, padx=None, pady=None):\n if sbin is None:\n sbin = model.sbin\n if interval is None:\n interval = model.interval\n if extra_octave is None:\n extra_octave = 
model.features.extra_octave\n if padx is None:\n padx = model.maxsize[1]\n if pady is None:\n pady = model.maxsize[0]\n\n if len(image.shape) == 2:\n image = numpy.dstack((image, image, image))\n image = image.astype(numpy.float32)\n image.flags.writeable = False\n\n sc = 2 ** (1.0 / interval)\n max_scale = 1 + \\\n int(math.floor(\n math.log(min(image.shape[0:2]) / (5.0 * sbin)) / math.log(sc)))\n\n def level_generator():\n for i in xrange(interval):\n scale = 1 / (sc ** i)\n x = int(round(image.shape[1] * scale))\n y = int(round(image.shape[0] * scale))\n sys.stdout.flush()\n scaled = ResizeImage(image, y, x)\n\n if extra_octave:\n yield Level(\n features=ComputeFeatures(\n scaled, sbin / 4, padx + 1, pady + 1),\n scale=4 * scale,\n )\n\n yield Level(\n features=ComputeFeatures(scaled, sbin / 2, padx + 1, pady + 1),\n scale=2 * scale,\n )\n\n yield Level(\n features=ComputeFeatures(scaled, sbin, padx + 1, pady + 1),\n scale=scale,\n )\n\n for j in xrange(i + interval, max_scale, interval):\n scale *= 0.5\n x = int(round(x * 0.5))\n y = int(round(y * 0.5))\n scaled = ResizeImage(image, y, x)\n\n yield Level(\n features=ComputeFeatures(scaled, sbin, padx + 1, pady + 1),\n scale=scale,\n )\n\n levels = list(level_generator())\n levels.sort(key=lambda k: -k.scale)\n\n pyramid = Pyramid(\n levels=levels,\n pady=pady,\n padx=padx,\n sbin=sbin,\n interval=interval,\n image=image,\n )\n\n return pyramid\n" ]
[ [ "numpy.isnan", "numpy.array", "numpy.fabs" ], [ "numpy.dstack" ] ]
doongu/pnu_opensource_hack
[ "1abcce865776182b677ee468135a86bb462e89a8" ]
[ "secondary-voice-server-AI/assem-vc/gta_extractor.py" ]
[ "import os\r\nimport tqdm\r\nimport torch\r\nfrom torch.utils.data import DataLoader\r\nimport shutil\r\nimport argparse\r\nimport pytorch_lightning as pl\r\nfrom omegaconf import OmegaConf\r\n\r\nfrom synthesizer import Synthesizer\r\nfrom datasets.text import Language\r\nfrom datasets import TextMelDataset, text_mel_collate\r\n\r\n\r\nMETA_DIR = 'gta_metadata'\r\n\r\nclass GtaExtractor(object):\r\n def __init__(self, args):\r\n self.args = args\r\n self._load_checkpoint(args.checkpoint_path, args.config)\r\n self.trainloader = self._gen_dataloader(\r\n self.hp.data.train_dir, self.hp.data.train_meta)\r\n self.valloader = self._gen_dataloader(\r\n self.hp.data.val_dir, self.hp.data.val_meta)\r\n\r\n def _gen_hparams(self, config_paths):\r\n # generate hparams object for pl.LightningModule\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--config')\r\n args = parser.parse_args(['--config', config_paths])\r\n return args\r\n\r\n def _load_checkpoint(self, checkpoint_path, model_config_path):\r\n args_temp = self._gen_hparams(model_config_path)\r\n self.model = Synthesizer(args_temp).cuda()\r\n self.hp = self.model.hp\r\n self.lang = Language(self.hp.data.lang, self.hp.data.text_cleaners)\r\n\r\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\r\n self.model.load_state_dict(checkpoint['state_dict'])\r\n self.model.eval()\r\n self.model.freeze()\r\n del checkpoint\r\n torch.cuda.empty_cache()\r\n\r\n def _gen_dataloader(self, data_dir, data_meta):\r\n dataset = TextMelDataset(\r\n self.hp, data_dir, data_meta, train=False, norm=False, use_f0s = True)\r\n\r\n return DataLoader(dataset, batch_size=self.hp.train.batch_size, shuffle=False,\r\n num_workers=self.hp.train.num_workers,\r\n collate_fn=text_mel_collate, pin_memory=False, drop_last=False)\r\n\r\n def main(self):\r\n self.extract_and_write_meta('val')\r\n self.extract_and_write_meta('train')\r\n\r\n def extract_and_write_meta(self, mode):\r\n assert mode in ['train', 'val']\r\n\r\n dataloader = self.trainloader if mode == 'train' else self.valloader\r\n desc = 'Extracting GTA mel of %s data' % mode\r\n meta_list = list()\r\n for batch in tqdm.tqdm(dataloader, desc=desc):\r\n temp_meta = self.extract_gta_mels(batch, mode)\r\n meta_list.extend(temp_meta)\r\n\r\n meta_path = self.hp.data.train_meta if mode == 'train' else self.hp.data.val_meta\r\n meta_filename = os.path.basename(meta_path)\r\n new_meta_filename = 'gta_' + meta_filename\r\n new_meta_path = os.path.join('datasets', META_DIR, new_meta_filename)\r\n\r\n os.makedirs(os.path.join('datasets', META_DIR), exist_ok=True)\r\n with open(new_meta_path, 'w', encoding='utf-8') as f:\r\n for wavpath, speaker in meta_list:\r\n f.write('%s||%s\\n' % (wavpath, speaker))\r\n\r\n print('Wrote %d of %d files to %s' % \\\r\n (len(meta_list), len(dataloader.dataset), new_meta_path))\r\n\r\n @torch.no_grad()\r\n def extract_gta_mels(self, batch, mode):\r\n text, mel_source, speakers, f0_padded, input_lengths, output_lengths, max_input_len, savepaths = batch\r\n text = text.cuda()\r\n mel_source = mel_source.cuda()\r\n speakers = speakers.cuda()\r\n f0_padded = f0_padded.cuda()\r\n input_lengths = input_lengths.cuda()\r\n output_lengths = output_lengths.cuda()\r\n max_input_len = max_input_len.cuda()\r\n\r\n ling_s, alignment = self.model.forward(text, mel_source, input_lengths, output_lengths, max_input_len)\r\n mask = self.model.get_cnn_mask(output_lengths)\r\n residual = self.model.f0_encoder(f0_padded)\r\n ling_s = torch.cat((ling_s, residual), dim=1) # [B, 
chn.encoder+chn.residual_out, T]\r\n z_s = self.model.speaker(mel_source, output_lengths)\r\n mel_s_s = self.model.decoder(ling_s, z_s, mask)\r\n\r\n return self.store_mels_in_savepaths(\r\n mel_s_s, alignment, input_lengths, output_lengths, savepaths, speakers, mode)\r\n\r\n def store_mels_in_savepaths(self,\r\n mel_postnet, alignment, input_lengths, output_lengths, savepaths, speakers, mode):\r\n mels = mel_postnet.detach().cpu()\r\n alignment = alignment.detach().cpu()\r\n input_lengths = input_lengths.cpu()\r\n output_lengths = output_lengths.cpu()\r\n speakers = speakers.cpu().tolist()\r\n\r\n temp_meta = list()\r\n for i, path in enumerate(savepaths):\r\n attention = alignment[i]\r\n t_enc = input_lengths[i]\r\n t_dec = output_lengths[i]\r\n speaker_id = speakers[i]\r\n speaker = self.hp.data.speakers[speaker_id]\r\n\r\n mel = mels[i][:, :t_dec].clone()\r\n\r\n torch.save(mel, path)\r\n if mel.size(1) < self.args.min_mel_length:\r\n continue\r\n\r\n # so now, mel is sufficiently long, and alignment looks good.\r\n # let's write the mel path to metadata.\r\n root_dir = self.hp.data.train_dir if mode == 'train' \\\r\n else self.hp.data.val_dir\r\n rel_path = os.path.relpath(path, start=root_dir)\r\n wav_path = rel_path.replace('.gta', '')\r\n temp_meta.append((wav_path, speaker))\r\n\r\n return temp_meta\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('-c', '--config', nargs=2, type=str, required=True,\r\n help=\"path of configuration yaml file\")\r\n parser.add_argument('-p', '--checkpoint_path', type=str, default=None,\r\n help=\"path of checkpoint to use for extracting GTA mel\")\r\n parser.add_argument('-m', '--min_mel_length', type=int, default=33,\r\n help=\"minimal length of mel spectrogram. (segment_length // hop_length + 1) expected.\")\r\n args = parser.parse_args()\r\n\r\n extractor = GtaExtractor(args)\r\n extractor.main()\r\n" ]
[ [ "torch.cat", "torch.load", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "torch.no_grad", "torch.save" ] ]
haharay/mesa
[ "449185f7ceb461c4df60a6721ecc6f021146c8aa" ]
[ "mesa/space.py" ]
[ "\"\"\"\nMesa Space Module\n=================\n\nObjects used to add a spatial component to a model.\n\nGrid: base grid, a simple list-of-lists.\nSingleGrid: grid which strictly enforces one object per cell.\nMultiGrid: extension to Grid where each cell is a set of objects.\n\n\"\"\"\n# Instruction for PyLint to suppress variable name errors, since we have a\n# good reason to use one-character variable names for x and y.\n# pylint: disable=invalid-name\n\nimport itertools\n\nimport numpy as np\n\nfrom typing import (\n Any,\n Dict,\n Iterable,\n Iterator,\n List,\n Optional,\n Set,\n Sequence,\n Tuple,\n Union,\n cast,\n overload,\n)\nfrom .agent import Agent\n\nCoordinate = Tuple[int, int]\nGridContent = Union[Optional[Agent], Set[Agent]]\n# used in ContinuousSpace\nFloatCoordinate = Union[Tuple[float, float], np.ndarray]\n\n\ndef accept_tuple_argument(wrapped_function):\n \"\"\"Decorator to allow grid methods that take a list of (x, y) coord tuples\n to also handle a single position, by automatically wrapping tuple in\n single-item list rather than forcing user to do it.\n\n \"\"\"\n\n def wrapper(*args: Any):\n if isinstance(args[1], tuple) and len(args[1]) == 2:\n return wrapped_function(args[0], [args[1]])\n else:\n return wrapped_function(*args)\n\n return wrapper\n\n\nclass Grid:\n \"\"\"Base class for a square grid.\n\n Grid cells are indexed by [x][y], where [0][0] is assumed to be the\n bottom-left and [width-1][height-1] is the top-right. If a grid is\n toroidal, the top and bottom, and left and right, edges wrap to each other\n\n Properties:\n width, height: The grid's width and height.\n torus: Boolean which determines whether to treat the grid as a torus.\n grid: Internal list-of-lists which holds the grid cells themselves.\n \"\"\"\n\n def __init__(self, width: int, height: int, torus: bool) -> None:\n \"\"\"Create a new grid.\n\n Args:\n width, height: The width and height of the grid\n torus: Boolean whether the grid wraps or not.\n\n \"\"\"\n self.height = height\n self.width = width\n self.torus = torus\n\n self.grid: List[List[GridContent]] = []\n\n for x in range(self.width):\n col: List[GridContent] = []\n for y in range(self.height):\n col.append(self.default_val())\n self.grid.append(col)\n\n # Add all cells to the empties list.\n self.empties = set(itertools.product(*(range(self.width), range(self.height))))\n\n # Neighborhood Cache\n self._neighborhood_cache: Dict[Any, List[Coordinate]] = dict()\n\n @staticmethod\n def default_val() -> None:\n \"\"\"Default value for new cell elements.\"\"\"\n return None\n\n @overload\n def __getitem__(self, index: int) -> List[GridContent]:\n ...\n\n @overload\n def __getitem__(\n self, index: Tuple[Union[int, slice], Union[int, slice]]\n ) -> Union[GridContent, List[GridContent]]:\n ...\n\n @overload\n def __getitem__(self, index: Sequence[Coordinate]) -> List[GridContent]:\n ...\n\n def __getitem__(\n self,\n index: Union[\n int, Sequence[Coordinate], Tuple[Union[int, slice], Union[int, slice]],\n ],\n ) -> Union[GridContent, List[GridContent]]:\n \"\"\"Access contents from the grid.\"\"\"\n\n if isinstance(index, int):\n # grid[x]\n return self.grid[index]\n\n if isinstance(index[0], tuple):\n # grid[(x1, y1), (x2, y2)]\n index = cast(Sequence[Coordinate], index)\n\n cells = []\n for pos in index:\n x1, y1 = self.torus_adj(pos)\n cells.append(self.grid[x1][y1])\n return cells\n\n x, y = index\n\n if isinstance(x, int) and isinstance(y, int):\n # grid[x, y]\n index = cast(Coordinate, index)\n x, y = self.torus_adj(index)\n 
return self.grid[x][y]\n\n if isinstance(x, int):\n # grid[x, :]\n x, _ = self.torus_adj((x, 0))\n x = slice(x, x + 1)\n\n if isinstance(y, int):\n # grid[:, y]\n _, y = self.torus_adj((0, y))\n y = slice(y, y + 1)\n\n # grid[:, :]\n x, y = (cast(slice, x), cast(slice, y))\n cells = []\n for rows in self.grid[x]:\n for cell in rows[y]:\n cells.append(cell)\n return cells\n\n raise IndexError\n\n def __iter__(self) -> Iterator[GridContent]:\n \"\"\"\n create an iterator that chains the\n rows of grid together as if one list:\n \"\"\"\n return itertools.chain(*self.grid)\n\n def coord_iter(self) -> Iterator[Tuple[GridContent, int, int]]:\n \"\"\"An iterator that returns coordinates as well as cell contents.\"\"\"\n for row in range(self.width):\n for col in range(self.height):\n yield self.grid[row][col], row, col # agent, x, y\n\n def neighbor_iter(\n self, pos: Coordinate, moore: bool = True\n ) -> Iterator[GridContent]:\n \"\"\"Iterate over position neighbors.\n\n Args:\n pos: (x,y) coords tuple for the position to get the neighbors of.\n moore: Boolean for whether to use Moore neighborhood (including\n diagonals) or Von Neumann (only up/down/left/right).\n\n \"\"\"\n neighborhood = self.get_neighborhood(pos, moore=moore)\n return self.iter_cell_list_contents(neighborhood)\n\n def iter_neighborhood(\n self,\n pos: Coordinate,\n moore: bool,\n include_center: bool = False,\n radius: int = 1,\n ) -> Iterator[Coordinate]:\n \"\"\"Return an iterator over cell coordinates that are in the\n neighborhood of a certain point.\n\n Args:\n pos: Coordinate tuple for the neighborhood to get.\n moore: If True, return Moore neighborhood\n (including diagonals)\n If False, return Von Neumann neighborhood\n (exclude diagonals)\n include_center: If True, return the (x, y) cell as well.\n Otherwise, return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of coordinate tuples representing the neighborhood. 
For\n example with radius 1, it will return list with number of elements\n equals at most 9 (8) if Moore, 5 (4) if Von Neumann (if not\n including the center).\n\n \"\"\"\n yield from self.get_neighborhood(pos, moore, include_center, radius)\n\n def get_neighborhood(\n self,\n pos: Coordinate,\n moore: bool,\n include_center: bool = False,\n radius: int = 1,\n ) -> List[Coordinate]:\n \"\"\"Return a list of cells that are in the neighborhood of a\n certain point.\n\n Args:\n pos: Coordinate tuple for the neighborhood to get.\n moore: If True, return Moore neighborhood\n (including diagonals)\n If False, return Von Neumann neighborhood\n (exclude diagonals)\n include_center: If True, return the (x, y) cell as well.\n Otherwise, return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of coordinate tuples representing the neighborhood;\n With radius 1, at most 9 if Moore, 5 if Von Neumann (8 and 4\n if not including the center).\n\n \"\"\"\n cache_key = (pos, moore, include_center, radius)\n neighborhood = self._neighborhood_cache.get(cache_key, None)\n\n if neighborhood is None:\n coordinates: Set[Coordinate] = set()\n\n x, y = pos\n for dy in range(-radius, radius + 1):\n for dx in range(-radius, radius + 1):\n if dx == 0 and dy == 0 and not include_center:\n continue\n # Skip coordinates that are outside manhattan distance\n if not moore and abs(dx) + abs(dy) > radius:\n continue\n\n coord = (x + dx, y + dy)\n\n if self.out_of_bounds(coord):\n # Skip if not a torus and new coords out of bounds.\n if not self.torus:\n continue\n coord = self.torus_adj(coord)\n\n coordinates.add(coord)\n\n neighborhood = sorted(coordinates)\n self._neighborhood_cache[cache_key] = neighborhood\n\n return neighborhood\n\n def iter_neighbors(\n self,\n pos: Coordinate,\n moore: bool,\n include_center: bool = False,\n radius: int = 1,\n ) -> Iterator[GridContent]:\n \"\"\"Return an iterator over neighbors to a certain point.\n\n Args:\n pos: Coordinates for the neighborhood to get.\n moore: If True, return Moore neighborhood\n (including diagonals)\n If False, return Von Neumann neighborhood\n (exclude diagonals)\n include_center: If True, return the (x, y) cell as well.\n Otherwise,\n return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n An iterator of non-None objects in the given neighborhood;\n at most 9 if Moore, 5 if Von-Neumann\n (8 and 4 if not including the center).\n \"\"\"\n neighborhood = self.get_neighborhood(pos, moore, include_center, radius)\n return self.iter_cell_list_contents(neighborhood)\n\n def get_neighbors(\n self,\n pos: Coordinate,\n moore: bool,\n include_center: bool = False,\n radius: int = 1,\n ) -> List[GridContent]:\n \"\"\"Return a list of neighbors to a certain point.\n\n Args:\n pos: Coordinate tuple for the neighborhood to get.\n moore: If True, return Moore neighborhood\n (including diagonals)\n If False, return Von Neumann neighborhood\n (exclude diagonals)\n include_center: If True, return the (x, y) cell as well.\n Otherwise,\n return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of non-None objects in the given neighborhood;\n at most 9 if Moore, 5 if Von-Neumann\n (8 and 4 if not including the center).\n\n \"\"\"\n return list(self.iter_neighbors(pos, moore, include_center, radius))\n\n def torus_adj(self, pos: Coordinate) -> Coordinate:\n \"\"\"Convert coordinate, handling torus looping.\"\"\"\n if not 
self.out_of_bounds(pos):\n return pos\n elif not self.torus:\n raise Exception(\"Point out of bounds, and space non-toroidal.\")\n else:\n return pos[0] % self.width, pos[1] % self.height\n\n def out_of_bounds(self, pos: Coordinate) -> bool:\n \"\"\"\n Determines whether position is off the grid, returns the out of\n bounds coordinate.\n \"\"\"\n x, y = pos\n return x < 0 or x >= self.width or y < 0 or y >= self.height\n\n @accept_tuple_argument\n def iter_cell_list_contents(\n self, cell_list: Iterable[Coordinate]\n ) -> Iterator[GridContent]:\n \"\"\"\n Args:\n cell_list: Array-like of (x, y) tuples, or single tuple.\n\n Returns:\n An iterator of the contents of the cells identified in cell_list\n\n \"\"\"\n return filter(None, (self.grid[x][y] for x, y in cell_list))\n\n @accept_tuple_argument\n def get_cell_list_contents(\n self, cell_list: Iterable[Coordinate]\n ) -> List[GridContent]:\n \"\"\"\n Args:\n cell_list: Array-like of (x, y) tuples, or single tuple.\n\n Returns:\n A list of the contents of the cells identified in cell_list\n\n \"\"\"\n return list(self.iter_cell_list_contents(cell_list))\n\n def move_agent(self, agent: Agent, pos: Coordinate) -> None:\n \"\"\"\n Move an agent from its current position to a new position.\n\n Args:\n agent: Agent object to move. Assumed to have its current location\n stored in a 'pos' tuple.\n pos: Tuple of new position to move the agent to.\n\n \"\"\"\n pos = self.torus_adj(pos)\n self._remove_agent(agent.pos, agent)\n self._place_agent(pos, agent)\n agent.pos = pos\n\n def place_agent(self, agent: Agent, pos: Coordinate) -> None:\n \"\"\"Position an agent on the grid, and set its pos variable.\"\"\"\n self._place_agent(pos, agent)\n agent.pos = pos\n\n def _place_agent(self, pos: Coordinate, agent: Agent) -> None:\n \"\"\"Place the agent at the correct location.\"\"\"\n x, y = pos\n self.grid[x][y] = agent\n self.empties.discard(pos)\n\n def remove_agent(self, agent: Agent) -> None:\n \"\"\"Remove the agent from the grid and set its pos variable to None.\"\"\"\n pos = agent.pos\n self._remove_agent(pos, agent)\n agent.pos = None\n\n def _remove_agent(self, pos: Coordinate, agent: Agent) -> None:\n \"\"\"Remove the agent from the given location.\"\"\"\n x, y = pos\n self.grid[x][y] = None\n self.empties.add(pos)\n\n def is_cell_empty(self, pos: Coordinate) -> bool:\n \"\"\"Returns a bool of the contents of a cell.\"\"\"\n x, y = pos\n return self.grid[x][y] == self.default_val()\n\n def move_to_empty(self, agent: Agent) -> None:\n \"\"\"Moves agent to a random empty cell, vacating agent's old cell.\"\"\"\n pos = agent.pos\n if len(self.empties) == 0:\n raise Exception(\"ERROR: No empty cells\")\n new_pos = agent.random.choice(sorted(self.empties))\n self._place_agent(new_pos, agent)\n agent.pos = new_pos\n self._remove_agent(pos, agent)\n\n def find_empty(self) -> Optional[Coordinate]:\n \"\"\"Pick a random empty cell.\"\"\"\n from warnings import warn\n import random\n\n warn(\n (\n \"`find_empty` is being phased out since it uses the global \"\n \"`random` instead of the model-level random-number generator. 
\"\n \"Consider replacing it with having a model or agent object \"\n \"explicitly pick one of the grid's list of empty cells.\"\n ),\n DeprecationWarning,\n )\n\n if self.exists_empty_cells():\n pos = random.choice(sorted(self.empties))\n return pos\n else:\n return None\n\n def exists_empty_cells(self) -> bool:\n \"\"\"Return True if any cells empty else False.\"\"\"\n return len(self.empties) > 0\n\n\nclass SingleGrid(Grid):\n \"\"\"Grid where each cell contains exactly at most one object.\"\"\"\n\n empties: Set[Coordinate] = set()\n\n def __init__(self, width: int, height: int, torus: bool) -> None:\n \"\"\"Create a new single-item grid.\n\n Args:\n width, height: The width and width of the grid\n torus: Boolean whether the grid wraps or not.\n\n \"\"\"\n super().__init__(width, height, torus)\n\n def position_agent(\n self, agent: Agent, x: Union[int, str] = \"random\", y: Union[int, str] = \"random\"\n ) -> None:\n \"\"\"Position an agent on the grid.\n This is used when first placing agents! Use 'move_to_empty()'\n when you want agents to jump to an empty cell.\n Use 'swap_pos()' to swap agents positions.\n If x or y are positive, they are used, but if \"random\",\n we get a random position.\n Ensure this random position is not occupied (in Grid).\n\n \"\"\"\n if x == \"random\" or y == \"random\":\n if len(self.empties) == 0:\n raise Exception(\"ERROR: Grid full\")\n coords = agent.random.choice(sorted(self.empties))\n else:\n coords = (x, y)\n agent.pos = coords\n self._place_agent(coords, agent)\n\n def _place_agent(self, pos: Coordinate, agent: Agent) -> None:\n if self.is_cell_empty(pos):\n super()._place_agent(pos, agent)\n else:\n raise Exception(\"Cell not empty\")\n\n\nclass MultiGrid(Grid):\n \"\"\"Grid where each cell can contain more than one object.\n\n Grid cells are indexed by [x][y], where [0][0] is assumed to be at\n bottom-left and [width-1][height-1] is the top-right. 
If a grid is\n toroidal, the top and bottom, and left and right, edges wrap to each other.\n\n Each grid cell holds a set object.\n\n Properties:\n width, height: The grid's width and height.\n\n torus: Boolean which determines whether to treat the grid as a torus.\n\n grid: Internal list-of-lists which holds the grid cells themselves.\n\n Methods:\n get_neighbors: Returns the objects surrounding a given cell.\n \"\"\"\n\n @staticmethod\n def default_val() -> Set[Agent]:\n \"\"\"Default value for new cell elements.\"\"\"\n return []\n\n def _place_agent(self, pos: Coordinate, agent: Agent) -> None:\n \"\"\"Place the agent at the correct location.\"\"\"\n x, y = pos\n if agent not in self.grid[x][y]:\n self.grid[x][y].append(agent)\n self.empties.discard(pos)\n\n def _remove_agent(self, pos: Coordinate, agent: Agent) -> None:\n \"\"\"Remove the agent from the given location.\"\"\"\n x, y = pos\n self.grid[x][y].remove(agent)\n if self.is_cell_empty(pos):\n self.empties.add(pos)\n\n @accept_tuple_argument\n def iter_cell_list_contents(\n self, cell_list: Iterable[Coordinate]\n ) -> Iterator[GridContent]:\n \"\"\"\n Args:\n cell_list: Array-like of (x, y) tuples, or single tuple.\n\n Returns:\n A iterator of the contents of the cells identified in cell_list\n\n \"\"\"\n return itertools.chain.from_iterable(\n self[x][y] for x, y in cell_list if not self.is_cell_empty((x, y))\n )\n\n\nclass HexGrid(Grid):\n \"\"\"Hexagonal Grid: Extends Grid to handle hexagonal neighbors.\n\n Functions according to odd-q rules.\n See http://www.redblobgames.com/grids/hexagons/#coordinates for more.\n\n Properties:\n width, height: The grid's width and height.\n torus: Boolean which determines whether to treat the grid as a torus.\n\n Methods:\n get_neighbors: Returns the objects surrounding a given cell.\n get_neighborhood: Returns the cells surrounding a given cell.\n neighbor_iter: Iterates over position neighbours.\n iter_neighborhood: Returns an iterator over cell coordinates that are\n in the neighborhood of a certain point.\n\n \"\"\"\n\n def iter_neighborhood(\n self, pos: Coordinate, include_center: bool = False, radius: int = 1\n ) -> Iterator[Coordinate]:\n \"\"\"Return an iterator over cell coordinates that are in the\n neighborhood of a certain point.\n\n Args:\n pos: Coordinate tuple for the neighborhood to get.\n include_center: If True, return the (x, y) cell as well.\n Otherwise, return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of coordinate tuples representing the neighborhood. 
For\n example with radius 1, it will return list with number of elements\n equals at most 9 (8) if Moore, 5 (4) if Von Neumann (if not\n including the center).\n\n \"\"\"\n\n def torus_adj_2d(pos: Coordinate) -> Coordinate:\n return (pos[0] % self.width, pos[1] % self.height)\n\n coordinates = set()\n\n def find_neighbors(pos: Coordinate, radius: int) -> None:\n x, y = pos\n\n \"\"\"\n Both: (0,-), (0,+)\n\n Even: (-,+), (-,0), (+,+), (+,0)\n Odd: (-,0), (-,-), (+,0), (+,-)\n \"\"\"\n adjacent = [(x, y - 1), (x, y + 1)]\n\n if include_center:\n adjacent.append(pos)\n\n if x % 2 == 0:\n adjacent += [(x - 1, y + 1), (x - 1, y), (x + 1, y + 1), (x + 1, y)]\n else:\n adjacent += [(x - 1, y), (x - 1, y - 1), (x + 1, y), (x + 1, y - 1)]\n\n if self.torus is False:\n adjacent = list(\n filter(lambda coords: not self.out_of_bounds(coords), adjacent)\n )\n else:\n adjacent = [torus_adj_2d(coord) for coord in adjacent]\n\n coordinates.update(adjacent)\n\n if radius > 1:\n [find_neighbors(coords, radius - 1) for coords in adjacent]\n\n find_neighbors(pos, radius)\n\n if not include_center and pos in coordinates:\n coordinates.remove(pos)\n\n for i in coordinates:\n yield i\n\n def neighbor_iter(self, pos: Coordinate) -> Iterator[GridContent]:\n \"\"\"Iterate over position neighbors.\n\n Args:\n pos: (x,y) coords tuple for the position to get the neighbors of.\n\n \"\"\"\n neighborhood = self.iter_neighborhood(pos)\n return self.iter_cell_list_contents(neighborhood)\n\n def get_neighborhood(\n self, pos: Coordinate, include_center: bool = False, radius: int = 1\n ) -> List[Coordinate]:\n \"\"\"Return a list of cells that are in the neighborhood of a\n certain point.\n\n Args:\n pos: Coordinate tuple for the neighborhood to get.\n include_center: If True, return the (x, y) cell as well.\n Otherwise, return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of coordinate tuples representing the neighborhood;\n With radius 1\n\n \"\"\"\n return list(self.iter_neighborhood(pos, include_center, radius))\n\n def iter_neighbors(\n self, pos: Coordinate, include_center: bool = False, radius: int = 1\n ) -> Iterator[GridContent]:\n \"\"\"Return an iterator over neighbors to a certain point.\n\n Args:\n pos: Coordinates for the neighborhood to get.\n include_center: If True, return the (x, y) cell as well.\n Otherwise,\n return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n An iterator of non-None objects in the given neighborhood\n\n \"\"\"\n neighborhood = self.iter_neighborhood(pos, include_center, radius)\n return self.iter_cell_list_contents(neighborhood)\n\n def get_neighbors(\n self, pos: Coordinate, include_center: bool = False, radius: int = 1\n ) -> List[Coordinate]:\n \"\"\"Return a list of neighbors to a certain point.\n\n Args:\n pos: Coordinate tuple for the neighborhood to get.\n include_center: If True, return the (x, y) cell as well.\n Otherwise,\n return surrounding cells only.\n radius: radius, in cells, of neighborhood to get.\n\n Returns:\n A list of non-None objects in the given neighborhood\n\n \"\"\"\n return list(self.iter_neighbors(pos, include_center, radius))\n\n\nclass ContinuousSpace:\n \"\"\"Continuous space where each agent can have an arbitrary position.\n\n Assumes that all agents are point objects, and have a pos property storing\n their position as an (x, y) tuple. 
This class uses a numpy array internally\n to store agent objects, to speed up neighborhood lookups.\n\n \"\"\"\n\n _grid = None\n\n def __init__(\n self,\n x_max: float,\n y_max: float,\n torus: bool,\n x_min: float = 0,\n y_min: float = 0,\n ) -> None:\n \"\"\"Create a new continuous space.\n\n Args:\n x_max, y_max: Maximum x and y coordinates for the space.\n torus: Boolean for whether the edges loop around.\n x_min, y_min: (default 0) If provided, set the minimum x and y\n coordinates for the space. Below them, values loop to\n the other edge (if torus=True) or raise an exception.\n\n \"\"\"\n self.x_min = x_min\n self.x_max = x_max\n self.width = x_max - x_min\n self.y_min = y_min\n self.y_max = y_max\n self.height = y_max - y_min\n self.center = np.array(((x_max + x_min) / 2, (y_max + y_min) / 2))\n self.size = np.array((self.width, self.height))\n self.torus = torus\n\n self._agent_points = None\n self._index_to_agent: Dict[int, Agent] = {}\n self._agent_to_index: Dict[Agent, int] = {}\n\n def place_agent(self, agent: Agent, pos: FloatCoordinate) -> None:\n \"\"\"Place a new agent in the space.\n\n Args:\n agent: Agent object to place.\n pos: Coordinate tuple for where to place the agent.\n\n \"\"\"\n pos = self.torus_adj(pos)\n if self._agent_points is None:\n self._agent_points = np.array([pos])\n else:\n self._agent_points = np.append(self._agent_points, np.array([pos]), axis=0)\n self._index_to_agent[self._agent_points.shape[0] - 1] = agent\n self._agent_to_index[agent] = self._agent_points.shape[0] - 1\n agent.pos = pos\n\n def move_agent(self, agent: Agent, pos: FloatCoordinate) -> None:\n \"\"\"Move an agent from its current position to a new position.\n\n Args:\n agent: The agent object to move.\n pos: Coordinate tuple to move the agent to.\n\n \"\"\"\n pos = self.torus_adj(pos)\n idx = self._agent_to_index[agent]\n self._agent_points[idx, 0] = pos[0]\n self._agent_points[idx, 1] = pos[1]\n agent.pos = pos\n\n def remove_agent(self, agent: Agent) -> None:\n \"\"\"Remove an agent from the simulation.\n\n Args:\n agent: The agent object to remove\n \"\"\"\n if agent not in self._agent_to_index:\n raise Exception(\"Agent does not exist in the space\")\n idx = self._agent_to_index[agent]\n del self._agent_to_index[agent]\n max_idx = max(self._index_to_agent.keys())\n # Delete the agent's position and decrement the index/agent mapping\n self._agent_points = np.delete(self._agent_points, idx, axis=0)\n for a, index in self._agent_to_index.items():\n if index > idx:\n self._agent_to_index[a] = index - 1\n self._index_to_agent[index - 1] = a\n # The largest index is now redundant\n del self._index_to_agent[max_idx]\n agent.pos = None\n\n def get_neighbors(\n self, pos: FloatCoordinate, radius: float, include_center: bool = True\n ) -> List[GridContent]:\n \"\"\"Get all objects within a certain radius.\n\n Args:\n pos: (x,y) coordinate tuple to center the search at.\n radius: Get all the objects within this distance of the center.\n include_center: If True, include an object at the *exact* provided\n coordinates. i.e. 
if you are searching for the\n neighbors of a given agent, True will include that\n agent in the results.\n\n \"\"\"\n deltas = np.abs(self._agent_points - np.array(pos))\n if self.torus:\n deltas = np.minimum(deltas, self.size - deltas)\n dists = deltas[:, 0] ** 2 + deltas[:, 1] ** 2\n\n (idxs,) = np.where(dists <= radius ** 2)\n neighbors = [\n self._index_to_agent[x] for x in idxs if include_center or dists[x] > 0\n ]\n return neighbors\n\n def get_heading(\n self, pos_1: FloatCoordinate, pos_2: FloatCoordinate\n ) -> FloatCoordinate:\n \"\"\"Get the heading angle between two points, accounting for toroidal space.\n\n Args:\n pos_1, pos_2: Coordinate tuples for both points.\n \"\"\"\n one = np.array(pos_1)\n two = np.array(pos_2)\n if self.torus:\n one = (one - self.center) % self.size\n two = (two - self.center) % self.size\n heading = two - one\n if isinstance(pos_1, tuple):\n heading = tuple(heading)\n return heading\n\n def get_distance(self, pos_1: FloatCoordinate, pos_2: FloatCoordinate) -> float:\n \"\"\"Get the distance between two point, accounting for toroidal space.\n\n Args:\n pos_1, pos_2: Coordinate tuples for both points.\n\n \"\"\"\n x1, y1 = pos_1\n x2, y2 = pos_2\n\n dx = np.abs(x1 - x2)\n dy = np.abs(y1 - y2)\n if self.torus:\n dx = min(dx, self.width - dx)\n dy = min(dy, self.height - dy)\n return np.sqrt(dx * dx + dy * dy)\n\n def torus_adj(self, pos: FloatCoordinate) -> FloatCoordinate:\n \"\"\"Adjust coordinates to handle torus looping.\n\n If the coordinate is out-of-bounds and the space is toroidal, return\n the corresponding point within the space. If the space is not toroidal,\n raise an exception.\n\n Args:\n pos: Coordinate tuple to convert.\n\n \"\"\"\n if not self.out_of_bounds(pos):\n return pos\n elif not self.torus:\n raise Exception(\"Point out of bounds, and space non-toroidal.\")\n else:\n x = self.x_min + ((pos[0] - self.x_min) % self.width)\n y = self.y_min + ((pos[1] - self.y_min) % self.height)\n if isinstance(pos, tuple):\n return (x, y)\n else:\n return np.array((x, y))\n\n def out_of_bounds(self, pos: FloatCoordinate) -> bool:\n \"\"\"Check if a point is out of bounds.\"\"\"\n x, y = pos\n return x < self.x_min or x >= self.x_max or y < self.y_min or y >= self.y_max\n\n\nclass NetworkGrid:\n \"\"\"Network Grid where each node contains zero or more agents.\"\"\"\n\n def __init__(self, G: Any) -> None:\n self.G = G\n for node_id in self.G.nodes:\n G.nodes[node_id][\"agent\"] = list()\n\n def place_agent(self, agent: Agent, node_id: int) -> None:\n \"\"\"Place a agent in a node.\"\"\"\n\n self._place_agent(agent, node_id)\n agent.pos = node_id\n\n def get_neighbors(self, node_id: int, include_center: bool = False) -> List[int]:\n \"\"\"Get all adjacent nodes\"\"\"\n\n neighbors = list(self.G.neighbors(node_id))\n if include_center:\n neighbors.append(node_id)\n\n return neighbors\n\n def move_agent(self, agent: Agent, node_id: int) -> None:\n \"\"\"Move an agent from its current node to a new node.\"\"\"\n\n self._remove_agent(agent, agent.pos)\n self._place_agent(agent, node_id)\n agent.pos = node_id\n\n def _place_agent(self, agent: Agent, node_id: int) -> None:\n \"\"\"Place the agent at the correct node.\"\"\"\n\n self.G.nodes[node_id][\"agent\"].append(agent)\n\n def _remove_agent(self, agent: Agent, node_id: int) -> None:\n \"\"\"Remove an agent from a node.\"\"\"\n\n self.G.nodes[node_id][\"agent\"].remove(agent)\n\n def remove_agent(self, agent: Agent) -> None:\n \"\"\"Remove the agent from the network and set its pos variable to 
None.\"\"\"\n pos = agent.pos\n self._remove_agent(agent, pos)\n agent.pos = None\n\n def is_cell_empty(self, node_id: int) -> bool:\n \"\"\"Returns a bool of the contents of a cell.\"\"\"\n return not self.G.nodes[node_id][\"agent\"]\n\n def get_cell_list_contents(self, cell_list: List[int]) -> List[GridContent]:\n return list(self.iter_cell_list_contents(cell_list))\n\n def get_all_cell_contents(self) -> List[GridContent]:\n return list(self.iter_cell_list_contents(self.G))\n\n def iter_cell_list_contents(self, cell_list: List[int]) -> List[GridContent]:\n list_of_lists = [\n self.G.nodes[node_id][\"agent\"]\n for node_id in cell_list\n if not self.is_cell_empty(node_id)\n ]\n return [item for sublist in list_of_lists for item in sublist]\n" ]
[ [ "numpy.minimum", "numpy.abs", "numpy.sqrt", "numpy.delete", "numpy.array", "numpy.where" ] ]
sbassi/metodos-numericos
[ "d21b09f384f326793eb59bbaa28505561698d719" ]
[ "python/bernstein.py" ]
[ "#!/usr/bin/env python3\n# Aproximación a una función continua mediante los polinomios de Bernstein\n# (prueba del teorema de aproximación de Weierstrass)\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.stats as st\n\n\n# La función a aproximar\ndef f(x):\n return np.abs(1 / 2 - x)\n\n\ndef bernstein(f, n, p):\n return np.sum(\n [f(k / n) * st.binom.pmf(k, n, p) for k in np.arange(0, n + 1)])\n\n\n# Create the vectors X and Y\nx = np.linspace(0, 1, num=1000)\ny = f(x)\n# Create the plot\nplt.plot(x, y, 'blue', label='f(x)')\n\nbernstein3 = lambda x: bernstein(f, 3, x)\nbernstein3 = np.vectorize(bernstein3)\ny3 = bernstein3(x)\nplt.plot(x, y3, 'green', label='$B_3$')\n\nbernstein10 = lambda x: bernstein(f, 10, x)\nbernstein10 = np.vectorize(bernstein10)\ny10 = bernstein10(x)\nplt.plot(x, y10, 'red', label='$B_{10}$')\n\nbernstein100 = lambda x: bernstein(f, 100, x)\nbernstein100 = np.vectorize(bernstein100)\ny100 = bernstein100(x)\nplt.plot(x, y100, 'magenta', label='$B_{100}$')\n\nbernstein300 = lambda x: bernstein(f, 300, x)\nbernstein300 = np.vectorize(bernstein300)\ny300 = bernstein300(x)\nplt.plot(x, y300, 'brown', label='$B_{300}$')\n\n# Show the plot\nplt.legend(loc=\"upper left\")\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.abs", "numpy.linspace", "scipy.stats.binom.pmf", "numpy.arange", "matplotlib.pyplot.plot", "numpy.vectorize", "matplotlib.pyplot.show" ] ]
KejiaChen/assembly
[ "dbaa3eeb40709c4a2033b6a603a68c17e60a0477" ]
[ "furniture/env/controllers/baxter_ik_controller.py" ]
[ "\"\"\"\nNOTE: requires pybullet module.\n\nRun `pip install pybullet==1.9.5`.\n\"\"\"\n\nimport os\nimport numpy as np\n\ntry:\n import pybullet as p\nexcept ImportError:\n raise Exception(\n \"Please make sure pybullet is installed. Run `pip install pybullet==1.9.5`\"\n )\n\nfrom .. import transform_utils as T\nfrom ..controllers import Controller\n\n\nclass BaxterIKController(Controller):\n \"\"\"\n Inverse kinematics for the Baxter robot, using Pybullet and the urdf description\n files.\n \"\"\"\n\n def __init__(self, bullet_data_path, robot_jpos_getter):\n \"\"\"\n Args:\n bullet_data_path (str): base path to bullet data.\n\n robot_jpos_getter (function): function that returns the joint positions of\n the robot to be controlled as a numpy array.\n \"\"\"\n # Set up inverse kinematics\n self.robot_jpos_getter = robot_jpos_getter\n\n # Do any setup needed for Inverse Kinematics.\n path = os.path.join(bullet_data_path, \"baxter_description/urdf/baxter_mod.urdf\")\n self.setup_inverse_kinematics(path)\n\n # Should be in (0, 1], smaller values mean less sensitivity.\n self.user_sensitivity = 1.0\n\n self.sync_state()\n\n def get_control(self, right=None, left=None):\n \"\"\"\n Returns joint velocities to control the robot after the target end effector\n positions and orientations are updated from arguments @left and @right.\n If no arguments are provided, joint velocities will be computed based\n on the previously recorded target.\n\n Args:\n left (dict): A dictionary to control the left end effector with these keys.\n\n dpos (numpy array): a 3 dimensional array corresponding to the desired\n change in x, y, and z left end effector position.\n\n rotation (numpy array): a rotation matrix of shape (3, 3) corresponding\n to the desired orientation of the left end effector.\n\n right (dict): A dictionary to control the left end effector with these keys.\n\n dpos (numpy array): a 3 dimensional array corresponding to the desired\n change in x, y, and z right end effector position.\n\n rotation (numpy array): a rotation matrix of shape (3, 3) corresponding\n to the desired orientation of the right end effector.\n\n Returns:\n velocities (numpy array): a flat array of joint velocity commands to apply\n to try and achieve the desired input control.\n\n\n \"\"\"\n # Sync joint positions for IK.\n self.sync_ik_robot(self.robot_jpos_getter())\n\n # Compute new target joint positions if arguments are provided\n if (right is not None) and (left is not None):\n self.commanded_joint_positions = self.joint_positions_for_eef_command(\n right, left\n )\n\n # P controller from joint positions (from IK) to velocities\n velocities = np.zeros(14)\n deltas = self._get_current_error(\n self.robot_jpos_getter(), self.commanded_joint_positions\n )\n\n for i, delta in enumerate(deltas):\n velocities[i] = -2 * delta\n velocities = self.clip_joint_velocities(velocities)\n\n self.commanded_joint_velocities = velocities\n return velocities\n\n # For debugging purposes: set joint positions directly\n # robot.set_joint_positions(self.commanded_joint_positions)\n\n def sync_state(self):\n \"\"\"\n Syncs the internal Pybullet robot state to the joint positions of the\n robot being controlled.\n \"\"\"\n\n # sync IK robot state to the current robot joint positions\n self.sync_ik_robot(self.robot_jpos_getter())\n\n # make sure target pose is up to date\n pos_r, orn_r, pos_l, orn_l = self.ik_robot_eef_joint_cartesian_pose()\n\n self.ik_robot_target_pos_right = pos_r\n self.ik_robot_target_orn_right = orn_r\n 
self.ik_robot_target_pos_left = pos_l\n self.ik_robot_target_orn_left = orn_l\n\n def setup_inverse_kinematics(self, urdf_path):\n \"\"\"\n This function is responsible for doing any setup for inverse kinematics.\n Inverse Kinematics maps end effector (EEF) poses to joint angles that\n are necessary to achieve those poses.\n \"\"\"\n\n # These indices come from the urdf file we're using\n self.effector_right = 27\n self.effector_left = 45\n\n # Use PyBullet to handle inverse kinematics.\n # Set up a connection to the PyBullet simulator.\n p.connect(p.DIRECT)\n p.resetSimulation()\n\n self.ik_robot = p.loadURDF(urdf_path, (0, 0, 0), useFixedBase=1)\n\n # Relevant joints we care about. Many of the joints are fixed and don't count, so\n # we need this second map to use the right ones.\n self.actual = [13, 14, 15, 16, 17, 19, 20, 31, 32, 33, 34, 35, 37, 38]\n\n self.num_joints = p.getNumJoints(self.ik_robot)\n n = p.getNumJoints(self.ik_robot)\n self.rest = []\n self.lower = []\n self.upper = []\n self.ranges = []\n for i in range(n):\n info = p.getJointInfo(self.ik_robot, i)\n # Retrieve lower and upper ranges for each relevant joint\n if info[3] > -1:\n self.rest.append(p.getJointState(self.ik_robot, i)[0])\n self.lower.append(info[8])\n self.upper.append(info[9])\n self.ranges.append(info[9] - info[8])\n\n # Simulation will update as fast as it can in real time, instead of waiting for\n # step commands like in the non-realtime case.\n p.setRealTimeSimulation(1)\n\n def sync_ik_robot(self, joint_positions, simulate=False, sync_last=True):\n \"\"\"\n Force the internal robot model to match the provided joint angles.\n\n Args:\n joint_positions (list): a list or flat numpy array of joint positions.\n simulate (bool): If True, actually use physics simulation, else\n write to physics state directly.\n sync_last (bool): If False, don't sync the last joint angle. 
This\n is useful for directly controlling the roll at the end effector.\n \"\"\"\n num_joints = len(joint_positions)\n if not sync_last:\n num_joints -= 1\n for i in range(num_joints):\n if simulate:\n p.setJointMotorControl2(\n self.ik_robot,\n self.actual[i],\n p.POSITION_CONTROL,\n targetVelocity=0,\n targetPosition=joint_positions[i],\n force=500,\n positionGain=0.5,\n velocityGain=1.,\n )\n else:\n # Note that we use self.actual[i], and not i\n p.resetJointState(self.ik_robot, self.actual[i], joint_positions[i])\n\n def ik_robot_eef_joint_cartesian_pose(self):\n \"\"\"\n Returns the current cartesian pose of the last joint of the ik robot with respect\n to the base frame as a (pos, orn) tuple where orn is a x-y-z-w quaternion.\n \"\"\"\n out = []\n for eff in [self.effector_right, self.effector_left]:\n eef_pos_in_world = np.array(p.getLinkState(self.ik_robot, eff)[0])\n eef_orn_in_world = np.array(p.getLinkState(self.ik_robot, eff)[1])\n eef_pose_in_world = T.pose2mat((eef_pos_in_world, eef_orn_in_world))\n\n base_pos_in_world = np.array(\n p.getBasePositionAndOrientation(self.ik_robot)[0]\n )\n base_orn_in_world = np.array(\n p.getBasePositionAndOrientation(self.ik_robot)[1]\n )\n base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))\n world_pose_in_base = T.pose_inv(base_pose_in_world)\n\n eef_pose_in_base = T.pose_in_A_to_pose_in_B(\n pose_A=eef_pose_in_world, pose_A_in_B=world_pose_in_base\n )\n out.extend(T.mat2pose(eef_pose_in_base))\n\n return out\n\n def inverse_kinematics(\n self,\n target_position_right,\n target_orientation_right,\n target_position_left,\n target_orientation_left,\n rest_poses,\n ):\n \"\"\"\n Helper function to do inverse kinematics for a given target position and\n orientation in the PyBullet world frame.\n\n Args:\n target_position_{right, left}: A tuple, list, or numpy array of size 3 for position.\n target_orientation_{right, left}: A tuple, list, or numpy array of size 4 for\n a orientation quaternion.\n rest_poses: A list of size @num_joints to favor ik solutions close by.\n\n Returns:\n A list of size @num_joints corresponding to the joint angle solution.\n \"\"\"\n\n ndof = 48\n\n ik_solution = list(\n p.calculateInverseKinematics(\n self.ik_robot,\n self.effector_right,\n target_position_right,\n targetOrientation=target_orientation_right,\n restPoses=rest_poses[:7],\n lowerLimits=self.lower,\n upperLimits=self.upper,\n jointRanges=self.ranges,\n jointDamping=[0.7] * ndof,\n )\n )\n ik_solution2 = list(\n p.calculateInverseKinematics(\n self.ik_robot,\n self.effector_left,\n target_position_left,\n targetOrientation=target_orientation_left,\n restPoses=rest_poses[7:],\n lowerLimits=self.lower,\n upperLimits=self.upper,\n jointRanges=self.ranges,\n jointDamping=[0.7] * ndof,\n )\n )\n for i in range(8, 15):\n ik_solution[i] = ik_solution2[i]\n\n return ik_solution[1:]\n\n def bullet_base_pose_to_world_pose(self, pose_in_base):\n \"\"\"\n Convert a pose in the base frame to a pose in the world frame.\n\n Args:\n pose_in_base: a (pos, orn) tuple.\n\n Returns:\n pose_in world: a (pos, orn) tuple.\n \"\"\"\n pose_in_base = T.pose2mat(pose_in_base)\n\n base_pos_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[0])\n base_orn_in_world = np.array(p.getBasePositionAndOrientation(self.ik_robot)[1])\n base_pose_in_world = T.pose2mat((base_pos_in_world, base_orn_in_world))\n\n pose_in_world = T.pose_in_A_to_pose_in_B(\n pose_A=pose_in_base, pose_A_in_B=base_pose_in_world\n )\n return T.mat2pose(pose_in_world)\n\n def 
joint_positions_for_eef_command(self, right, left):\n \"\"\"\n This function runs inverse kinematics to back out target joint positions\n from the provided end effector command.\n\n Same arguments as @get_control.\n\n Returns:\n A list of size @num_joints corresponding to the target joint angles.\n \"\"\"\n\n dpos_right = right[\"dpos\"]\n dpos_left = left[\"dpos\"]\n self.target_pos_right = self.ik_robot_target_pos_right + np.array([0, 0, 0.913])\n self.target_pos_left = self.ik_robot_target_pos_left + np.array([0, 0, 0.913])\n self.ik_robot_target_pos_right += dpos_right * self.user_sensitivity\n self.ik_robot_target_pos_left += dpos_left * self.user_sensitivity\n\n rotation_right = right[\"rotation\"]\n rotation_left = left[\"rotation\"]\n self.ik_robot_target_orn_right = T.mat2quat(rotation_right)\n self.ik_robot_target_orn_left = T.mat2quat(rotation_left)\n\n # convert from target pose in base frame to target pose in bullet world frame\n world_targets_right = self.bullet_base_pose_to_world_pose(\n (self.ik_robot_target_pos_right, self.ik_robot_target_orn_right)\n )\n world_targets_left = self.bullet_base_pose_to_world_pose(\n (self.ik_robot_target_pos_left, self.ik_robot_target_orn_left)\n )\n\n # Empirically, more iterations aren't needed, and it's faster\n # for _ in range(5):\n for _ in range(20):\n arm_joint_pos = self.inverse_kinematics(\n world_targets_right[0],\n world_targets_right[1],\n world_targets_left[0],\n world_targets_left[1],\n rest_poses=self.robot_jpos_getter(),\n )\n self.sync_ik_robot(arm_joint_pos, sync_last=True)\n\n return arm_joint_pos\n\n def _get_current_error(self, current, set_point):\n \"\"\"\n Returns an array of differences between the desired joint positions and current\n joint positions. Useful for PID control.\n\n Args:\n current: the current joint positions.\n set_point: the joint positions that are desired as a numpy array.\n\n Returns:\n the current error in the joint positions.\n \"\"\"\n error = current - set_point\n return error\n\n def clip_joint_velocities(self, velocities):\n \"\"\"\n Clips joint velocities into a valid range.\n \"\"\"\n for i in range(len(velocities)):\n if velocities[i] >= 1.0:\n velocities[i] = 1.0\n elif velocities[i] <= -1.0:\n velocities[i] = -1.0\n return velocities\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
matthew-z/MatchZoo-py
[ "014e33d7c4ae05dc5296ebd1d600baf7e9cb9dec" ]
[ "matchzoo/engine/base_model.py" ]
[ "\"\"\"Base Model.\"\"\"\n\nimport abc\nimport typing\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom matchzoo import preprocessors\nfrom matchzoo import tasks\nfrom matchzoo.dataloader import callbacks\nfrom matchzoo.engine import hyper_spaces\nfrom matchzoo.engine.base_callback import BaseCallback\nfrom matchzoo.engine.base_preprocessor import BasePreprocessor\nfrom matchzoo.engine.param import Param\nfrom matchzoo.engine.param_table import ParamTable\nfrom matchzoo.utils import parse_activation\n\n\nclass BaseModel(nn.Module, abc.ABC):\n \"\"\"\n Abstract base class of all MatchZoo models.\n\n MatchZoo models are wrapped over pytorch models. `params` is a set of model\n hyper-parameters that deterministically builds a model. In other words,\n `params['model_class'](params=params)` of the same `params` always create\n models with the same structure.\n\n :param params: Model hyper-parameters. (default: return value from\n :meth:`get_default_params`)\n\n Example:\n >>> BaseModel() # doctest: +ELLIPSIS\n Traceback (most recent call last):\n ...\n TypeError: Can't instantiate abstract class BaseModel ...\n >>> class MyModel(BaseModel):\n ... def build(self):\n ... pass\n ... def forward(self):\n ... pass\n >>> isinstance(MyModel(), BaseModel)\n True\n\n \"\"\"\n\n def __init__(\n self,\n params: typing.Optional[ParamTable] = None\n ):\n \"\"\"Init.\"\"\"\n super().__init__()\n self._params = params or self.get_default_params()\n\n @classmethod\n def get_default_params(\n cls,\n with_embedding=False,\n with_multi_layer_perceptron=False\n ) -> ParamTable:\n \"\"\"\n Model default parameters.\n\n The common usage is to instantiate :class:`matchzoo.engine.ModelParams`\n first, then set the model specific parametrs.\n\n Examples:\n >>> class MyModel(BaseModel):\n ... def build(self):\n ... print(self._params['num_eggs'], 'eggs')\n ... print('and', self._params['ham_type'])\n ... def forward(self, greeting):\n ... print(greeting)\n ...\n ... @classmethod\n ... def get_default_params(cls):\n ... params = ParamTable()\n ... params.add(Param('num_eggs', 512))\n ... params.add(Param('ham_type', 'Parma Ham'))\n ... return params\n >>> my_model = MyModel()\n >>> my_model.build()\n 512 eggs\n and Parma Ham\n >>> my_model('Hello MatchZoo!')\n Hello MatchZoo!\n\n Notice that all parameters must be serialisable for the entire model\n to be serialisable. Therefore, it's strongly recommended to use python\n native data types to store parameters.\n\n :return: model parameters\n\n \"\"\"\n params = ParamTable()\n params.add(Param(\n name='model_class', value=cls,\n desc=\"Model class. Used internally for save/load. \"\n \"Changing this may cause unexpected behaviors.\"\n ))\n params.add(Param(\n name='task',\n desc=\"Decides model output shape, loss, and metrics.\"\n ))\n params.add(Param(\n name='out_activation_func', value=None,\n desc=\"Activation function used in output layer.\"\n ))\n if with_embedding:\n params.add(Param(\n name='with_embedding', value=True,\n desc=\"A flag used help `auto` module. Shouldn't be changed.\"\n ))\n params.add(Param(\n name='embedding',\n desc='FloatTensor containing weights for the Embedding.',\n validator=lambda x: isinstance(x, np.ndarray)\n ))\n params.add(Param(\n name='embedding_input_dim',\n desc='Usually equals vocab size + 1. 
Should be set manually.'\n ))\n params.add(Param(\n name='embedding_output_dim',\n desc='Should be set manually.'\n ))\n params.add(Param(\n name='padding_idx', value=0,\n desc='If given, pads the output with the embedding vector at'\n 'padding_idx (initialized to zeros) whenever it encounters'\n 'the index.'\n ))\n params.add(Param(\n name='embedding_freeze', value=False,\n desc='`True` to freeze embedding layer training, '\n '`False` to enable embedding parameters.'\n ))\n if with_multi_layer_perceptron:\n params.add(Param(\n name='with_multi_layer_perceptron', value=True,\n desc=\"A flag of whether a multiple layer perceptron is used. \"\n \"Shouldn't be changed.\"\n ))\n params.add(Param(\n name='mlp_num_units', value=128,\n desc=\"Number of units in first `mlp_num_layers` layers.\",\n hyper_space=hyper_spaces.quniform(8, 256, 8)\n ))\n params.add(Param(\n name='mlp_num_layers', value=3,\n desc=\"Number of layers of the multiple layer percetron.\",\n hyper_space=hyper_spaces.quniform(1, 6)\n ))\n params.add(Param(\n name='mlp_num_fan_out', value=64,\n desc=\"Number of units of the layer that connects the multiple \"\n \"layer percetron and the output.\",\n hyper_space=hyper_spaces.quniform(4, 128, 4)\n ))\n params.add(Param(\n name='mlp_activation_func', value='relu',\n desc='Activation function used in the multiple '\n 'layer perceptron.'\n ))\n return params\n\n def guess_and_fill_missing_params(self, verbose=1):\n \"\"\"\n Guess and fill missing parameters in :attr:`params`.\n\n Use this method to automatically fill-in other hyper parameters.\n This involves some guessing so the parameter it fills could be\n wrong. For example, the default task is `Ranking`, and if we do not\n set it to `Classification` manaully for data packs prepared for\n classification, then the shape of the model output and the data will\n mismatch.\n\n :param verbose: Verbosity.\n \"\"\"\n self._params.get('task').set_default(tasks.Ranking(), verbose)\n if 'with_embedding' in self._params:\n self._params.get('embedding_input_dim').set_default(300, verbose)\n self._params.get('embedding_output_dim').set_default(300, verbose)\n\n def _set_param_default(self, name: str,\n default_val: str, verbose: int = 0):\n if self._params[name] is None:\n self._params[name] = default_val\n if verbose:\n print(f\"Parameter \\\"{name}\\\" set to {default_val}.\")\n\n @classmethod\n def get_default_preprocessor(\n cls,\n truncated_mode: str = 'pre',\n truncated_length_left: typing.Optional[int] = None,\n truncated_length_right: typing.Optional[int] = None,\n filter_mode: str = 'df',\n filter_low_freq: float = 1,\n filter_high_freq: float = float('inf'),\n remove_stop_words: bool = False,\n ngram_size: typing.Optional[int] = None,\n **kwargs\n ) -> BasePreprocessor:\n \"\"\"\n Model default preprocessor.\n\n The preprocessor's transform should produce a correctly shaped data\n pack that can be used for training.\n\n :return: Default preprocessor.\n \"\"\"\n return preprocessors.BasicPreprocessor(\n truncated_mode=truncated_mode,\n truncated_length_left=truncated_length_left,\n truncated_length_right=truncated_length_right,\n filter_mode=filter_mode,\n filter_low_freq=filter_low_freq,\n filter_high_freq=filter_high_freq,\n remove_stop_words=remove_stop_words,\n ngram_size=ngram_size,\n **kwargs\n )\n\n @classmethod\n def get_default_padding_callback(\n cls,\n fixed_length_left: int = None,\n fixed_length_right: int = None,\n pad_word_value: typing.Union[int, str] = 0,\n pad_word_mode: str = 'post',\n with_ngram: bool = False,\n 
fixed_ngram_length: int = None,\n pad_ngram_value: typing.Union[int, str] = 0,\n pad_ngram_mode: str = 'post'\n ) -> BaseCallback:\n \"\"\"\n Model default padding callback.\n\n The padding callback's on_batch_unpacked would pad a batch of data to\n a fixed length.\n\n :return: Default padding callback.\n \"\"\"\n return callbacks.BasicPadding(\n fixed_length_left=fixed_length_left,\n fixed_length_right=fixed_length_right,\n pad_word_value=pad_word_value,\n pad_word_mode=pad_word_mode,\n with_ngram=with_ngram,\n fixed_ngram_length=fixed_ngram_length,\n pad_ngram_value=pad_ngram_value,\n pad_ngram_mode=pad_ngram_mode\n )\n\n @property\n def params(self) -> ParamTable:\n \"\"\":return: model parameters.\"\"\"\n return self._params\n\n @params.setter\n def params(self, val):\n self._params = val\n\n @abc.abstractmethod\n def build(self):\n \"\"\"Build model, each subclass need to implement this method.\"\"\"\n raise NotImplementedError(\n \"Build method not implemented in the subclass.\"\n )\n\n @abc.abstractmethod\n def forward(self, *input):\n \"\"\"\n Defines the computation performed at every call.\n\n Should be overridden by all subclasses.\n \"\"\"\n raise NotImplementedError(\n \"Forward method not implemented in the subclass.\"\n )\n\n def _make_embedding_layer(\n self,\n num_embeddings: int = 0,\n embedding_dim: int = 0,\n freeze: bool = True,\n embedding: typing.Optional[np.ndarray] = None,\n **kwargs\n ) -> nn.Module:\n \"\"\":return: an embedding module.\"\"\"\n if isinstance(embedding, np.ndarray):\n return nn.Embedding.from_pretrained(\n embeddings=torch.Tensor(embedding),\n freeze=freeze\n )\n else:\n return nn.Embedding(\n num_embeddings=num_embeddings,\n embedding_dim=embedding_dim\n )\n\n def _make_default_embedding_layer(\n self,\n **kwargs\n ) -> nn.Module:\n \"\"\":return: an embedding module.\"\"\"\n if isinstance(self._params['embedding'], np.ndarray):\n self._params['embedding_input_dim'] = (\n self._params['embedding'].shape[0]\n )\n self._params['embedding_output_dim'] = (\n self._params['embedding'].shape[1]\n )\n return nn.Embedding.from_pretrained(\n embeddings=torch.Tensor(self._params['embedding']),\n freeze=self._params['embedding_freeze'],\n padding_idx=self._params['padding_idx']\n )\n else:\n return nn.Embedding(\n num_embeddings=self._params['embedding_input_dim'],\n embedding_dim=self._params['embedding_output_dim'],\n padding_idx=self._params['padding_idx']\n )\n\n def _make_output_layer(\n self,\n in_features: int = 0\n ) -> nn.Module:\n \"\"\":return: a correctly shaped torch module for model output.\"\"\"\n task = self._params['task']\n if isinstance(task, tasks.Classification):\n out_features = task.num_classes\n elif isinstance(task, tasks.Ranking):\n out_features = 1\n else:\n raise ValueError(f\"{task} is not a valid task type. 
\"\n f\"Must be in `Ranking` and `Classification`.\")\n if self._params['out_activation_func']:\n return nn.Sequential(\n nn.Linear(in_features, out_features),\n parse_activation(self._params['out_activation_func'])\n )\n else:\n return nn.Linear(in_features, out_features)\n\n def _make_perceptron_layer(\n self,\n in_features: int = 0,\n out_features: int = 0,\n activation: nn.Module = nn.ReLU()\n ) -> nn.Module:\n \"\"\":return: a perceptron layer.\"\"\"\n return nn.Sequential(\n nn.Linear(in_features, out_features),\n activation\n )\n\n def _make_multi_layer_perceptron_layer(self, in_features) -> nn.Module:\n \"\"\":return: a multiple layer perceptron.\"\"\"\n if not self._params['with_multi_layer_perceptron']:\n raise AttributeError(\n 'Parameter `with_multi_layer_perception` not set.')\n\n activation = parse_activation(self._params['mlp_activation_func'])\n mlp_sizes = [\n in_features,\n *self._params['mlp_num_layers'] * [self._params['mlp_num_units']],\n self._params['mlp_num_fan_out']\n ]\n mlp = [\n self._make_perceptron_layer(in_f, out_f, activation)\n for in_f, out_f in zip(mlp_sizes, mlp_sizes[1:])\n ]\n return nn.Sequential(*mlp)\n" ]
[ [ "torch.nn.Sequential", "torch.Tensor", "torch.nn.Embedding", "torch.nn.Linear", "torch.nn.ReLU" ] ]
YanickT/astonomische-methoden
[ "e1c35265f64ed1bb6310ba89f975f45b2c42a9cb" ]
[ "tf_versions/main.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom astroquery.sdss import SDSS\nfrom network import Network\n\n\ndef get_data(class_, n_train=1000, n_test=1000):\n \"\"\"\n Load data for training and evaluation.\n :param class_: str = Type of astronomical objects\n :param n_train: int = Number of traingings data\n :return: np.array[n_train, 5], np.array[n_train], np.array[n_test, 5], np.array[n_test] =\n input-training, output-training, input-test, output-test\n \"\"\"\n query = f\"\"\"SELECT top {n_train + n_test} specobj.z, specobj.ra, PhotoObj.u, PhotoObj.g, PhotoObj.r, PhotoObj.i, PhotoObj.z \n FROM specObj JOIN PhotoObj ON specObj.bestObjID = PhotoObj.objid \n WHERE class = '{class_}' AND specobj.z > -0.1 AND NOT bestObjID = 0 AND zWarning = 0 ORDER BY ra\"\"\"\n data = np.array([list(tup) for tup in np.array(SDSS.query_sql(query)).tolist()])\n outputs = data[:, 0]\n inputs = data[:, 2:]\n\n train_out = outputs[:n_train]\n train_ins = inputs[:n_train, :]\n test_out = outputs[n_train:]\n test_ins = inputs[n_train:, :]\n return train_ins, train_out, test_ins, test_out\n\n\ndef redshift_error_plot(net, test_in, test_out):\n \"\"\"\n Create plot of the uncertainty over the redshift.\n :param net: Network = Network to use\n :param test_in: np.array[5, n] = List of input data to test\n :param test_out: np.array[n] = List of output data to test\n :return: void\n \"\"\"\n preds = net.predict(test_in).flatten()\n diff = np.abs(preds - test_out)\n plt.plot(test_out, np.array(diff) / np.array(test_out), \".\")\n plt.ticklabel_format(style='plain')\n plt.xlabel(\"redshift\")\n plt.ylabel(\"rel error\")\n plt.show()\n\n\n# only galaxy\nif False:\n train_in, train_out, test_in, test_out = get_data(\"GALAXY\", 10000, 3000)\n net = Network()\n history = net.train(train_in, train_out, val_in=test_in, val_out=test_out, epochs=60, verbose=0)\n net.model.save(\"galaxies.h5\")\n\n preds = net.predict(test_in)\n plt.plot(test_out, preds.flatten(), \"x\")\n plt.xlabel(\"redshift\")\n plt.ylabel(\"predicted redshift\")\n plt.show()\n\n # plot loss of test data\n val_loss = history.history[\"val_loss\"]\n plt.plot(list(range(len(val_loss))), val_loss)\n plt.xlabel(\"epochs\")\n plt.ylabel(\"validation loss\")\n plt.show()\n\n # plot loss of test data over redshift\n redshift_error_plot(net, test_in, test_out)\n\n\n# only quasar\nif True:\n \"\"\"\n Use with regularization in Network\n \"\"\"\n train_in, train_out, test_in, test_out = get_data(\"QSO\", 10000, 3000)\n net = Network()\n history = net.train(train_in, train_out, val_in=test_in, val_out=test_out, epochs=60, verbose=0)\n net.model.save(\"quasar.h5\")\n\n preds = net.predict(test_in)\n plt.plot(test_out, preds.flatten(), \"x\")\n plt.xlabel(\"redshift\")\n plt.ylabel(\"predicted redshift\")\n plt.show()\n\n # plot loss of test data\n val_loss = history.history[\"val_loss\"]\n plt.plot(list(range(len(val_loss))), val_loss)\n plt.xlabel(\"epochs\")\n plt.ylabel(\"validation loss\")\n plt.show()\n\n # plot loss of test data over redshift\n redshift_error_plot(net, test_in, test_out)\n\n" ]
[ [ "numpy.abs", "matplotlib.pyplot.ticklabel_format", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
carsondahlberg/Kashgari
[ "7111d2d2959f234c05a0db06073c6142f826a8f7" ]
[ "kashgari/tasks/seq_labeling/base_model.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: BrikerMan\n@contact: eliyar917@gmail.com\n@blog: https://eliyar.biz\n\n@version: 1.0\n@license: Apache Licence\n@file: base_model\n@time: 2019-01-21\n\n\"\"\"\nimport os\nimport random\nimport json\nimport pathlib\nimport logging\nfrom typing import Tuple, Dict\n\nimport numpy as np\nimport keras\nfrom keras.models import Model\nfrom keras.preprocessing import sequence\nfrom keras.utils import to_categorical\nfrom seqeval.metrics import f1_score, classification_report, recall_score\n\nimport kashgari.macros as k\nfrom kashgari.utils import helper\nfrom kashgari.embeddings import CustomEmbedding, BaseEmbedding\nfrom kashgari.type_hints import *\n\nfrom kashgari.utils.crf import CRF, crf_loss\nfrom kashgari.tasks.base import BaseModel\n\n\nclass SequenceLabelingModel(BaseModel):\n\n @property\n def label2idx(self) -> Dict[str, int]:\n return self._label2idx\n\n @property\n def token2idx(self) -> Dict[str, int]:\n return self.embedding.token2idx\n\n @label2idx.setter\n def label2idx(self, value):\n self._label2idx = value\n self._idx2label = dict([(val, key) for (key, val) in value.items()])\n\n def build_model(self, loss_f=None, optimizer=None, metrics=None, **kwargs):\n \"\"\"\n build model function\n :return:\n \"\"\"\n raise NotImplementedError()\n\n def build_token2id_label2id_dict(self,\n x_train: List[List[str]],\n y_train: List[List[str]],\n x_validate: List[List[str]] = None,\n y_validate: List[List[str]] = None):\n x_data = x_train\n y_data = y_train\n if x_validate:\n x_data = x_train + x_validate\n y_data = y_data + y_validate\n self.embedding.build_token2idx_dict(x_data, 3)\n\n label_set = []\n for seq in y_data:\n for y in seq:\n if y not in label_set:\n label_set.append(y)\n\n label2idx = {\n k.PAD: 0,\n k.BOS: 1,\n k.EOS: 2\n }\n label_set = [i for i in label_set if i not in label2idx]\n for label in label_set:\n label2idx[label] = len(label2idx)\n\n self.label2idx = label2idx\n\n def convert_labels_to_idx(self,\n label: Union[List[List[str]], List[str]],\n add_eos_bos: bool = True) -> Union[List[List[int]], List[int]]:\n\n def tokenize_tokens(seq: List[str]):\n tokens = [self._label2idx[i] for i in seq]\n if add_eos_bos:\n tokens = [self._label2idx[k.BOS]] + tokens + [self._label2idx[k.EOS]]\n return tokens\n\n if isinstance(label[0], str):\n return tokenize_tokens(label)\n else:\n return [tokenize_tokens(l) for l in label]\n\n def convert_idx_to_labels(self,\n idx: Union[List[List[int]], List[int]],\n tokens_length: Union[List[int], int],\n remove_eos_bos: bool = True) -> Union[List[str], str]:\n\n def reverse_tokenize_tokens(idx_item, seq_length):\n if remove_eos_bos:\n seq = idx_item[1: 1 + seq_length]\n else:\n seq = idx_item\n tokens = [self._idx2label[i] for i in seq]\n return tokens\n\n if isinstance(idx[0], int):\n return reverse_tokenize_tokens(idx, tokens_length)\n else:\n labels = []\n for index in range(len(idx)):\n idx_item = idx[index]\n seq_length = tokens_length[index]\n labels.append(reverse_tokenize_tokens(idx_item, seq_length))\n return labels\n\n def get_data_generator(self,\n x_data: List[List[str]],\n y_data: List[List[str]],\n batch_size: int = 64,\n is_bert: bool = False):\n while True:\n page_list = list(range(len(x_data) // batch_size + 1))\n random.shuffle(page_list)\n for page in page_list:\n start_index = page * batch_size\n end_index = start_index + batch_size\n target_x = x_data[start_index: end_index]\n target_y = y_data[start_index: end_index]\n if len(target_x) == 0:\n target_x = x_data[0: batch_size]\n 
target_y = y_data[0: batch_size]\n\n tokenized_x = self.embedding.tokenize(target_x)\n tokenized_y = self.convert_labels_to_idx(target_y)\n\n padded_x = sequence.pad_sequences(tokenized_x,\n maxlen=self.embedding.sequence_length,\n padding='post')\n padded_y = sequence.pad_sequences(tokenized_y,\n maxlen=self.embedding.sequence_length,\n padding='post')\n\n one_hot_y = to_categorical(padded_y, num_classes=len(self.label2idx))\n\n if is_bert:\n padded_x_seg = np.zeros(shape=(len(padded_x), self.embedding.sequence_length))\n x_input_data = [padded_x, padded_x_seg]\n else:\n x_input_data = padded_x\n yield (x_input_data, one_hot_y)\n\n def fit(self,\n x_train: List[List[str]],\n y_train: List[List[str]],\n x_validate: List[List[str]] = None,\n y_validate: List[List[str]] = None,\n batch_size: int = 64,\n epochs: int = 5,\n labels_weight: bool = None,\n default_labels_weight: float = 50.0,\n fit_kwargs: Dict = None,\n **kwargs):\n \"\"\"\n\n :param x_train: list of training data.\n :param y_train: list of training target label data.\n :param batch_size: batch size for trainer model\n :param epochs: Number of epochs to train the model.\n :param x_validate: list of validation data.\n :param y_validate: list of validation target label data.\n :param y_validate: list of validation target label data.\n :param y_validate: list of validation target label data.\n :param labels_weight: set class weights for imbalanced classes\n :param default_labels_weight: default weight for labels not in labels_weight dict\n :param fit_kwargs: additional kwargs to be passed to\n :func:`~keras.models.Model.fit`\n :return:\n \"\"\"\n assert len(x_train) == len(y_train)\n self.build_token2id_label2id_dict(x_train, y_train, x_validate, y_validate)\n\n if len(x_train) < batch_size:\n batch_size = len(x_train) // 2\n\n if not self.model:\n if self.embedding.sequence_length == 0:\n self.embedding.sequence_length = sorted([len(x) for x in x_train])[int(0.95 * len(x_train))]\n logging.info('sequence length set to {}'.format(self.embedding.sequence_length))\n\n if labels_weight:\n weights = []\n initial_weights = {\n k.PAD: 1,\n k.BOS: 1,\n k.EOS: 1,\n 'O': 1\n }\n for label in self.label2idx.keys():\n weights.append(initial_weights.get(label, default_labels_weight))\n loss_f = helper.weighted_categorical_crossentropy(np.array(weights))\n self.model_info['loss'] = {\n 'func': 'weighted_categorical_crossentropy',\n 'weights': weights\n }\n\n self.build_model(loss_f=loss_f, metrics=['categorical_accuracy', 'acc'])\n else:\n self.build_model()\n\n train_generator = self.get_data_generator(x_train,\n y_train,\n batch_size,\n is_bert=self.embedding.is_bert)\n\n if fit_kwargs is None:\n fit_kwargs = {}\n\n if x_validate:\n validation_generator = self.get_data_generator(x_validate,\n y_validate,\n batch_size,\n is_bert=self.embedding.is_bert)\n\n fit_kwargs['validation_data'] = validation_generator\n fit_kwargs['validation_steps'] = len(x_validate) // batch_size\n\n self.model.fit_generator(train_generator,\n steps_per_epoch=len(x_train) // batch_size,\n epochs=epochs,\n **fit_kwargs)\n\n def predict(self, sentence: Union[List[str], List[List[str]]], batch_size=None):\n tokens = self.embedding.tokenize(sentence)\n is_list = not isinstance(sentence[0], str)\n if is_list:\n seq_length = [len(item) for item in sentence]\n padded_tokens = sequence.pad_sequences(tokens,\n maxlen=self.embedding.sequence_length,\n padding='post')\n else:\n seq_length = [len(sentence)]\n padded_tokens = sequence.pad_sequences([tokens],\n 
maxlen=self.embedding.sequence_length,\n padding='post')\n if self.embedding.is_bert:\n x = [padded_tokens, np.zeros(shape=(len(padded_tokens), self.embedding.sequence_length))]\n else:\n x = padded_tokens\n predict_result = self.model.predict(x, batch_size=batch_size).argmax(-1)\n labels = self.convert_idx_to_labels(predict_result, seq_length)\n\n if is_list:\n return labels\n else:\n return labels[0]\n\n def evaluate(self, x_data, y_data, batch_size=None) -> Tuple[float, float, Dict]:\n y_pred = self.predict(x_data, batch_size=batch_size)\n report = classification_report(y_data, y_pred)\n print(classification_report(y_data, y_pred))\n return report\n" ]
[ [ "numpy.array" ] ]
dantegd/dask-cuml
[ "f69539c5bc88963824d51e5cc558512a00c62a48" ]
[ "dask_cuml/neighbors/nearest_neighbors.py" ]
[ "# Copyright (c) 2019, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom dask_cuml.core import *\n\nimport dask\n\nfrom cuml.neighbors import NearestNeighbors as cumlKNN\n\nimport logging\n\nimport random\n\nfrom cuml import numba_utils\n\nimport itertools\n\n\nfrom dask.distributed import get_worker, get_client, Client\n\nfrom dask import delayed\nfrom collections import defaultdict\nfrom dask.distributed import wait, default_client\nimport dask.dataframe as dd\nimport dask.array as da\n\n\nfrom tornado import gen\nimport dask_cudf, cudf\n\nimport logging\n\nimport os\nimport time\n\nimport numpy as np\n\nfrom toolz import first, assoc\n\nimport numba.cuda\n\n\ndef to_gpu_matrix(df):\n \"\"\"\n Turn input cudf into a Numba array. Returns device\n of current worker and the start/stop indexes\n of the cudf.\n :param df:\n :return:\n \"\"\"\n\n try:\n start_idx = df.index[0]\n stop_idx = df.index[-1]\n gpu_matrix = numba_utils.row_matrix(df)\n dev = device_of_devicendarray(gpu_matrix)\n return dev, gpu_matrix, (start_idx, stop_idx)\n\n except Exception as e:\n import traceback\n logging.error(\"Error in to_gpu_matrix(\" + str(e))\n traceback.print_exc()\n pass\n\n\ndef build_alloc_info(data):\n \"\"\"\n Use the __cuda_array_interface__ to extract cpointer\n information for passing into cython.\n :param data:\n :return:\n \"\"\"\n dev, gpu_matrix, _ = data\n return gpu_matrix.__cuda_array_interface__\n\n\ndef get_ipc_handle(data):\n \"\"\"\n Extract IPC handles from input Numba array. Pass\n along the device of the current worker and the\n start/stop indices from the original cudf.\n :param data:\n :return:\n \"\"\"\n dev, gpu_matrix, idx = data\n\n try:\n in_handle = gpu_matrix.get_ipc_handle()\n return dev, in_handle, idx\n except Exception as e:\n import traceback\n logging.error(\"Error in get_ipc_handles(dev=\" + str(dev) + \"): \" + str(e))\n traceback.print_exc()\n pass\n\n\n# Run on a single worker on each unique host\ndef _fit_on_worker(data, params):\n \"\"\"\n For SPMG kNN, this is the function that runs on a\n chosen worker, collects all the IPC handles from\n the other workers, and makes the cython call to\n the cuML kNN multiGPU fit() function.\n :param data:\n :param params:\n :return:\n \"\"\"\n\n ipcs, raw_arrs = data\n\n # Separate threads to hold pointers to separate devices\n # The order in which we pass the list of IPCs to the thread matters and the goal is\n # to maximize reuse while minimizing the number of threads. 
We want to limit the\n # number of threads to O(len(devices)) and want to avoid having if be O(len(ipcs))\n # at all costs!\n device_handle_map = defaultdict(list)\n [device_handle_map[dev].append((idx, ipc)) for dev, ipc, idx in ipcs]\n\n open_ipcs = [([i[0] for i in ipcs], new_ipc_thread([i[1] for i in ipcs], dev))\n for dev, ipcs in device_handle_map.items()]\n\n alloc_info = []\n for idxs, t in open_ipcs:\n inf = t.info()\n for i in range(len(idxs)):\n alloc_info.append([idxs[i], inf[i]])\n\n alloc_info.extend([[t[2], build_alloc_info(t)] for t in raw_arrs])\n alloc_info.sort(key = lambda x: x[0][0])\n\n final_allocs = [a for i, a in alloc_info]\n\n m = cumlKNN(should_downcast = params[\"should_downcast\"])\n m._fit_mg(params[\"D\"], final_allocs)\n\n [t[1].close() for t in open_ipcs]\n [t[1].join() for t in open_ipcs]\n\n return m\n\n\ndef _kneighbors_on_worker(data, m, params):\n \"\"\"\n For SPMG kNN, this is the function that runs on\n a chosen worker, collects all the IPC handles from\n the other workers, and makes the cython call to\n the cuML kNN multiGPU kneighbors() function.\n :param data:\n :param m:\n :param params:\n :return:\n \"\"\"\n\n ipc_dev_list, devarrs_dev_list = data\n\n device_handle_map = defaultdict(list)\n\n ipc_dev_list = list(filter(None, ipc_dev_list))\n devarrs_dev_list = list(filter(None, devarrs_dev_list))\n\n # Each ipc contains X, I, D handles\n [device_handle_map[dev].append((idx, ipc)) for ipc, dev, idx in ipc_dev_list]\n\n def collect_ipcs(ipcs):\n \"\"\"\n A simple helper function to flat map a deeply\n nested list of ipc handles.\n :param ipcs:\n :return:\n \"\"\"\n final = []\n for ipc in ipcs:\n for i in ipc:\n for j in i:\n final.append(j)\n\n return final\n\n open_ipcs = [([i[0] for i in idx_ipcs], new_ipc_thread(collect_ipcs([i[1] for i in idx_ipcs]), dev))\n for dev, idx_ipcs in device_handle_map.items()]\n\n alloc_info = []\n for idxs, t in open_ipcs:\n inf = t.info()\n for i in range(len(idxs)):\n alloc_info.append((idxs[i], inf[i*3:(i*3)+3]))\n\n for p, dev, idx in devarrs_dev_list:\n for X, inds, dists in p:\n alloc_info.extend([(idx, [build_alloc_info((dev, X, idx)),\n build_alloc_info((dev, inds, idx)),\n build_alloc_info((dev, dists, idx))])])\n\n alloc_info.sort(key = lambda x: x[0][0])\n\n for idx, allocs in alloc_info:\n X, inds, dists = allocs\n m._kneighbors(X[\"data\"][0], X[\"shape\"][0], params[\"k\"], inds[\"data\"][0], dists[\"data\"][0])\n\n [t.close() for idx, t in open_ipcs]\n [t.join() for idx, t in open_ipcs]\n\n return data\n\n\ndef input_to_device_arrays(X, params):\n \"\"\"\n Create output arrays and return them w/ the input array(s)\n :param arr:\n A tuple in the form of (X, y)\n :return:\n \"\"\"\n\n if len(X[0]) == 0:\n return None\n\n start_idx = X[0].index[0]\n stop_idx = X[0].index[-1]\n\n X_mat = numba_utils.row_matrix(X[0])\n dev = device_of_devicendarray(X_mat)\n\n shape = X_mat.shape[0]*params[\"k\"]\n\n # Create output numba arrays.\n I_ndarr = numba.cuda.to_device(np.zeros(shape, dtype=np.int64, order=\"C\"))\n D_ndarr = numba.cuda.to_device(np.zeros(shape, dtype=np.float32, order=\"C\"))\n\n # Return canonical device id as string\n return [(X_mat, I_ndarr, D_ndarr)], dev, (start_idx, stop_idx)\n\n\ndef get_input_ipc_handles(arr):\n \"\"\"\n Used for kneighbors() to extract the IPC handles from\n the input Numba arrays. 
The device of the current worker\n and the start/stop indices of the original cudf are\n passed along as well.\n :param arr:\n :return:\n \"\"\"\n if arr is None:\n return None\n\n arrs, dev, idx = arr\n mat = [(X.get_ipc_handle(), inds.get_ipc_handle(), dists.get_ipc_handle())\n for X, inds, dists in arrs]\n return mat, dev, idx\n\n\ndef build_dask_dfs(arrs, params):\n \"\"\"\n Convert Numba arrays for kneighbors() resulting\n indices and distances into cudf Dataframes, using\n the start/stop indices from dataframe X.\n :param arrs:\n :param params:\n :return:\n \"\"\"\n\n if arrs is None:\n return None\n\n arr, dev, idx = arrs\n\n X, I_ndarr, D_ndarr = arr[0]\n\n I_ndarr = I_ndarr.reshape((X.shape[0], params[\"k\"]))\n D_ndarr = D_ndarr.reshape((X.shape[0], params[\"k\"]))\n\n I = cudf.DataFrame(index = cudf.dataframe.RangeIndex(idx[0], idx[1]+1))\n D = cudf.DataFrame(index = cudf.dataframe.RangeIndex(idx[0], idx[1]+1))\n\n for i in range(0, params[\"k\"]):\n I[str(i)] = I_ndarr[:, i]\n\n for i in range(0, params[\"k\"]):\n D[str(i)] = D_ndarr[:, i]\n\n return I, D, idx\n\n\ndef get_idx(arrs):\n \"\"\"\n Extract and return the start/stop\n indices from original cudf.\n :param arrs:\n :return:\n \"\"\"\n return arrs[2]\n\ndef get_I(arrs):\n \"\"\"\n Extract and return the indices cudf\n :param arrs:\n :return:\n \"\"\"\n return arrs[0]\n\n\ndef get_D(arrs):\n \"\"\"\n Extract and return the dists cudf\n :param arrs:\n :return:\n \"\"\"\n return arrs[1]\n\n\ndef get_I_meta(arrs):\n \"\"\"\n Extract and return the metadata\n from the indices cudf.\n :param arrs:\n :return:\n \"\"\"\n return arrs[0].iloc[:0]\n\n\ndef get_D_meta(arrs):\n \"\"\"\n Extract and return the metadata\n from the dists cudf.\n :param arrs:\n :return:\n \"\"\"\n return arrs[1].iloc[:0]\n\n\nclass NearestNeighbors(object):\n \"\"\"\n Data-parallel Multi-Node Multi-GPU kNN Model.\n\n Data is spread across Dask workers using Dask cuDF. A single worker is chosen to create a series of kNN indices,\n one for each chunk of the Dask input, across the devices on that host. Results will reflect the global order,\n extracted from the Dask cuDF used for training.\n \"\"\"\n def __init__(self, n_neighbors = 5, should_downcast = False):\n self.model = None\n self.master_host = None\n self.should_downcast = should_downcast\n self.n_neighbors = n_neighbors\n\n def fit(self, ddf):\n \"\"\"\n Fits a single-node multi-gpu knn model using single process-multiGPU technique.\n :param futures:\n :return:\n \"\"\"\n client = default_client()\n\n # Keep the futures around so the GPU memory doesn't get\n # deallocated on the workers.\n gpu_futures, cols = client.sync(self._get_mg_info, ddf)\n\n host_dict = self._build_host_dict(gpu_futures, client).items()\n if len(host_dict) > 1:\n raise Exception(\"Dask cluster appears to span hosts. 
Current \"\n \"multi-GPU implementation is limited to a single host\")\n\n # Choose a random worker on each unique host to run cuml's kNN.fit() function\n # on all the cuDFs living on that host.\n self.master_host = [(host, random.sample(ports, 1)[0])\n for host, ports in host_dict][0]\n\n host, port = self.master_host\n\n gpu_futures_for_host = list(filter(lambda d: d[0][0] == host, gpu_futures))\n exec_node = (host, port)\n\n # build ipc handles\n gpu_data_excl_worker = list(filter(lambda d: d[0] != exec_node, gpu_futures_for_host))\n gpu_data_incl_worker = list(filter(lambda d: d[0] == exec_node, gpu_futures_for_host))\n\n ipc_handles = [client.submit(get_ipc_handle, future, workers=[worker])\n for worker, future in gpu_data_excl_worker]\n\n raw_arrays = [future for worker, future in gpu_data_incl_worker]\n\n f = (exec_node, client.submit(_fit_on_worker, (ipc_handles, raw_arrays),\n {\"D\": cols, \"should_downcast\":self.should_downcast},\n workers=[exec_node]))\n\n wait(f)\n\n # The model on each unique host is held for futures queries\n self.model = f\n\n @gen.coroutine\n def _kneighbors(self, X, k = None):\n \"\"\"\n Internal function to query the kNN model.\n :param X:\n :param k:\n :return:\n \"\"\"\n client = default_client()\n if k is None:\n k = self.n_neighbors\n\n # Break apart Dask.array/dataframe into chunks/parts\n data_parts = X.to_delayed()\n\n parts = list(map(delayed, data_parts))\n parts = client.compute(parts) # Start computation in the background\n yield wait(parts)\n for part in parts:\n if part.status == 'error':\n yield part # trigger error locally\n\n # A dict in the form of { part_key: part }\n key_to_part_dict = dict([(str(part.key), part) for part in parts])\n\n who_has = yield client.who_has(parts)\n\n worker_parts = {}\n for key, workers in who_has.items():\n worker = parse_host_port(first(workers))\n if worker not in worker_parts:\n worker_parts[worker] = []\n worker_parts[worker].append(key_to_part_dict[key])\n\n \"\"\"\n Create IP Handles on each worker hosting input data\n \"\"\"\n # Format of input_devarrays = ([(X, y)..], dev)\n input_devarrays = [(worker, client.submit(input_to_device_arrays, part, {\"k\":k}, workers=[worker]))\n for worker, part in worker_parts.items()]\n\n yield wait(input_devarrays)\n\n \"\"\"\n Gather IPC handles for each worker and call _fit() on each worker containing data.\n \"\"\"\n exec_node, model = self.model\n\n # Need to fetch coefficient parts on worker\n on_worker = list(filter(lambda x: x[0] == exec_node, input_devarrays))\n not_on_worker = list(filter(lambda x: x[0] != exec_node, input_devarrays))\n\n ipc_handles = [client.submit(get_input_ipc_handles, future, workers=[a_worker])\n for a_worker, future in not_on_worker]\n\n raw_arrays = [future for a_worker, future in on_worker]\n\n # IPC Handles are loaded in separate threads on worker so they can be\n # used to make calls through cython\n\n run = client.submit(_kneighbors_on_worker, (ipc_handles, raw_arrays), model, {\"k\": k}, workers=[exec_node])\n yield wait(run)\n\n dfs = [client.submit(build_dask_dfs, f, {\"k\": k}, workers=[worker])\n for worker, f in input_devarrays]\n yield wait(dfs)\n\n return gen.Return(dfs)\n\n def kneighbors(self, X, k):\n\n \"\"\"\n Queries the multi-gpu knn model given a dask-cudf as the query\n\n 1. Create 2 new Dask dataframes to hold output (1 chunk each per chunk of X), co-locate pieces w/ X.\n 2. Get IPC handles for each dataframe. 
Use IPCThread to hold onto them while calling query.\n\n :param input:\n A dask-cudf for calculating the kneighbors\n :param k:\n The number of nearest neighbors to query for each input vector.\n :return:\n dists and indices of the k-nearest neighbors to the input vectors\n \"\"\"\n\n client = default_client()\n dfs = client.sync(self._kneighbors, X, k).value\n\n dfs = [d for d in dfs if d.type != type(None)]\n\n local_divs = [client.submit(get_idx, f).result() for f in dfs]\n indices = [client.submit(get_I, f) for f in dfs]\n dists = [client.submit(get_D, f) for f in dfs]\n\n dfs_divs = list(zip(local_divs, indices, dists))\n\n # Sort delayed dfs by their starting index\n dfs_divs.sort(key = lambda x: x[0][0])\n\n I_meta = client.submit(get_I_meta, dfs[0]).result()\n D_meta = client.submit(get_D_meta, dfs[0]).result()\n\n I_ddf = dask_cudf.from_delayed(indices, meta = I_meta)\n D_ddf = dask_cudf.from_delayed(dists, meta = D_meta)\n\n return D_ddf, I_ddf\n\n\n def get(self, indices):\n \"\"\"\n Returns the vectors from the knn index for a list of indices.\n :param indices:\n :return:\n \"\"\"\n pass\n\n @staticmethod\n def _build_host_dict(gpu_futures, client):\n \"\"\"\n Helper function to build a dictionary mapping workers to parts\n that currently hold the parts of given futures.\n :param gpu_futures:\n :param client:\n :return:\n \"\"\"\n who_has = client.who_has(gpu_futures)\n\n key_to_host_dict = {}\n for key in who_has:\n key_to_host_dict[key] = parse_host_port(who_has[key][0])\n\n hosts_to_key_dict = {}\n for key, host in key_to_host_dict.items():\n if host not in hosts_to_key_dict:\n hosts_to_key_dict[host] = set([key])\n else:\n hosts_to_key_dict[host].add(key)\n\n workers = [key[0] for key in list(who_has.values())]\n return build_host_dict(workers)\n\n @gen.coroutine\n def _get_mg_info(self, ddf):\n \"\"\"\n Given a Dask cuDF, extract number of dimensions and convert\n the pieces of the Dask cuDF into Numba arrays, which can\n be passed into the kNN algorithm.\n build a\n :param ddf:\n :return:\n \"\"\"\n\n client = default_client()\n\n if isinstance(ddf, dd.DataFrame):\n cols = len(ddf.columns)\n parts = ddf.to_delayed()\n parts = client.compute(parts)\n yield wait(parts)\n else:\n raise Exception(\"Input should be a Dask DataFrame\")\n\n key_to_part_dict = dict([(str(part.key), part) for part in parts])\n who_has = yield client.who_has(parts)\n\n worker_map = []\n for key, workers in who_has.items():\n worker = parse_host_port(first(workers))\n worker_map.append((worker, key_to_part_dict[key]))\n\n gpu_data = [(worker, client.submit(to_gpu_matrix, part, workers=[worker]))\n for worker, part in worker_map]\n\n yield wait(gpu_data)\n\n raise gen.Return((gpu_data, cols))\n" ]
[ [ "numpy.zeros" ] ]
jpchiodini/Grasp-Planning
[ "e31234244b8f934743605ebea59d9d98a258957e" ]
[ "Grasping.py" ]
[ "import numpy as np\nimport math\nimport itertools\n\n\ndef GraspPointFiltering(numPts, P, N, C):\n # create superset of all possibilities:\n counter = list(range(0, numPts))\n points = list(itertools.combinations(counter, 2))\n curvatureVals = []\n\n for i in range(0, len(points)):\n x = points[i][0]\n y = points[i][1]\n curvatureVals.append(Term1(C[x], C[y]))\n\n curvatureVals = np.asarray(curvatureVals)\n\n # now sort the curvature values high to low for grasping point filter.\n sortIndices = (-curvatureVals).argsort()\n for i in range(0, len(points)):\n\n idx = sortIndices[i]\n x = points[idx][0]\n y = points[idx][1]\n\n # perform force closure test\n fcTest = Term2(P[x], P[y], N[x], N[y])\n\n if curvatureVals[idx] <= 0:\n break\n\n if fcTest < 0.34:\n return x, y\n\n # if we have finished the concave, test the convex...\n # sort low to high\n sortIndices = (curvatureVals).argsort()\n for i in range(0, len(points)):\n\n idx = sortIndices[i]\n x = points[idx][0]\n y = points[idx][1]\n\n # perform force closure test\n fcTest = Term2(P[x], P[y], N[x], N[y])\n\n if curvatureVals[idx] <= 0:\n break\n\n if fcTest < 0.34:\n return x, y\n\n # if we haven't returned anything till now, there are no good points...\n return 1, 1\n\n\ndef Term1(C1, C2):\n # print(C1+C2)\n return C1 + C2\n\n\ndef Term2(Pm1, Pm2, Nm1, Nm2):\n s1 = np.subtract(Pm1, Pm2)\n s2 = np.subtract(Pm1, Pm2)\n sub1 = s1 / np.linalg.norm(s1)\n sub2 = s2 / np.linalg.norm(s2)\n\n norm1 = Nm1 / np.linalg.norm(Nm1)\n norm2 = Nm2 / np.linalg.norm(Nm2)\n\n if np.dot(norm1, sub1) < 0:\n my_sub = sub2\n else:\n my_sub = sub1\n\n A = math.acos(np.dot(norm1, my_sub))\n B = math.acos(np.dot(norm2, my_sub))\n\n # print(math.degrees(A))\n # print(math.degrees(np.pi- B))\n\n return A ** 2 + (np.pi - B) ** 2\n\n\ndef FindBestGrasps(numPts, P, N, C):\n # THIS WAS AN ATTEMPT TO USE A GRADIENT ASCENT ALGORITHM TO OPTIMIZE THE GRASPING POINTS. IT FAILED HOWEVER,\n # BECAUSE THE OPTIMIZATION FUNCTION I USED WAS NOT BOUNDED, AND THUS NEVER CONVERGED. THERE IS PROBABLY SOME SIMILAR\n # FUNCTION THAT COULD WORK... 
ANYHOW, HERE IS THE CODE FOR GRADIENT DESCENT IF ANYONE GETS A WORKING FUNCTION.\n # gradient ascent to optimize the objective wr*Term1 - wf*Term2\n\n # find the entire set of pairs that we want to optimize.\n counter = list(range(0, numPts))\n points = list(itertools.combinations(counter, 2))\n\n learning_rate = 1\n initial_b = 0 # initial y-intercept guess\n initial_m = 0 # initial slope guess\n num_iterations = 1000\n print(\"Starting gradient descent at b = {0}, m = {1}, error = {2}\".format(initial_b, initial_m,\n compute_total_score(\n initial_b, initial_m, points, N, P,\n C)))\n print(\"Running...\")\n [b, m] = gradient_descent_runner(points, initial_b, initial_m, learning_rate, num_iterations, N, P, C)\n print(\"After {0} iterations b = {1}, m = {2}, error = {3}\".format(num_iterations, b, m,\n compute_total_score(b, m,\n points, N, P, C)))\n\n\ndef compute_total_score(a, b, points, N, P, C):\n totalScore = 0\n for i in range(0, len(points)):\n x = points[i][0]\n y = points[i][1]\n print((a * Term1(C[x], C[y])) - (b * Term2(P[x], P[y], N[x], N[y])))\n totalScore += (a * Term1(C[x], C[y])) - (b * Term2(P[x], P[y], N[x], N[y]))\n return totalScore\n\n\ndef gradient_descent_runner(points, starting_a, starting_b, learning_rate, num_iterations, N, P, C):\n a = starting_a\n b = starting_b\n for i in range(num_iterations):\n a, b = step_gradient(a, b, points, learning_rate, N, P, C)\n print(compute_total_score(a, b, points, N, P, C), a, b)\n return [a, b]\n\n\ndef step_gradient(a_current, b_current, points, learningRate, N, P, C):\n a_g = 0\n b_g = 0\n for i in range(0, len(points)):\n x = points[i][0]\n y = points[i][1]\n\n # a_gradient += 1.0 / len(points) * Term1(C[x], C[y])\n # b_gradient += 1.0 / len(points) * -Term2(P[x], P[y], N[x], N[y])\n\n t1 = Term1(C[x], C[y])\n t2 = Term2(P[x], P[y], N[x], N[y])\n\n a_g += (1.0 / len(points)) * ((a_current * t1 - b_current * t2) - t1 - t2) * t1\n b_g += (1.0 / len(points)) * ((a_current * t1 - b_current * t2) - t1 - t2) * t2\n\n new_b = a_current + (learningRate * a_g)\n new_m = b_current + (learningRate * b_g)\n return [new_b, new_m]\n" ]
[ [ "numpy.asarray", "numpy.dot", "numpy.subtract", "numpy.linalg.norm" ] ]
tussedrotten/pylie
[ "df34b820b9d9273bc9c4287e559e5d5837faf794" ]
[ "examples/vis_perturbations.py" ]
[ "import visgeom as vg\nfrom pylie import SO3, SE3\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, RadioButtons, CheckButtons\n\n\"\"\"Example - Visualizes different perturbations and the path they take along the manifold\"\"\"\n\n\ndef vis_perturbations():\n # Define the fixed frame \"a\" relative to the world frame \"w\".\n T_w_a = SE3((SO3.from_roll_pitch_yaw(5*np.pi/4, 0, np.pi/2), np.array([[2, 2, 2]]).T))\n\n # The vector xi represents a perturbation on the tangent vector space.\n # We change the elements in this vector by using the sliders.\n xi_vec = np.zeros([6, 1])\n\n # We can choose to draw an oriented box around the perturbed pose,\n # and we can draw the trajectory along the manifold (by interpolation).\n draw_options = {'Draw box': False, 'Draw manifold\\ntrajectory': True}\n\n # Use Qt 5 backend in visualisation.\n matplotlib.use('qt5agg')\n\n # Create figure and axis.\n fig, ax = plt.subplots(subplot_kw={'projection': '3d'})\n plt.subplots_adjust(left=0.25)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.margins(x=0)\n\n # Add the widgets.\n widget_color = 'lightgoldenrodyellow'\n xi_sliders = [Slider(plt.axes([0.05, 0.9 - 0.05*i, 0.20, 0.03], facecolor=widget_color),\n r'$\\xi_' + str(i+1) + '$', -4.0, 4.0, valinit=xi_vec[i].item(), valstep=0.01) for i in range(6)]\n button = Button(plt.axes([0.1, 0.55, 0.1, 0.04]), 'Reset', color=widget_color, hovercolor='0.975')\n check = CheckButtons(plt.axes([0.025, 0.10, 0.24, 0.15], facecolor=widget_color),\n draw_options.keys(),\n draw_options.values())\n radio = RadioButtons(plt.axes([0.025, 0.3, 0.24, 0.2], facecolor=widget_color),\n (r'1. $\\mathrm{Exp}(\\mathbf{\\xi})$',\n r'2. $\\mathbf{T}_{wc} \\circ \\mathrm{Exp}(\\mathbf{\\xi})$',\n r'3. 
$\\mathrm{Exp}(\\mathbf{\\xi}) \\circ \\mathbf{T}_{wc}$'),\n active=0)\n\n # Setup the update callback, which is called by the sliders and the radio buttons.\n def update(val):\n ax.clear()\n for i, slider in enumerate(xi_sliders):\n xi_vec[i] = slider.val\n\n if radio.value_selected[0] == '1':\n draw_exp(ax, xi_vec, draw_options)\n elif radio.value_selected[0] == '2':\n draw_right_perturbation(ax, T_w_a, xi_vec, draw_options)\n else:\n draw_left_perturbation(ax, T_w_a, xi_vec, draw_options)\n\n ax.set_xlim(-4, 4)\n ax.set_ylim(-4, 4)\n ax.set_zlim(-4, 4)\n vg.plot.axis_equal(ax)\n\n fig.canvas.draw_idle()\n for slider in xi_sliders:\n slider.on_changed(update)\n radio.on_clicked(update)\n\n # Setup the check buttons to update the \"draw options\".\n def update_draw_options(label):\n draw_options[label] = not draw_options[label]\n update([])\n check.on_clicked(update_draw_options)\n\n # Setup the reset callback, used by the reset button.\n def reset(event):\n for slider in xi_sliders:\n slider.reset()\n button.on_clicked(reset)\n\n # Start with first update.\n update([])\n plt.show()\n\n\ndef draw_exp(ax, xi_vec, draw_options):\n vg.plot_pose(ax, SE3().to_tuple(), scale=1, text='$\\mathcal{F}_w$')\n T_l = SE3.Exp(xi_vec)\n vg.plot_pose(ax, T_l.to_tuple()) #, text=r'$\\mathrm{Exp}(\\mathbf{\\xi})$')\n\n if draw_options['Draw box']:\n box_points = vg.utils.generate_box(pose=T_l.to_tuple(), scale=1)\n vg.utils.plot_as_box(ax, box_points)\n\n if draw_options['Draw manifold\\ntrajectory']:\n draw_interpolated(ax, SE3(), xi_vec, SE3())\n\n\ndef draw_right_perturbation(ax, T_w_a, xi_vec, draw_options):\n vg.plot_pose(ax, SE3().to_tuple(), scale=1, text='$\\mathcal{F}_w$')\n vg.plot_pose(ax, T_w_a.to_tuple(), scale=1, text='$\\mathcal{F}_a$')\n T_r = T_w_a @ SE3.Exp(xi_vec)\n\n vg.plot_pose(ax, T_r.to_tuple()) #, text=r'$\\mathbf{T}_{wa} \\circ \\mathrm{Exp}(\\mathbf{\\xi})$')\n\n if draw_options['Draw box']:\n box_points = vg.utils.generate_box(pose=T_r.to_tuple(), scale=1)\n vg.utils.plot_as_box(ax, box_points)\n\n if draw_options['Draw manifold\\ntrajectory']:\n draw_interpolated(ax, T_w_a, xi_vec, SE3())\n\n\ndef draw_left_perturbation(ax, T_w_a, xi_vec, draw_options):\n vg.plot_pose(ax, SE3().to_tuple(), scale=1, text='$\\mathcal{F}_w$')\n vg.plot_pose(ax, T_w_a.to_tuple(), scale=1, text='$\\mathcal{F}_a$')\n T_l = SE3.Exp(xi_vec) @ T_w_a\n vg.plot_pose(ax, T_l.to_tuple()) #, text=r'$\\mathrm{Exp}(\\mathbf{\\xi}) \\circ \\mathbf{T}_{wa}$')\n\n if draw_options['Draw box']:\n box_points = vg.utils.generate_box(pose=T_l.to_tuple(), scale=1)\n vg.utils.plot_as_box(ax, box_points)\n\n if draw_options['Draw manifold\\ntrajectory']:\n draw_interpolated(ax, SE3(), xi_vec, T_w_a)\n\n\ndef draw_interpolated(ax, T_1, xi, T_2):\n for alpha in np.linspace(0, 1, 20):\n T = T_1 @ SE3.Exp(alpha * xi) @ T_2\n vg.plot_pose(ax, T.to_tuple(), alpha=0.1)\n\n\nif __name__ == \"__main__\":\n vis_perturbations()\n" ]
[ [ "numpy.linspace", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.axes", "matplotlib.pyplot.subplots_adjust", "numpy.array", "matplotlib.pyplot.show", "numpy.zeros" ] ]
muzi2018/DRL_RTD
[ "518100c4c48f66fd38ef9877f3e4903c9cc2a237" ]
[ "Multi-Agent-Path-Finding/cbs_mapf/planner.py" ]
[ "#!/usr/bin/env python3\n'''\nAuthor: Haoran Peng\nEmail: gavinsweden@gmail.com\n\nAn implementation of multi-agent path finding using conflict-based search\n[Sharon et al., 2015]\n'''\nfrom typing import List, Tuple, Dict, Callable, Set\nimport multiprocessing as mp\nfrom heapq import heappush, heappop\nfrom itertools import combinations\nfrom copy import deepcopy\nimport numpy as np\n\n# The low level planner for CBS is the Space-Time A* planner\n# https://github.com/GavinPHR/Space-Time-AStar\nfrom stastar.planner import Planner as STPlanner\n\nfrom .constraint_tree import CTNode\nfrom .constraints import Constraints\nfrom .agent import Agent\nfrom .assigner import *\nclass Planner:\n\n def __init__(self, grid_size: int,\n robot_radius: int,\n static_obstacles: List[Tuple[int, int]]):\n\n self.robot_radius = robot_radius\n self.st_planner = STPlanner(grid_size, robot_radius, static_obstacles)\n\n '''\n You can use your own assignment function, the default algorithm greedily assigns\n the closest goal to each start.\n '''\n def plan(self, starts: List[Tuple[int, int]],\n goals: List[Tuple[int, int]],\n assign:Callable = min_cost,\n max_iter:int = 200,\n low_level_max_iter:int = 100,\n max_process:int = 10,\n debug:bool = False) -> np.ndarray:\n\n self.low_level_max_iter = low_level_max_iter\n self.debug = debug\n\n # Do goal assignment\n self.agents = assign(starts, goals)\n\n constraints = Constraints()\n\n # Compute path for each agent using low level planner\n solution = dict((agent, self.calculate_path(agent, constraints, None)) for agent in self.agents)\n\n open = []\n if all(len(path) != 0 for path in solution.values()):\n # Make root node\n node = CTNode(constraints, solution)\n # Min heap for quick extraction\n open.append(node)\n\n manager = mp.Manager()\n iter_ = 0\n while open and iter_ < max_iter:\n iter_ += 1\n\n results = manager.list([])\n\n processes = []\n\n # Default to 10 processes maximum\n for _ in range(max_process if len(open) > max_process else len(open)):\n p = mp.Process(target=self.search_node, args=[heappop(open), results])\n processes.append(p)\n p.start()\n\n for p in processes:\n p.join()\n\n for result in results:\n if len(result) == 1:\n if debug:\n print('CBS_MAPF: Paths found after about {0} iterations'.format(4 * iter_))\n return result[0]\n if result[0]:\n heappush(open, result[0])\n if result[1]:\n heappush(open, result[1])\n\n if debug:\n print('CBS-MAPF: Open set is empty, no paths found.')\n return np.array([])\n\n '''\n Abstracted away the cbs search for multiprocessing.\n The parameters open and results MUST BE of type ListProxy to ensure synchronization.\n '''\n def search_node(self, best: CTNode, results):\n agent_i, agent_j, time_of_conflict = self.validate_paths(self.agents, best)\n\n # If there is not conflict, validate_paths returns (None, None, -1)\n if agent_i is None:\n results.append((self.reformat(self.agents, best.solution),))\n return\n # Calculate new constraints\n agent_i_constraint = self.calculate_constraints(best, agent_i, agent_j, time_of_conflict)\n agent_j_constraint = self.calculate_constraints(best, agent_j, agent_i, time_of_conflict)\n\n # Calculate new paths\n agent_i_path = self.calculate_path(agent_i,\n agent_i_constraint,\n self.calculate_goal_times(best, agent_i, self.agents))\n agent_j_path = self.calculate_path(agent_j,\n agent_j_constraint,\n self.calculate_goal_times(best, agent_j, self.agents))\n\n # Replace old paths with new ones in solution\n solution_i = best.solution\n solution_j = 
deepcopy(best.solution)\n solution_i[agent_i] = agent_i_path\n solution_j[agent_j] = agent_j_path\n\n node_i = None\n if all(len(path) != 0 for path in solution_i.values()):\n node_i = CTNode(agent_i_constraint, solution_i)\n\n node_j = None\n if all(len(path) != 0 for path in solution_j.values()):\n node_j = CTNode(agent_j_constraint, solution_j)\n\n results.append((node_i, node_j))\n\n\n '''\n Pair of agent, point of conflict\n '''\n def validate_paths(self, agents, node: CTNode):\n # Check collision pair-wise\n for agent_i, agent_j in combinations(agents, 2):\n time_of_conflict = self.safe_distance(node.solution, agent_i, agent_j)\n # time_of_conflict=1 if there is not conflict\n if time_of_conflict == -1:\n continue\n return agent_i, agent_j, time_of_conflict\n return None, None, -1\n\n\n def safe_distance(self, solution: Dict[Agent, np.ndarray], agent_i: Agent, agent_j: Agent) -> int:\n for idx, (point_i, point_j) in enumerate(zip(solution[agent_i], solution[agent_j])):\n if self.dist(point_i, point_j) > 2*self.robot_radius:\n continue\n return idx\n return -1\n\n @staticmethod\n def dist(point1: np.ndarray, point2: np.ndarray) -> int:\n return int(np.linalg.norm(point1-point2, 2)) # L2 norm\n\n def calculate_constraints(self, node: CTNode,\n constrained_agent: Agent,\n unchanged_agent: Agent,\n time_of_conflict: int) -> Constraints:\n contrained_path = node.solution[constrained_agent]\n unchanged_path = node.solution[unchanged_agent]\n\n pivot = unchanged_path[time_of_conflict]\n conflict_end_time = time_of_conflict\n try:\n while self.dist(contrained_path[conflict_end_time], pivot) < 2*self.robot_radius:\n conflict_end_time += 1\n except IndexError:\n pass\n return node.constraints.fork(constrained_agent, tuple(pivot.tolist()), time_of_conflict, conflict_end_time)\n\n def calculate_goal_times(self, node: CTNode, agent: Agent, agents: List[Agent]):\n solution = node.solution\n goal_times = dict()\n for other_agent in agents:\n if other_agent == agent:\n continue\n time = len(solution[other_agent]) - 1\n goal_times.setdefault(time, set()).add(tuple(solution[other_agent][time]))\n return goal_times\n\n '''\n Calculate the paths for all agents with space-time constraints\n '''\n def calculate_path(self, agent: Agent, \n constraints: Constraints, \n goal_times: Dict[int, Set[Tuple[int, int]]]) -> np.ndarray:\n return self.st_planner.plan(agent.start, \n agent.goal, \n constraints.setdefault(agent, dict()), \n semi_dynamic_obstacles=goal_times,\n max_iter=self.low_level_max_iter, \n debug=self.debug)\n\n '''\n Reformat the solution to a numpy array\n '''\n @staticmethod\n def reformat(agents: List[Agent], solution: Dict[Agent, np.ndarray]):\n solution = Planner.pad(solution)\n reformatted_solution = []\n for agent in agents:\n reformatted_solution.append(solution[agent])\n return np.array(reformatted_solution)\n\n '''\n Pad paths to equal length, inefficient but well..\n '''\n @staticmethod\n def pad(solution: Dict[Agent, np.ndarray]):\n max_ = max(len(path) for path in solution.values())\n for agent, path in solution.items():\n if len(path) == max_:\n continue\n padded = np.concatenate([path, np.array(list([path[-1]])*(max_-len(path)))])\n solution[agent] = padded\n return solution\n\n" ]
[ [ "numpy.array", "numpy.linalg.norm" ] ]
chenghlee/imagecodecs
[ "926078fe406bfa4e6c0d4452f1eccf7201e105fd" ]
[ "imagecodecs/imagecodecs.py" ]
[ "# imagecodecs.py\n\n# Copyright (c) 2008-2021, Christoph Gohlke\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Image transformation, compression, and decompression codecs.\n\nImagecodecs is a Python library that provides block-oriented, in-memory buffer\ntransformation, compression, and decompression functions for use in the\ntifffile, czifile, and other scientific image input/output modules.\n\nDecode and/or encode functions are implemented for Zlib (DEFLATE), GZIP,\nZStandard (ZSTD), Blosc, Brotli, Snappy, LZMA, BZ2, LZ4, LZ4F, LZ4HC,\nLZW, LZF, ZFP, AEC, LERC, NPY, PNG, GIF, TIFF, WebP, JPEG 8-bit, JPEG 12-bit,\nLossless JPEG (LJPEG, SOF3), JPEG 2000, JPEG LS, JPEG XR, JPEG XL, AVIF,\nPackBits, Packed Integers, Delta, XOR Delta, Floating Point Predictor,\nBitorder reversal, Bitshuffle, and Float24 (24-bit floating point).\n\n:Author:\n `Christoph Gohlke <https://www.lfd.uci.edu/~gohlke/>`_\n\n:Organization:\n Laboratory for Fluorescence Dynamics. 
University of California, Irvine\n\n:License: BSD 3-Clause\n\n:Version: 2021.1.28\n\n:Status: Alpha\n\nRequirements\n------------\nThis release has been tested with the following requirements and dependencies\n(other versions may work):\n\n* `CPython 3.7.9, 3.8.7, 3.9.1 64-bit <https://www.python.org>`_\n* `Numpy 1.19.5 <https://pypi.org/project/numpy/>`_\n* `Cython 0.29.21 <https://cython.org>`_\n* `zlib 1.2.11 <https://github.com/madler/zlib>`_\n* `lz4 1.9.3 <https://github.com/lz4/lz4>`_\n* `zstd 1.4.8 <https://github.com/facebook/zstd>`_\n* `blosc 1.21.0 <https://github.com/Blosc/c-blosc>`_\n* `bzip2 1.0.8 <https://sourceware.org/bzip2>`_\n* `liblzma 5.2.5 <https://github.com/xz-mirror/xz>`_\n* `liblzf 3.6 <http://oldhome.schmorp.de/marc/liblzf.html>`_\n* `libpng 1.6.37 <https://github.com/glennrp/libpng>`_\n* `libwebp 1.1.0 <https://github.com/webmproject/libwebp>`_\n* `libtiff 4.2.0 <https://gitlab.com/libtiff/libtiff>`_\n* `libjpeg-turbo 2.0.6 <https://github.com/libjpeg-turbo/libjpeg-turbo>`_\n (8 and 12-bit)\n* `libjpeg 9d <http://libjpeg.sourceforge.net/>`_\n* `charls 2.2.0 <https://github.com/team-charls/charls>`_\n* `openjpeg 2.4.0 <https://github.com/uclouvain/openjpeg>`_\n* `jxrlib 1.1 <https://packages.debian.org/source/sid/jxrlib>`_\n* `zfp 0.5.5 <https://github.com/LLNL/zfp>`_\n* `bitshuffle 0.3.5 <https://github.com/kiyo-masui/bitshuffle>`_\n* `libaec 1.0.4 <https://gitlab.dkrz.de/k202009/libaec>`_\n* `snappy 1.1.8 <https://github.com/google/snappy>`_\n* `zopfli-1.0.3 <https://github.com/google/zopfli>`_\n* `brotli 1.0.9 <https://github.com/google/brotli>`_\n* `brunsli 0.1 <https://github.com/google/brunsli>`_\n* `giflib 5.2.1 <http://giflib.sourceforge.net/>`_\n* `lerc 2.2.1 <https://github.com/Esri/lerc>`_\n* `libdeflate 1.7 <https://github.com/ebiggers/libdeflate>`_\n* `libavif 0.8.4 <https://github.com/AOMediaCodec/libavif>`_\n* `dav1d 0.8.1 <https://github.com/videolan/dav1d>`_\n* `rav1e 0.3.5 <https://github.com/xiph/rav1e>`_\n* `aom 2.0.1 <https://aomedia.googlesource.com/aom>`_\n* `lcms 2.11 <https://github.com/mm2/Little-CMS>`_\n\nRequired Python packages for testing (other versions may work):\n\n* `tifffile 2021.1.11 <https://pypi.org/project/tifffile/>`_\n* `czifile 2019.7.2 <https://pypi.org/project/czifile/>`_\n* `python-blosc 1.10.2 <https://github.com/Blosc/python-blosc>`_\n* `python-lz4 3.1.3 <https://github.com/python-lz4/python-lz4>`_\n* `python-zstd 1.4.8.1 <https://github.com/sergey-dryabzhinsky/python-zstd>`_\n* `python-lzf 0.2.4 <https://github.com/teepark/python-lzf>`_\n* `python-brotli 1.0.9 <https://github.com/google/brotli/tree/master/python>`_\n* `python-snappy 0.6.0 <https://github.com/andrix/python-snappy>`_\n* `zopflipy 1.5 <https://github.com/hattya/zopflipy>`_\n* `bitshuffle 0.3.5 <https://github.com/kiyo-masui/bitshuffle>`_\n\nNotes\n-----\nThe API is not stable yet and might change between revisions.\n\nWorks on little-endian platforms only.\n\nPython 32-bit versions are deprecated. 
Python <= 3.6 are no longer supported.\n\nSome codecs are currently decode-only: ``tiff``, ``lzw``, ``packints``, and\n``jpegsof3``.\n\nThe latest `Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017\nand 2019 <https://support.microsoft.com/en-us/help/2977003/\nthe-latest-supported-visual-c-downloads>`_ is required on Windows.\n\nRefer to the imagecodecs/licenses folder for 3rd-party library licenses.\n\nThis software is based in part on the work of the Independent JPEG Group.\n\nThis software includes modified versions of `dcm2niix's jpg_0XC3.cpp\n<https://github.com/rordenlab/dcm2niix/blob/master/console/jpg_0XC3.cpp>`_.\n\nThis software includes a copy of `liblj92\n<https://bitbucket.org/baldand/mlrawviewer/src/master/liblj92/>`_.\n\nBuild instructions and wheels for manylinux and macOS courtesy of\n`Grzegorz Bokota <https://github.com/Czaki/imagecodecs_build>`_.\n\nUpdate pip and setuptools to the latest version before installing imagecodecs:\n\n ``python -m pip install --upgrade pip setuptools``\n\nInstall imagecodecs using precompiled wheels:\n\n ``python -m pip install --upgrade imagecodecs``\n\nInstall the requirements for building imagecodecs from source code on\nlatest Ubuntu Linux distributions:\n\n ``sudo apt-get install build-essential python3-dev cython3\n python3-setuptools python3-pip python3-wheel python3-numpy\n python3-pytest python3-blosc python3-brotli python3-snappy python3-lz4\n libz-dev libblosc-dev liblzma-dev liblz4-dev libzstd-dev libpng-dev\n libwebp-dev libbz2-dev libopenjp2-7-dev libjpeg-dev libjxr-dev\n liblcms2-dev libcharls-dev libaec-dev libbrotli-dev libsnappy-dev\n libzopfli-dev libgif-dev libtiff-dev libdeflate-dev libavif-dev``\n\nUse the ``--lite`` build option to only build extensions without 3rd-party\ndependencies. Use the ``--skip-extension`` build options to skip building\nspecific extensions, e.g.:\n\n ``python -m pip install imagecodecs --global-option=\"build_ext\"\n --global-option=\"--skip-bitshuffle\"``\n\nThe ``jpeg12``, ``jpegls``, ``jpegxl``, ``zfp``, ``avif``, ``lz4f``, and\n``lerc`` extensions are disabled by default when building from source.\n\nTo modify other build settings such as library names and compiler arguments,\nprovide a ``imagecodecs_distributor_setup.customize_build`` function, which\nwill be imported and executed during setup. 
See ``setup.py`` for examples.\n\nOther Python packages and C libraries providing imaging or compression codecs:\n\n* `numcodecs <https://github.com/zarr-developers/numcodecs>`_\n* `Python zlib <https://docs.python.org/3/library/zlib.html>`_\n* `Python bz2 <https://docs.python.org/3/library/bz2.html>`_\n* `Python lzma <https://docs.python.org/3/library/lzma.html>`_\n* `backports.lzma <https://github.com/peterjc/backports.lzma>`_\n* `python-lzo <https://bitbucket.org/james_taylor/python-lzo-static>`_\n* `python-lzw <https://github.com/joeatwork/python-lzw>`_\n* `python-lerc <https://pypi.org/project/lerc/>`_\n* `packbits <https://github.com/psd-tools/packbits>`_\n* `fpzip <https://github.com/seung-lab/fpzip>`_\n* `libmng <https://sourceforge.net/projects/libmng/>`_\n* `APNG patch for libpng <https://sourceforge.net/projects/libpng-apng/>`_\n* `OpenEXR <https://github.com/AcademySoftwareFoundation/openexr>`_\n* `tinyexr <https://github.com/syoyo/tinyexr>`_\n* `pytinyexr <https://github.com/syoyo/pytinyexr>`_\n* `jpeg-xl <https://gitlab.com/wg1/jpeg-xl>`_\n* `libjpeg <https://github.com/thorfdbg/libjpeg>`_ (GPL)\n* `pylibjpeg <https://github.com/pydicom/pylibjpeg>`_\n* `pylibjpeg-libjpeg <https://github.com/pydicom/pylibjpeg-libjpeg>`_ (GPL)\n* `pylibjpeg-openjpeg <https://github.com/pydicom/pylibjpeg-openjpeg>`_\n* `glymur <https://github.com/quintusdias/glymur>`_\n* `pyheif <https://github.com/carsales/pyheif>`_\n* `libheif <https://github.com/strukturag/libheif>`_ (LGPL)\n\nRevisions\n---------\n2021.1.28\n Pass 4915 tests.\n Add option to return JPEG XR fixed point pixel types as integers.\n Add LJPEG codec via liblj92 (alternative to JPEGSOF3 codec).\n Change zopfli header location.\n2021.1.11\n Fix build issues (#7, #8).\n Return bytearray instead of bytes on PyPy.\n Raise TypeError if output provided is bytes (breaking).\n2021.1.8\n Add float24 codec.\n Update copyrights.\n2020.12.24\n Update dependencies and build scripts.\n2020.12.22\n Add AVIF codec via libavif (WIP).\n Add DEFLATE/Zlib and GZIP codecs via libdeflate.\n Add LZ4F codec.\n Add high compression mode option to lz4_encode.\n Convert JPEG XR 16 and 32-bit fixed point pixel types to float32.\n Fix JPEG 2000 lossy encoding.\n Fix GIF disposal handling.\n Remove support for Python 3.6 (NEP 29).\n2020.5.30\n Add LERC codec via ESRI's lerc library.\n Enable building JPEG extensions with libjpeg >= 8.\n Enable distributors to modify build settings.\n2020.2.18\n Fix segfault when decoding corrupted LZW segments.\n Work around Cython raises AttributeError when using incompatible numpy.\n Raise ValueError if in-place decoding is not possible (except floatpred).\n2020.1.31\n Add GIF codec via giflib.\n Add TIFF decoder via libtiff (WIP).\n Add codec_check functions (WIP).\n Fix formatting libjpeg error messages.\n Use xfail in tests.\n Load extensions on demand on Python >= 3.7.\n Add build options to skip building specific extensions.\n Split imagecodecs extension into individual extensions.\n Move shared code into shared extension.\n Rename imagecodecs_lite extension and imagecodecs C library to 'imcd'.\n Remove support for Python 2.7 and 3.5.\n2019.12.31\n Fix decoding of indexed PNG with transparency.\n Last version to support Python 2.7 and 3.5.\n2019.12.16\n Add Zopfli codec.\n Add Snappy codec.\n Rename j2k codec to jpeg2k.\n Rename jxr codec to jpegxr.\n Use Debian's jxrlib.\n Support pathlib and binary streams in imread and imwrite.\n Move external C declarations to pxd files.\n Move shared code to pxi file.\n Update 
copyright notices.\n2019.12.10\n Add version functions.\n Add Brotli codec (WIP).\n Add optional JPEG XL codec via Brunsli repacker (WIP).\n2019.12.3\n Sync with imagecodecs-lite.\n2019.11.28\n Add AEC codec via libaec (WIP).\n Do not require scikit-image for testing.\n Require CharLS 2.1.\n2019.11.18\n Add bitshuffle codec.\n Fix formatting of unknown error numbers.\n Fix test failures with official python-lzf.\n2019.11.5\n Rebuild with updated dependencies.\n2019.5.22\n Add optional YCbCr chroma subsampling to JPEG encoder.\n Add default reversible mode to ZFP encoder.\n Add imread and imwrite helper functions.\n2019.4.20\n Fix setup requirements.\n2019.2.22\n Move codecs without 3rd-party C library dependencies to imagecodecs_lite.\n2019.2.20\n Rebuild with updated dependencies.\n2019.1.20\n Add more pixel formats to JPEG XR codec.\n Add JPEG XR encoder.\n2019.1.14\n Add optional ZFP codec via zfp library (WIP).\n Add numpy NPY and NPZ codecs.\n Fix some static codechecker errors.\n2019.1.1\n ...\n\nRefer to the CHANGES file for older revisions.\n\n\"\"\"\n\n__version__ = '2021.1.28'\n\nimport os\nimport sys\nimport io\nimport importlib\n\nimport numpy\n\n# names of public attributes by module\n# will be updated with standard attributes\n_API = {\n None: [\n 'version',\n 'imread',\n 'imwrite',\n 'imagefileext',\n 'DelayedImportError',\n ('none', 'numpy', 'jpeg'),\n ],\n 'imcd': [\n 'imcd_version',\n 'numpy_abi_version',\n 'cython_version',\n (\n 'bitorder',\n 'delta',\n 'float24',\n 'floatpred',\n 'lzw',\n 'packbits',\n 'packints',\n 'xor',\n ),\n ],\n 'aec': [],\n 'avif': [],\n # 'exr': [],\n 'bitshuffle': [],\n 'blosc': [],\n 'brotli': [],\n 'bz2': [],\n 'deflate': ['deflate_crc32', 'deflate_adler32', ('deflate', 'gzip')],\n 'gif': [],\n 'jpeg2k': [],\n 'jpeg8': [],\n 'jpeg12': [],\n 'jpegls': [],\n 'jpegsof3': [],\n 'jpegxl': [],\n 'jpegxr': [],\n 'lerc': [],\n 'ljpeg': [],\n 'lz4': [],\n 'lz4f': [],\n 'lzf': [],\n 'lzma': [],\n 'png': [],\n 'snappy': [],\n # 'szip': [],\n 'tiff': [],\n 'webp': [],\n 'zfp': [],\n 'zlib': ['zlib_crc32'],\n 'zopfli': [],\n 'zstd': [],\n # 'module': ['attribute1', 'attribute2', ('codec1', 'code2')]\n}\n\n# map extra to existing attributes\n# e.g. 
keep deprecated names for older versions of tifffile and czifile\n_COMPATIBILITY = {\n 'JPEG': 'JPEG8',\n 'jpeg_check': 'jpeg8_check',\n 'jpeg_version': 'jpeg8_version',\n 'zopfli_check': 'zlib_check',\n 'zopfli_decode': 'zlib_decode',\n 'j2k_encode': 'jpeg2k_encode',\n 'j2k_decode': 'jpeg2k_decode',\n 'jxr_encode': 'jpegxr_encode',\n 'jxr_decode': 'jpegxr_decode',\n}\n\n# map attribute names to module names\n_ATTRIBUTES = {}\n\n# map of codec names to module names\n_CODECS = {}\n\n\ndef _add_codec(module, codec=None, attributes=None):\n \"\"\"Register codec in global _API, _ATTRIBUTES, and _CODECS.\"\"\"\n if codec is None:\n codec = module\n if attributes is None:\n attributes = (\n f'{codec}_encode',\n f'{codec}_decode',\n f'{codec}_check',\n f'{codec}_version',\n f'{codec.capitalize()}Error',\n f'{codec.upper()}',\n )\n if module in _API:\n _API[module].extend(attributes)\n else:\n _API[module] = attributes\n _ATTRIBUTES.update({attr: module for attr in _API[module]})\n _CODECS[codec] = module\n\n\ndef _register_codecs():\n \"\"\"Parse _API and register all codecs.\"\"\"\n for module, attributes in _API.items():\n for attr in attributes.copy():\n if isinstance(attr, tuple):\n attributes.remove(attr)\n for codec in attr:\n _add_codec(module, codec)\n break\n else:\n _add_codec(module)\n\n\ndef _load_all():\n \"\"\"Add all registered attributes to package namespace.\"\"\"\n for name in __dir__():\n __getattr__(name)\n\n\ndef __dir__():\n \"\"\"Module __dir__.\"\"\"\n return sorted(list(_ATTRIBUTES) + list(_COMPATIBILITY))\n\n\ndef __getattr__(name):\n \"\"\"Load attribute's extension and add its attributes to package namespace.\n\n \"\"\"\n name_ = name\n name = _COMPATIBILITY.get(name, name)\n\n if name not in _ATTRIBUTES:\n raise AttributeError(f\"module 'imagecodecs' has no attribute {name!r}\")\n\n module_ = _ATTRIBUTES[name]\n if module_ is None:\n return None\n\n try:\n module = importlib.import_module('._' + module_, 'imagecodecs')\n except ImportError:\n module = None\n except AttributeError:\n # AttributeError: type object 'imagecodecs._module.array' has no\n # attribute '__reduce_cython__'\n # work around Cython raises AttributeError e.g. when the _shared\n # module failed to import due to an incompatible numpy version\n from . 
import _shared # noqa\n\n module = None\n\n for n in _API[module_]:\n if n in _COMPATIBILITY:\n continue\n attr = getattr(module, n, None)\n if attr is None:\n attr = _stub(n, module)\n setattr(imagecodecs, n, attr)\n\n attr = getattr(imagecodecs, name)\n if name != name_:\n setattr(imagecodecs, name_, attr)\n return attr\n\n\nclass DelayedImportError(ImportError):\n def __init__(self, name):\n msg = f\"could not import name {name!r} from 'imagecodecs'\"\n super().__init__(msg)\n\n\ndef _stub(name, module):\n \"\"\"Return stub function or class.\"\"\"\n\n if name.endswith('_version'):\n if module is None:\n\n def stub_version():\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n return f\"{name[:-8]} n/a\"\n\n else:\n\n def stub_version():\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n return f\"{name[:-8]} unknow\"\n\n return stub_version\n\n if name.endswith('_check'):\n if module is None:\n\n def stub_check(arg):\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n return False\n\n else:\n\n def stub_check(arg):\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n return None\n\n return stub_check\n\n if name.endswith('_decode'):\n\n def stub_decode(*args, **kwargs):\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n raise DelayedImportError(name)\n\n return stub_decode\n\n if name.endswith('_encode'):\n\n def stub_encode(*args, **kwargs):\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n raise DelayedImportError(name)\n\n return stub_encode\n\n if name.islower():\n\n def stub_function(*args, **kwargs):\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n raise DelayedImportError(name)\n\n return stub_function\n\n if name.endswith('Error'):\n\n class StubError(RuntimeError):\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n\n def __init__(self, *args, **kwargs):\n raise DelayedImportError(name)\n\n return StubError\n\n class StubType(type):\n def __getattr__(cls, arg):\n raise DelayedImportError(name)\n\n if module is None:\n\n def __bool__(cls):\n return False\n\n if name.isupper():\n\n class STUB(metaclass=StubType):\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n\n return STUB\n\n class Stub(metaclass=StubType):\n f\"\"\"Stub for imagecodecs.{name}.\"\"\"\n\n return Stub\n\n\ndef _extensions():\n \"\"\"Return sorted list of extension names.\"\"\"\n return sorted(e for e in _API if e is not None)\n\n\ndef version(astype=None, _versions_=[]):\n \"\"\"Return version information about all codecs and dependencies.\"\"\"\n if not _versions_:\n _versions_.extend(\n (\n f'imagecodecs {__version__}',\n imagecodecs.cython_version(),\n imagecodecs.numpy_version(),\n imagecodecs.numpy_abi_version(),\n imagecodecs.imcd_version(),\n )\n )\n _versions_.extend(\n sorted(\n set(\n getattr(imagecodecs, v)()\n for v in _ATTRIBUTES\n if v.endswith('_version')\n and v\n not in (\n 'imcd_version',\n 'numpy_abi_version',\n 'numpy_version',\n 'cython_version',\n 'none_version',\n )\n )\n )\n )\n\n if astype is None or astype is str:\n return ', '.join(ver.replace(' ', '-') for ver in _versions_)\n if astype is dict:\n return dict(ver.split(' ') for ver in _versions_)\n return tuple(_versions_)\n\n\ndef imread(fileobj, codec=None, memmap=True, return_codec=False, **kwargs):\n \"\"\"Return image data from file as numpy array.\"\"\"\n import mmap\n\n codecs = []\n if codec is None:\n # find codec based on file extension\n if isinstance(fileobj, (str, os.PathLike)):\n ext = os.path.splitext(os.fspath(fileobj))[-1][1:].lower()\n else:\n ext = None\n if ext in _imcodecs():\n codec = _imcodecs()[ext]\n if codec == 'jpeg':\n codecs.extend(('jpeg8', 
'jpeg12', 'jpegls', 'jpegsof3'))\n else:\n codecs.append(codec)\n # try other imaging codecs\n codecs.extend(\n c\n for c in (\n 'tiff',\n 'png',\n 'gif',\n 'webp',\n 'jpeg8',\n 'jpeg12',\n 'jpegsof3',\n 'jpeg2k',\n 'jpegls',\n 'jpegxr',\n 'jpegxl',\n 'avif',\n # 'exr',\n 'zfp',\n 'lerc',\n 'numpy',\n )\n if c not in codecs\n )\n else:\n # use provided codecs\n if not isinstance(codec, (list, tuple)):\n codec = [codec]\n for c in codec:\n if isinstance(c, str):\n c = c.lower()\n c = _imcodecs().get(c, c)\n codecs.append(c)\n\n offset = None\n close = False\n if isinstance(fileobj, mmap.mmap):\n data = fileobj\n offset = data.tell()\n elif hasattr(fileobj, 'read'):\n # binary stream: open file, BytesIO\n data = fileobj.read()\n elif isinstance(fileobj, (str, os.PathLike)):\n # TODO: support urllib.request.urlopen ?\n # file name\n with open(os.fspath(fileobj), 'rb') as fh:\n if memmap:\n offset = 0\n close = True\n data = mmap.mmap(fh.fileno(), 0, access=mmap.ACCESS_READ)\n else:\n data = fh.read()\n else:\n # binary data\n data = fileobj\n\n exceptions = []\n image = None\n for codec in codecs:\n if callable(codec):\n func = codec\n else:\n try:\n func = getattr(imagecodecs, codec + '_decode')\n except Exception as exc:\n exceptions.append(f'{repr(codec).upper()}: {exc}')\n continue\n try:\n image = func(data, **kwargs)\n if image.dtype == 'object':\n image = None\n raise ValueError('failed')\n break\n except DelayedImportError:\n pass\n except Exception as exc:\n # raise\n exceptions.append(f'{func.__name__.upper()}: {exc}')\n if offset is not None:\n data.seek(offset)\n\n if close:\n data.close()\n\n if image is None:\n raise ValueError('\\n'.join(exceptions))\n\n if return_codec:\n return image, func\n return image\n\n\ndef imwrite(fileobj, data, codec=None, **kwargs):\n \"\"\"Write numpy array to image file.\"\"\"\n if codec is None:\n # find codec based on file extension\n if isinstance(fileobj, (str, os.PathLike)):\n ext = os.path.splitext(os.fspath(fileobj))[-1].lower()[1:]\n else:\n raise ValueError('no codec specified')\n\n codec = _imcodecs().get(ext, ext)\n try:\n codec = getattr(imagecodecs, codec + '_encode')\n except AttributeError as exc:\n raise ValueError(f'invalid codec {codec!r}') from exc\n\n elif isinstance(codec, str):\n codec = codec.lower()\n codec = _imcodecs().get(codec, codec)\n try:\n codec = getattr(imagecodecs, codec + '_encode')\n except AttributeError as exc:\n raise ValueError(f'invalid codec {codec!r}') from exc\n\n elif not callable(codec):\n raise ValueError(f'invalid codec {codec!r}')\n\n data = codec(data, **kwargs)\n if hasattr(fileobj, 'write'):\n # binary stream: open file, BytesIO\n fileobj.write(data)\n else:\n # file name\n with open(str(fileobj), 'wb') as fh:\n fh.write(data)\n\n\ndef _imcodecs(_codecs_={}):\n \"\"\"Return map of image file extensions to codec names.\"\"\"\n if not _codecs_:\n codecs = {\n 'avif': ('avif', 'avifs'),\n # 'exr': ('exr',),\n 'gif': ('gif',),\n 'jpeg': ('jpg', 'jpeg', 'jpe', 'jfif', 'jif', 'ljpeg'),\n 'jpeg2k': ('j2k', 'jp2', 'j2c', 'jpc', 'jpx', 'jpf'), # jpm, mj2\n 'jpegls': ('jls',),\n 'jpegxl': ('jxl', 'brn'),\n 'jpegxr': ('jxr', 'hdp', 'wdp'),\n 'lerc': ('lerc1', 'lerc2'),\n 'numpy': ('npy', 'npz'),\n 'png': ('png',),\n 'tiff': ('tif', 'tiff', 'tf8', 'tf2', 'btf'),\n 'webp': ('webp',),\n 'zfp': ('zfp',),\n }\n _codecs_.update(\n (ext, codec) for codec, exts in codecs.items() for ext in exts\n )\n return _codecs_\n\n\ndef imagefileext():\n \"\"\"Return list of image file extensions handled by imread and 
imwrite.\"\"\"\n return list(_imcodecs().keys())\n\n\nNONE = True\nNoneError = RuntimeError\n\n\ndef none_version():\n \"\"\"Return empty version string.\"\"\"\n return ''\n\n\ndef none_check(data):\n \"\"\"Return True if data likely contains Template data.\"\"\"\n\n\ndef none_decode(data, *args, **kwargs):\n \"\"\"Decode NOP.\"\"\"\n return data\n\n\ndef none_encode(data, *args, **kwargs):\n \"\"\"Encode NOP.\"\"\"\n return data\n\n\nNUMPY = True\nNumpyError = RuntimeError\n\n\ndef numpy_version():\n \"\"\"Return numpy version string.\"\"\"\n return f'numpy {numpy.__version__}'\n\n\ndef numpy_check(data):\n \"\"\"Return True if data likely contains NPY or NPZ data.\"\"\"\n with io.BytesIO(data) as fh:\n data = fh.read(64)\n magic = b'\\x93NUMPY'\n return data.startswith(magic) or (data.startswith(b'PK') and magic in data)\n\n\ndef numpy_decode(data, index=0, out=None, **kwargs):\n \"\"\"Decode NPY and NPZ.\"\"\"\n with io.BytesIO(data) as fh:\n try:\n out = numpy.load(fh, **kwargs)\n except ValueError as exc:\n raise ValueError('not a numpy array') from exc\n if hasattr(out, 'files'):\n try:\n index = out.files[index]\n except Exception:\n pass\n out = out[index]\n return out\n\n\ndef numpy_encode(data, level=None, out=None):\n \"\"\"Encode NPY and NPZ.\"\"\"\n with io.BytesIO() as fh:\n if level:\n numpy.savez_compressed(fh, data)\n else:\n numpy.save(fh, data)\n fh.seek(0)\n out = fh.read()\n return out\n\n\nJpegError = RuntimeError\n\n\ndef jpeg_decode(\n data,\n bitspersample=None,\n tables=None,\n colorspace=None,\n outcolorspace=None,\n shape=None,\n out=None,\n):\n \"\"\"Decode JPEG 8-bit, 12-bit, SOF3, LS, or XL.\n\n \"\"\"\n if bitspersample is None:\n try:\n return imagecodecs.jpeg8_decode(\n data,\n tables=tables,\n colorspace=colorspace,\n outcolorspace=outcolorspace,\n shape=shape,\n out=out,\n )\n except Exception as exc:\n msg = str(exc)\n if 'Empty JPEG image' in msg:\n # TODO: handle Hamamatsu NDPI slides with dimensions > 65500\n raise exc\n if 'Unsupported JPEG data precision' in msg:\n return imagecodecs.jpeg12_decode(\n data,\n tables=tables,\n colorspace=colorspace,\n outcolorspace=outcolorspace,\n shape=shape,\n out=out,\n )\n if 'SOF type' in msg:\n return imagecodecs.jpegsof3_decode(data, out=out)\n # Unsupported marker type\n try:\n return imagecodecs.jpegls_decode(data, out=out)\n except Exception:\n try:\n return imagecodecs.jpegxl_decode(data, out=out)\n except Exception:\n raise exc\n try:\n if bitspersample == 8:\n return imagecodecs.jpeg8_decode(\n data,\n tables=tables,\n colorspace=colorspace,\n outcolorspace=outcolorspace,\n shape=shape,\n out=out,\n )\n if bitspersample == 12:\n return imagecodecs.jpeg12_decode(\n data,\n tables=tables,\n colorspace=colorspace,\n outcolorspace=outcolorspace,\n shape=shape,\n out=out,\n )\n try:\n return imagecodecs.jpegsof3_decode(data, out=out)\n except Exception:\n return imagecodecs.jpegls_decode(data, out=out)\n except Exception as exc:\n msg = str(exc)\n if 'Empty JPEG image' in msg:\n raise exc\n if 'SOF type' in msg:\n return imagecodecs.jpegsof3_decode(data, out=out)\n try:\n return imagecodecs.jpegls_decode(data, out=out)\n except Exception:\n try:\n return imagecodecs.jpegxl_decode(data, out=out)\n except Exception:\n raise exc\n\n\ndef jpeg_encode(\n data,\n level=None,\n colorspace=None,\n outcolorspace=None,\n subsampling=None,\n optimize=None,\n smoothing=None,\n out=None,\n):\n \"\"\"Encode JPEG 8-bit or 12-bit.\n\n \"\"\"\n if data.dtype == numpy.uint8:\n func = imagecodecs.jpeg8_encode\n elif data.dtype 
== numpy.uint16:\n func = imagecodecs.jpeg12_encode\n else:\n raise ValueError(f'invalid data type {data.dtype}')\n return func(\n data,\n level=level,\n colorspace=colorspace,\n outcolorspace=outcolorspace,\n subsampling=subsampling,\n optimize=optimize,\n smoothing=smoothing,\n out=out,\n )\n\n\n# initialize package\nimagecodecs = sys.modules['imagecodecs']\n\n_register_codecs()\n" ]
[ [ "numpy.load", "numpy.savez_compressed", "numpy.save" ] ]
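A minimal usage sketch for the imread/imwrite helpers defined in the imagecodecs module dumped above (this note is not part of the dataset row; the file name 'example.png' and the random test image are assumptions for illustration, and the sketch assumes the imagecodecs package is installed with its PNG extension built):

import numpy
import imagecodecs

# Encode a small RGB image; imwrite infers the codec from the '.png' extension
# via the extension-to-codec map shown in _imcodecs() above.
image = numpy.random.randint(0, 255, (64, 64, 3), dtype=numpy.uint8)
imagecodecs.imwrite('example.png', image)

# imread tries the registered imaging codecs until one succeeds and returns a numpy array.
decoded = imagecodecs.imread('example.png')
assert numpy.array_equal(image, decoded)  # PNG is lossless, so the round trip is exact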
Nayan-Das/augur
[ "857f4a4e7d688fd54356aa0f546834071fbabbf2" ]
[ "workers/linux_badge_worker/linux_badge_worker/worker.py" ]
[ "import os\nfrom datetime import datetime\nimport logging\nimport requests\nimport json\nfrom urllib.parse import quote\nfrom multiprocessing import Process, Queue\n\nfrom linux_badge_worker import __data_source__, __tool_source__, __tool_version__\nimport pandas as pd\nimport sqlalchemy as s\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy import MetaData\nfrom workers.standard_methods import register_task_completion, register_task_failure, connect_to_broker, update_gh_rate_limit, record_model_process\n\nclass CollectorTask:\n \"\"\" Worker's perception of a task in its queue\n Holds a message type (EXIT, TASK, etc) so the worker knows how to process the queue entry\n and the github_url given that it will be collecting data for\n \"\"\"\n def __init__(self, message_type='TASK', entry_info=None):\n self.type = message_type\n self.entry_info = entry_info\n\ndef dump_queue(queue):\n \"\"\"\n Empties all pending items in a queue and returns them in a list.\n \"\"\"\n result = []\n queue.put(\"STOP\")\n for i in iter(queue.get, 'STOP'):\n result.append(i)\n # time.sleep(.1)\n return result\n\nclass BadgeWorker:\n \"\"\" Worker that collects repo badging data from CII\n config: database credentials, broker information, and ID\n \"\"\"\n def __init__(self, config, task=None):\n logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid())))\n self.config = config\n\n self.db = None\n self.repo_badging_table = None\n\n self._task = task\n self._queue = Queue()\n self._child = None\n\n self.history_id = None\n self.finishing_task = False\n self.working_on = None\n self.results_counter = 0\n\n self.specs = {\n \"id\": self.config['id'],\n \"location\": self.config['location'],\n \"qualifications\": [\n {\n \"given\": [[\"git_url\"]],\n \"models\":[\"badges\"]\n }\n ],\n \"config\": [self.config]\n }\n\n self._db_str = 'postgresql://{}:{}@{}:{}/{}'.format(\n self.config['user'],\n self.config['password'],\n self.config['host'],\n self.config['port'],\n self.config['database']\n )\n\n dbschema = 'augur_data'\n self.db = s.create_engine(self._db_str, poolclass=s.pool.NullPool,\n connect_args={'options': '-csearch_path={}'.format(dbschema)})\n\n helper_schema = 'augur_operations'\n self.helper_db = s.create_engine(self._db_str, poolclass = s.pool.NullPool,\n connect_args={'options': '-csearch_path={}'.format(helper_schema)})\n logging.info(\"Database connection established...\")\n\n metadata = MetaData()\n helper_metadata = MetaData()\n\n metadata.reflect(self.db, only=['repo_badging'])\n helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth'])\n\n Base = automap_base(metadata=metadata)\n HelperBase = automap_base(metadata=helper_metadata)\n\n Base.prepare()\n HelperBase.prepare()\n\n self.history_table = HelperBase.classes.worker_history.__table__\n self.job_table = HelperBase.classes.worker_job.__table__\n self.repo_badging_table = Base.classes.repo_badging.__table__\n logging.info(\"ORM setup complete...\")\n\n # Organize different api keys/oauths available\n self.oauths = []\n self.headers = None\n\n # Endpoint to hit solely to retrieve rate limit information from headers of the response\n url = \"https://api.github.com/users/gabe-heim\"\n\n # Make a list of api key in the config combined w keys stored in the database\n oauth_sql = s.sql.text(\"\"\"\n SELECT * FROM worker_oauth WHERE access_token <> '{}'\n \"\"\".format(0))\n\n for oauth in [{'oauth_id': 0, 'access_token': 0}] + json.loads(pd.read_sql(oauth_sql, self.helper_db, 
params={}).to_json(orient=\"records\")):\n self.headers = {'Authorization': 'token %s' % oauth['access_token']}\n logging.info(\"Getting rate limit info for oauth: {}\".format(oauth))\n response = requests.get(url=url, headers=self.headers)\n self.oauths.append({\n 'oauth_id': oauth['oauth_id'],\n 'access_token': oauth['access_token'],\n 'rate_limit': int(response.headers['X-RateLimit-Remaining']),\n 'seconds_to_reset': (datetime.fromtimestamp(int(response.headers['X-RateLimit-Reset'])) \\\n - datetime.now()).total_seconds()\n })\n logging.info(\"Found OAuth available for use: {}\".format(self.oauths[-1]))\n\n if len(self.oauths) == 0:\n logging.info(\"No API keys detected, please include one in your config or in the worker_oauths table in the augur_operations schema of your database\\n\")\n\n # First key to be used will be the one specified in the config (first element in\n # self.oauths array will always be the key in use)\n self.headers = {'Authorization': 'token %s' % self.oauths[0]['access_token']}\n\n # Send broker hello message\n connect_to_broker(self, logging.getLogger())\n logging.info(\"Connected to the broker...\\n\")\n\n def update_config(self, config):\n \"\"\" Method to update config and set a default\n \"\"\"\n self.config = {\n \"key\": \"\",\n \"display_name\": \"\",\n \"description\": \"\",\n \"required\": 1,\n \"type\": \"string\"\n }\n self.config.update(config)\n self.API_KEY = self.config['github_api_key']\n\n @property\n def task(self):\n \"\"\" Property that is returned when the worker's current task is referenced\n \"\"\"\n return self._task\n\n @task.setter\n def task(self, value):\n \"\"\" entry point for the broker to add a task to the queue\n Adds this task to the queue, and calls method to process queue\n \"\"\"\n \n if value['job_type'] == \"UPDATE\" or value['job_type'] == \"MAINTAIN\":\n self._queue.put(value)\n\n if 'focused_task' in value:\n if value['focused_task'] == 1:\n logging.info(\"Focused task is ON\\n\")\n self.finishing_task = True\n else:\n self.finishing_task = False\n logging.info(\"Focused task is OFF\\n\")\n else:\n self.finishing_task = False\n logging.info(\"Focused task is OFF\\n\")\n \n self._task = value\n self.run()\n\n\n def cancel(self):\n \"\"\" Delete/cancel current task\n \"\"\"\n self._task = None\n\n def badges_model(self, entry_info, repo_id):\n \"\"\" Data collection and storage method\n Query the CII API and store the result in the DB for the badges model\n \"\"\"\n git_url = entry_info['given']['git_url']\n logging.info(\"Collecting data for {}\".format(git_url))\n extension = \"/projects.json?pq=\" + (quote(git_url[0:-4]))\n\n url = self.config['endpoint'] + extension\n logging.info(\"Hitting CII endpoint: \" + url + \" ...\")\n data = requests.get(url=url).json()\n\n if data != []:\n logging.info(\"Inserting badging data for \" + git_url)\n self.db.execute(self.repo_badging_table.insert()\\\n .values(repo_id=repo_id,\n data=data,\n tool_source=__tool_source__,\n tool_version=__tool_version__,\n data_source=__data_source__))\n\n self.results_counter += 1\n else:\n logging.info(\"No CII data found for {}\\n\".format(git_url))\n\n def collect(self):\n \"\"\" Function to process each entry in the worker's task queue\n Determines what action to take based off the message type\n \"\"\"\n while True:\n if not self._queue.empty():\n message = self._queue.get()\n self.working_on = message['job_type']\n else:\n break\n logging.info(\"Popped off message: {}\\n\".format(str(message)))\n\n if message['job_type'] == 'STOP':\n 
break\n\n if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':\n raise ValueError('{} is not a recognized task type'.format(message['job_type']))\n pass\n\n \"\"\" Query all repos with repo url of given task \"\"\"\n repoUrlSQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(message['given']['git_url']))\n repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])\n\n try:\n if message['models'][0] == 'badges':\n self.badges_model(message, repo_id)\n except Exception as e:\n register_task_failure(self, logging, message, repo_id, e)\n pass\n\n register_task_completion(self, logging, message, repo_id, \"badges\")\n\n def run(self):\n \"\"\" Kicks off the processing of the queue if it is not already being processed\n Gets run whenever a new task is added\n \"\"\"\n logging.info(\"Running...\\n\")\n self._child = Process(target=self.collect, args=())\n self._child.start()\n" ]
[ [ "pandas.read_sql" ] ]
rodekruis/Covid-SEIR
[ "c1f0d5d23b1bb82d5ac6298193dbcae139a77b0f" ]
[ "bin/confidencecurves.py" ]
[ "import os\nimport warnings\n\nfrom src.io_func import load_config, load_data\nfrom src.tools import calc_axis_interval\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport matplotlib.dates as mdates\n\nfrom src.parse import get_mean\nfrom src.tools import generate_hos_actual, generate_zero_columns\n\nI_TIME = 0\nI_INF = 1\nI_DEAD = 2\nI_REC = 3\nI_HOSCUM = 4\nI_ICU = 5\nI_HOS = 6\n\nO_TIME = 0\nO_SUS = 1\nO_EXP = 2\nO_INF = 3\nO_REM = 4\nO_HOS = 5\nO_HOSCUM = 6\nO_ICU = 7\nO_ICUCUM = 8\nO_REC = 9\nO_DEAD = 10\nO_CUMINF = 11\n\nS_HOS = 'hospitalized'\nS_ICU = 'ICU'\nS_HOSCUM = 'hospitalizedcum'\nS_DEAD = 'dead'\nS_INF = 'infected'\n\n\ndef plot_confidence_alpha(configpath, config, inputdata, firstdate):\n\n base = (os.path.split(configpath)[-1]).split('.')[0]\n outpath = os.path.join(os.path.split(os.getcwd())[0], 'output', base)\n\n names = ['posterior', 'prior']\n\n\n for i in range(0, 2):\n modelpath = '{}_{}_prob_{}_calibrated_on_{}.csv'.format(outpath, names[i], 'alpha', config['calibration_mode'])\n\n # modelpath = 'c:\\\\Users\\\\weesjdamv\\\\git\\\\corona\\\\configs\\\\r0_reductie_rivm.csv'\n # read the\n warnings.filterwarnings(\"error\")\n try:\n modeldata = np.genfromtxt(modelpath, names=True, delimiter=',')\n except IOError as e:\n print('No alpha file found (rerun esmda), expecting alpha in:', modelpath)\n return\n\n xmax = config['XMAX']\n time_delay = config['time_delay']\n xmax = xmax + time_delay\n # read xmax\n try:\n xmax = config['plot']['xmaxalpha'] + time_delay\n except :\n print('No xmaxalpha in plot parameters, using XMAX:', xmax)\n pass\n\n casename = ''\n try:\n casename = config['plot']['casename']\n except:\n print('No casename in plot parameters')\n pass\n\n conf_level = [a for a in modeldata.dtype.names if 'P' in a]\n conf_range = float(conf_level[-1].strip('P')) - float(conf_level[0].strip('P'))\n time = modeldata['time']\n\n\n date_1 = datetime.datetime.strptime(firstdate, \"%m/%d/%y\")\n t = [date_1 + datetime.timedelta(days=a - 1) for a in time]\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))\n day_interval = calc_axis_interval((t[xmax] - t[0]).days)\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=day_interval))\n\n fig, ax = plt.subplots()\n try:\n figure_size = config['plot']['figure_size']\n assert len(figure_size) == 2\n plt.figure(figsize=figure_size)\n except:\n pass\n\n\n title = '1-$\\\\alpha$'\n symcolor = ['violet', 'purple']\n ls = ['-', '--']\n for ilevel, cl in enumerate(conf_level[2:-2]):\n plt.plot(t, 1-modeldata[cl], label=cl, c='k', ls=ls[ilevel], lw=0.25)\n for iconf in range(0, 2):\n conf_range = float(conf_level[-1-iconf].strip('P')) - float(conf_level[iconf].strip('P'))\n plt.fill_between(t, 1-modeldata[conf_level[iconf]], 1-modeldata[conf_level[-1-iconf]],\n label='{}% confidence interval'.format(conf_range), color=symcolor[iconf])\n plt.grid(True)\n\n plt.legend(loc='lower left')\n plt.xlabel('Date')\n plt.ylabel('Number of cases')\n title = title + ' ' + casename\n plt.title(title)\n # plt.yscale('log')\n # plt.savefig('Hospital_cases_log.png', dpi=300)\n plt.yscale('linear')\n plt.xlim([date_1, t[xmax]])\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))\n day_interval = calc_axis_interval((t[xmax] - t[0]).days)\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=day_interval))\n plt.gcf().autofmt_xdate()\n plt.ylim(0, 1)\n outputpath = '{}_{}_prob_{}_calibrated_on_{}.png'.format(outpath, names[i], 'alpha', 
config['calibration_mode'])\n plt.savefig(outputpath, dpi=300)\n\n\ndef plot_confidence(configpath, config, inputdata, firstdate):\n\n base = (os.path.split(configpath)[-1]).split('.')[0]\n outpath = os.path.join(os.path.split(os.getcwd())[0], 'output', base)\n calmodes = [S_HOS, S_ICU, S_HOSCUM, S_DEAD, S_INF]\n o_indices = [O_HOS, O_ICU, O_HOSCUM, O_DEAD, O_CUMINF]\n\n titles = ['Hospitalized', 'ICU', 'Hospitalized Cum.', 'Mortalities', 'Infected']\n #['lightcoral', 'brown']\n #['mistyrose', 'lightcoral'],\n symcolors = [['powderblue','steelblue' ],['peachpuff', 'sandybrown'], ['lightgreen','forestgreen' ], ['silver', 'grey'], ['mistyrose', 'lightcoral']]\n y_obs_s = [inputdata[:, I_HOS], inputdata[:, I_ICU], inputdata[:, I_HOSCUM], inputdata[:, I_DEAD], inputdata[:,I_INF]]\n y_maxdef = config['YMAX']\n y_maxhos = y_maxdef * get_mean(config['hosfrac'])\n y_maxicu = y_maxhos * get_mean(config['ICufrac'])\n y_maxdead = y_maxhos * get_mean(config['dfrac']) * 4\n y_maxinf = y_maxdef*5\n y_max = [y_maxhos, y_maxicu, y_maxhos*4, y_maxdead, y_maxinf]\n\n casename = ''\n try:\n casename = config['plot']['casename']\n except:\n print('No casename in plot parameters')\n pass\n\n daily = False\n try:\n daily = config['plot']['daily']\n except:\n print('No daily in plot parameters, assuming cumulative display of mortalities and hospitalized cum. Infected, ICU and hospitalized plotted as actual')\n pass\n\n x_obs = inputdata[:, 0]\n xmax = config['XMAX']\n time_delay = config['time_delay']\n xmax = xmax + time_delay\n # make four output plots for hospitalized, cum hospitalized, dead, and hosm\n for i, calmode in enumerate(calmodes):\n output_index = o_indices[i]\n y_obs = None\n title = titles[i]\n symcolor = symcolors[i]\n y_obs = y_obs_s[i]\n ymax = y_max[i]\n\n if (daily):\n if ((i==2) or (i==3) or (i==4)):\n y_obs = np.concatenate((np.array([0]), np.diff(y_obs)))\n title = 'Daily ' + titles[i]\n ymax = ymax *0.1\n\n # read the\n modelpath = '{}_posterior_prob_{}_calibrated_on_{}.csv'.format(outpath, calmode, config['calibration_mode'])\n modeldata = np.genfromtxt(modelpath, names=True, delimiter=',')\n\n conf_level = [a for a in modeldata.dtype.names if 'P' in a]\n if (daily):\n if ((i == 2) or (i == 3)or (i==4)):\n for ilevel, cl in enumerate(conf_level):\n modeldata[cl] = np.concatenate((np.array([0]), np.diff(modeldata[cl])))\n modeldata['mean'] = np.concatenate((np.array([0]), np.diff(modeldata['mean'])))\n\n\n time = modeldata['time']\n mean = modeldata['mean']\n\n # fig, ax = plt.subplots()\n try:\n figure_size = config['plot']['figure_size']\n assert len(figure_size) == 2\n plt.figure(figsize=figure_size)\n except:\n pass\n # plt.figure()\n date_1 = datetime.datetime.strptime(firstdate, \"%m/%d/%y\")\n t = [date_1 + datetime.timedelta(days=a - 1) for a in time]\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))\n day_interval = calc_axis_interval((t[xmax] - t[0]).days)\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=day_interval))\n\n plt.plot(t, mean, label='Mean (Expectation value)', c='k', lw=0.5)\n symcolor = symcolors[i]\n ls = ['-', '--']\n for ilevel, cl in enumerate(conf_level[2:-2]):\n plt.plot(t, modeldata[cl], label=cl, c='k', ls=ls[ilevel], lw=0.25)\n for iconf in range(0, 2):\n conf_range = float(conf_level[-1-iconf].strip('P')) - float(conf_level[iconf].strip('P'))\n c = symcolor[iconf]\n plt.fill_between(t, modeldata[conf_level[iconf]], modeldata[conf_level[-1-iconf]],\n label='{}% confidence interval'.format(conf_range), color=c)\n if 
y_obs.any():\n x_days = [date_1 + datetime.timedelta(days=a - 1) for a in x_obs]\n plt.scatter(x_days, y_obs, c='k', label='data', marker='o', s=8)\n\n plt.grid(True)\n\n legendloc = 'upper left'\n try:\n legendloc = config['plot']['legendloc']\n except:\n print('No legendloc plot parameters, taking', legendloc)\n pass\n\n\n plt.legend(loc=legendloc)\n plt.xlabel('Date')\n plt.ylabel('Number of cases')\n title = title + ' ' + casename\n plt.title(title)\n # plt.yscale('log')\n # plt.savefig('Hospital_cases_log.png', dpi=300)\n plt.yscale('linear')\n #plt.xlim([t[0], t[-1]])\n plt.xlim([t[0], t[xmax]])\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))\n day_interval = calc_axis_interval((t[xmax] - t[0]).days)\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=day_interval))\n plt.gcf().autofmt_xdate()\n plt.ylim(0, ymax)\n outputpath = '{}_posterior_prob_{}_calibrated_on_{}.png'.format(outpath, calmode, config['calibration_mode'])\n plt.savefig(outputpath, dpi=300)\n\n if y_obs.any():\n # Have to make a new plot to change the size\n plt.figure(figsize=plt.rcParams[\"figure.figsize\"])\n\n plt.plot(t, mean, label='Mean (Expectation value)', c='k', lw=0.5)\n for ilevel, cl in enumerate(conf_level[2:-2]):\n plt.plot(t, modeldata[cl], label=cl, c='k', ls=ls[ilevel], lw=0.25)\n for iconf in range(0, 2):\n conf_range = float(conf_level[-1 - iconf].strip('P')) - float(conf_level[iconf].strip('P'))\n c = symcolor[iconf]\n plt.fill_between(t, modeldata[conf_level[iconf]], modeldata[conf_level[-1 - iconf]],\n label='{}% confidence interval'.format(conf_range), color=c)\n if y_obs.any():\n x_days = [date_1 + datetime.timedelta(days=a - 1) for a in x_obs]\n plt.scatter(x_days, y_obs, c='k', label='data', marker='o', s=8)\n\n plt.grid(True)\n plt.xlabel('Date')\n plt.ylabel('Number of cases')\n title = title + ' ' + casename\n plt.title(title)\n plt.yscale('linear')\n\n legendloc = 'lower right'\n try:\n legendloc = config['plot']['legendloczoom']\n except:\n print('No legendloczoom plot zoom parameters, taking', legendloc)\n pass\n plt.legend(loc=legendloc)\n inow = np.size(y_obs)\n i1 = inow-15\n i1 = max(0, i1)\n # ax.axvline(x=inow, color='silver')\n i2 = i1 + 25\n plt.xlim([t[i1], t[i2]])\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))\n day_interval = calc_axis_interval((t[i2] - t[i1]).days)\n plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=day_interval))\n plt.ylim(np.min(modeldata[conf_level[0]][i1:i2]), np.max(modeldata[conf_level[-1]][i1:i2]))\n outputpath = '{}_posterior_prob_{}_calibrated_on_{}_zoom.png'.format(outpath, calmode, config['calibration_mode'])\n plt.savefig(outputpath, dpi=300)\n plt.close()\n\n\ndef main(configpath):\n # Load the model configuration file and the data (observed cases)\n config = load_config(configpath)\n data = load_data(config)\n\n useworldfile = config['worldfile']\n if (not useworldfile):\n data = generate_hos_actual(data, config)\n else:\n data = generate_zero_columns(data, config)\n\n plot_confidence(configpath, config, data, config['startdate'])\n plot_confidence_alpha(configpath, config, data, config['startdate'])\n\n\nif __name__ == '__main__':\n # This script accepts two input argument:\n # 1) The path to the datafile to be postprocessed (.h5)\n # 2) the path to the configuration .json file\n main(sys.argv[1])\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.plot", "numpy.max", "matplotlib.dates.DayLocator", "matplotlib.pyplot.gca", "matplotlib.pyplot.gcf", "numpy.size", "numpy.diff", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "matplotlib.dates.DateFormatter", "matplotlib.pyplot.title", "numpy.min", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "numpy.genfromtxt", "numpy.array", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.scatter", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlim", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel" ] ]
mitkosl/OcadoHackaton
[ "45067799bf3178d7460d143ff1a2f265c610f7be" ]
[ "recognize_video.py" ]
[ "# USAGE\n# python recognize_video.py --detector face_detection_model \\\n#\t--embedding-model openface_nn4.small2.v1.t7 \\\n#\t--recognizer output/recognizer.pickle \\\n#\t--le output/le.pickle\n\n# import the necessary packages\nfrom imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport time\nimport cv2\nimport os\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--detector\", required=True,\n\thelp=\"path to OpenCV's deep learning face detector\")\nap.add_argument(\"-m\", \"--embedding-model\", required=True,\n\thelp=\"path to OpenCV's deep learning face embedding model\")\nap.add_argument(\"-r\", \"--recognizer\", required=True,\n\thelp=\"path to model trained to recognize faces\")\nap.add_argument(\"-l\", \"--le\", required=True,\n\thelp=\"path to label encoder\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\thelp=\"minimum probability to filter weak detections\")\nargs = vars(ap.parse_args())\n\n# load our serialized face detector from disk\nprint(\"[INFO] loading face detector...\")\nprotoPath = os.path.sep.join([args[\"detector\"], \"deploy.prototxt\"])\nmodelPath = os.path.sep.join([args[\"detector\"],\n\t\"res10_300x300_ssd_iter_140000.caffemodel\"])\ndetector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)\n\n# load our serialized face embedding model from disk\nprint(\"[INFO] loading face recognizer...\")\nembedder = cv2.dnn.readNetFromTorch(args[\"embedding_model\"])\n\n# load the actual face recognition model along with the label encoder\nrecognizer = pickle.loads(open(args[\"recognizer\"], \"rb\").read())\nle = pickle.loads(open(args[\"le\"], \"rb\").read())\n\n# initialize the video stream, then allow the camera sensor to warm up\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\ntime.sleep(2.0)\n\n# start the FPS throughput estimator\nfps = FPS().start()\n\n# loop over frames from the video file stream\nwhile True:\n\t# grab the frame from the threaded video stream\n\tframe = vs.read()\n\n\t# resize the frame to have a width of 600 pixels (while\n\t# maintaining the aspect ratio), and then grab the image\n\t# dimensions\n\tframe = imutils.resize(frame, width=600)\n\t(h, w) = frame.shape[:2]\n\n\t# construct a blob from the image\n\timageBlob = cv2.dnn.blobFromImage(\n\t\tcv2.resize(frame, (300, 300)), 1.0, (300, 300),\n\t\t(104.0, 177.0, 123.0), swapRB=False, crop=False)\n\n\t# apply OpenCV's deep learning-based face detector to localize\n\t# faces in the input image\n\tdetector.setInput(imageBlob)\n\tdetections = detector.forward()\n\n\t# loop over the detections\n\tfor i in range(0, detections.shape[2]):\n\t\t# extract the confidence (i.e., probability) associated with\n\t\t# the prediction\n\t\tconfidence = detections[0, 0, i, 2]\n\n\t\t# filter out weak detections\n\t\tif confidence > args[\"confidence\"]:\n\t\t\t# compute the (x, y)-coordinates of the bounding box for\n\t\t\t# the face\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t# extract the face ROI\n\t\t\tface = frame[startY:endY, startX:endX]\n\t\t\t(fH, fW) = face.shape[:2]\n\n\t\t\t# ensure the face width and height are sufficiently large\n\t\t\tif fW < 20 or fH < 20:\n\t\t\t\tcontinue\n\n\t\t\t# construct a blob for the face ROI, then pass the blob\n\t\t\t# through our face embedding model to obtain the 128-d\n\t\t\t# 
quantification of the face\n\t\t\tfaceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255,\n\t\t\t\t(96, 96), (0, 0, 0), swapRB=True, crop=False)\n\t\t\tembedder.setInput(faceBlob)\n\t\t\tvec = embedder.forward()\n\n\t\t\t# perform classification to recognize the face\n\t\t\tpreds = recognizer.predict_proba(vec)[0]\n\t\t\tj = np.argmax(preds)\n\t\t\tproba = preds[j]\n\t\t\tname = le.classes_[j]\n\n\t\t\t# draw the bounding box of the face along with the\n\t\t\t# associated probability\n\t\t\ttext = \"{}: {:.2f}%\".format(name, proba * 100)\n\t\t\ty = startY - 10 if startY - 10 > 10 else startY + 10\n\t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY),\n\t\t\t\t(0, 0, 255), 2)\n\t\t\tcv2.putText(frame, text, (startX, y),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n\n\t# update the FPS counter\n\tfps.update()\n\n\t# show the output frame\n\tcv2.imshow(\"Frame\", frame)\n\tkey = cv2.waitKey(1) & 0xFF\n\n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n\n# stop the timer and display FPS information\nfps.stop()\nprint(\"[INFO] elasped time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n\n# do a bit of cleanup\ncv2.waitKey(0)\ncv2.destroyAllWindows()\nvs.stop()\ncv2.waitKey(1)\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.array", "numpy.argmax" ] ]
jisuk500/ML_learning
[ "4f77eb34bd652753e63fb75fa2be5bd252232f80" ]
[ "tensorflow examples/3 neural networks/neural_network_raw.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 3 17:15:11 2020\n\n@author: jisuk\n\"\"\"\n\n# %% immport some modules\nfrom __future__ import absolute_import, division, print_function\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.datasets import mnist\n\nimport tensorflow as tf\nimport numpy as np\n\n# %% NMIST dataset parameters\n\nnum_classes = 10 # total classes (0~9) digits\nnum_features = 784 # data features(img shape = 28 * 28)\n\n# training parameters\nlearning_rate = 0.001\ntraining_steps = 3000\nbatch_size = 256\ndisplay_step = 100\n\n# network parameters\nn_hidden_1 = 128\nn_hidden_2 = 256\n\n# %% prepare MNIST data\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n# convert to float32\nx_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)\n# flatten images to 1_D vector of 784 features (28*28)\nx_train, x_test = x_train.reshape(\n [-1, num_features]), x_test.reshape([-1, num_features])\n# normalize images value from [0, 255] to [0, 1]\nx_train, x_test = x_train/255.0, x_test/255.0\n\n# %% Use tf.data API to shuffle and batch data\ntrain_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))\ntrain_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)\n\n# %% store layers weight & bias\n\n# a random value generator to imitialize weights\nrandom_normal = tf.initializers.RandomNormal()\n\nweights = {\n 'h1': tf.Variable(random_normal([num_features, n_hidden_1])),\n 'h2': tf.Variable(random_normal([n_hidden_1, n_hidden_2])),\n 'out': tf.Variable(random_normal([n_hidden_2, num_classes]))\n}\n\nbiases = {\n 'b1': tf.Variable(tf.zeros([n_hidden_1])),\n 'b2': tf.Variable(tf.zeros([n_hidden_2])),\n 'out': tf.Variable(tf.zeros([num_classes]))\n}\n\n# %% create model\n\n\ndef neural_net(x):\n # Hidden fully connected layer with 128 neurons.\n layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n # Apply sigmoid to layer_1 output for non-linearity.\n layer_1 = tf.nn.sigmoid(layer_1)\n\n # Hidden fully connected layer with 256 neurons.\n layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n # Apply sigmoid to layer_2 output for non-linearity.\n layer_2 = tf.nn.sigmoid(layer_2)\n\n # Output fully connected layer with a neuron for each class.\n out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n # Apply softmax to normalize the logits to a probability distribution.\n return tf.nn.softmax(out_layer)\n\n\n# stochastic gradient descent\noptimizer = tf.optimizers.SGD(learning_rate)\n\n# %% cross entropy loss function\n\ndef cross_entropy(y_pred, y_true):\n # encode label to a one hot encoder\n y_true = tf.one_hot(y_true, depth=num_classes)\n # clip prediction values to avoid log(0) error\n y_pred = tf.clip_by_value(y_pred, 1e-9, 1.)\n # compute cross-entropy\n return tf.reduce_mean(-tf.reduce_sum(y_true * tf.math.log(y_pred)))\n\n# accuracy function\n\n\ndef accuracy(y_pred, y_true):\n # predicted class is the index of highest score in prediction vector (i.e. argmax)\n correct_prediction = tf.equal(\n tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)\n\n\n# stochastic gradient descent optimizer.\noptimizer = tf.optimizers.SGD(learning_rate)\n\n\n# %% optmization process\n\ndef run_optimization(x, y):\n # Wrap computation inside a GradientTape for automatic differentiation.\n with tf.GradientTape() as g:\n pred = neural_net(x)\n loss = cross_entropy(pred, y)\n\n # Variables to update, i.e. 
trainable variables.\n trainable_variables = list(weights.values()) + list(biases.values())\n\n # Compute gradients.\n gradients = g.gradient(loss, trainable_variables)\n\n # Update W and b following gradients.\n optimizer.apply_gradients(zip(gradients, trainable_variables))\n\n\n# %% Run training for the given number of steps.\nfor step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):\n # run the optimization to update W and b values.\n run_optimization(batch_x, batch_y)\n\n if step % display_step == 0:\n pred = neural_net(batch_x)\n loss = cross_entropy(pred, batch_y)\n acc = accuracy(pred, batch_y)\n print(\"step: %i, loss: %f, accuracy: %f\" % (step, loss, acc))\n\n# %% test model on validation set.\npred = neural_net(x_test)\nprint(\"Test Accuracy: %f\" % accuracy(pred, y_test))\n\n# visualize predictions.\nimport matplotlib.pyplot as plt\n\n# predict 5 images from validation set.\nn_images = 5\ntest_images = x_test[:n_images]\npredictions = neural_net(test_images)\n\n# display image and model predictions\nfor i in range(n_images):\n plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')\n plt.show()\n print(\"Model prediction: %i\" % np.argmax(predictions.numpy()[i]))\n" ]
[ [ "tensorflow.clip_by_value", "tensorflow.matmul", "tensorflow.nn.softmax", "tensorflow.nn.sigmoid", "tensorflow.zeros", "tensorflow.initializers.RandomNormal", "numpy.reshape", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.cast", "tensorflow.keras.datasets.mnist.load_data", "tensorflow.math.log", "tensorflow.one_hot", "tensorflow.optimizers.SGD", "tensorflow.argmax", "numpy.array", "matplotlib.pyplot.show", "tensorflow.GradientTape" ] ]
wzzheng/DCML
[ "01a7220bac7ebb1e70416ef663f3ba7cee9e8bf5", "01a7220bac7ebb1e70416ef663f3ba7cee9e8bf5" ]
[ "criteria/oproxy.py", "criteria/imrot.py" ]
[ "import numpy as np\nimport torch, torch.nn as nn, torch.nn.functional as F\nimport batchminer\nimport criteria\n\n\"\"\"=================================================================================================\"\"\"\nALLOWED_MINING_OPS = None\nREQUIRES_BATCHMINER = False\nREQUIRES_OPTIM = True\n\n\nclass Criterion(torch.nn.Module):\n def __init__(self, opt):\n \"\"\"\n Args:\n opt: Namespace containing all relevant parameters.\n \"\"\"\n super(Criterion, self).__init__()\n\n self.pars = opt\n\n ####\n self.ALLOWED_MINING_OPS = ALLOWED_MINING_OPS\n self.REQUIRES_BATCHMINER = REQUIRES_BATCHMINER\n self.REQUIRES_OPTIM = REQUIRES_OPTIM\n\n ####\n self.num_proxies = opt.n_classes\n self.embed_dim = opt.embed_dim\n\n self.proxies = torch.randn(self.num_proxies, self.embed_dim)/8.\n self.proxies = torch.nn.Parameter(self.proxies)\n self.lr = opt.lr * opt.loss_oproxy_lrmulti\n\n ###\n self.class_idxs = torch.arange(self.num_proxies)\n\n self.name = 'oproxy'\n\n pars = {'pos_alpha':opt.loss_oproxy_pos_alpha,\n 'pos_delta':opt.loss_oproxy_pos_delta,\n 'neg_alpha':opt.loss_oproxy_neg_alpha,\n 'neg_delta':opt.loss_oproxy_neg_delta}\n\n self.pars = pars\n\n ###\n self.mode = opt.loss_oproxy_mode\n self.euclidean = opt.loss_oproxy_euclidean\n self.d_mode = 'euclidean' if self.euclidean else 'cosine'\n\n ###\n self.f_soft = torch.nn.Softplus()\n\n\n def prep(self, thing):\n return 1.*torch.nn.functional.normalize(thing, dim=1)\n\n\n def forward(self, batch, labels, **kwargs):\n \"\"\"\n Args:\n batch: torch.Tensor: Input of embeddings with size (BS x DIM)\n labels: nparray/list: For each element of the batch assigns a class [0,...,C-1], shape: (BS x 1)\n \"\"\"\n ###\n bs = len(batch)\n batch = self.prep(batch)\n self.labels = labels.unsqueeze(1)\n\n ###\n self.u_labels = self.labels.view(-1)\n self.same_labels = (self.labels.T == self.u_labels.view(-1,1)).to(batch.device).T\n self.diff_labels = (self.class_idxs.unsqueeze(1) != self.labels.T).to(torch.float).to(batch.device).T\n\n ###\n if self.mode == \"anchor\":\n self.dim = 0\n elif self.mode == \"nca\":\n self.dim = 1\n\n ###\n loss = self.compute_proxyloss(batch)\n\n ###\n return loss\n\n ###\n def compute_proxyloss(self, batch):\n proxies = self.prep(self.proxies)\n pars = {k:-p if self.euclidean and 'alpha' in k else p for k,p in self.pars.items()}\n ###\n pos_sims = self.smat(batch, proxies[self.u_labels], mode=self.d_mode)\n sims = self.smat(batch, proxies, mode=self.d_mode)\n ###\n w_pos_sims = -pars['pos_alpha']*(pos_sims-pars['pos_delta'])\n w_neg_sims = pars['neg_alpha']*(sims-pars['neg_delta'])\n\n\n pos_s = self.masked_logsumexp(w_pos_sims,mask=self.same_labels,dim=self.dim,max=True if self.d_mode=='euclidean' else False)\n neg_s = self.masked_logsumexp(w_neg_sims,mask=self.diff_labels,dim=self.dim,max=False if self.d_mode=='euclidean' else True)\n\n pos_s = self.f_soft(pos_s)\n neg_s = self.f_soft(neg_s)\n\n pos_s, neg_s = pos_s.mean(), neg_s.mean()\n\n loss = pos_s + neg_s\n\n return loss\n\n ###\n def smat(self, A, B, mode='cosine'):\n if mode=='cosine':\n return A.mm(B.T)\n elif mode=='euclidean':\n As, Bs = A.shape, B.shape\n return (A.mm(A.T).diag().unsqueeze(-1)+B.mm(B.T).diag().unsqueeze(0)-2*A.mm(B.T)).clamp(min=1e-20).sqrt()\n\n ###\n def masked_logsumexp(self, sims, dim=0, mask=None, max=True):\n if mask is None:\n return torch.logsumexp(sims, dim=dim)\n else:\n if not max:\n ref_v = (sims*mask).min(dim=dim, keepdim=True)[0]\n else:\n ref_v = (sims*mask).max(dim=dim, keepdim=True)[0]\n\n nz_entries = (sims*mask)\n 
nz_entries = nz_entries.max(dim=dim,keepdim=True)[0]+nz_entries.min(dim=dim,keepdim=True)[0]\n nz_entries = torch.where(nz_entries.view(-1))[0].view(-1)\n\n if not len(nz_entries):\n return torch.tensor(0).to(torch.float).to(sims.device)\n else:\n return torch.log((torch.sum(torch.exp(sims-ref_v.detach())*mask,dim=dim)).view(-1)[nz_entries])+ref_v.detach().view(-1)[nz_entries]\n\n # return torch.log((torch.sum(torch.exp(sims)*mask,dim=dim)).view(-1))[nz_entries]\n", "import torch, torch.nn as nn\nfrom tqdm import tqdm\n\n\n\"\"\"=================================================================================================\"\"\"\nALLOWED_MINING_OPS = ['random','semihard', 'distance', 'parametric', 'anticollapse_distance']\nREQUIRES_BATCHMINER = False\nREQUIRES_OPTIM = True\nREQUIRES_EMA_NETWORK = True\n\n### MarginLoss with trainable class separation margin beta. Runs on Mini-batches as well.\nclass Criterion(torch.nn.Module):\n def __init__(self, opt):\n \"\"\"\n Args:\n margin: Triplet Margin.\n nu: Regularisation Parameter for beta values if they are learned.\n beta: Class-Margin values.\n n_classes: Number of different classes during training.\n \"\"\"\n super(Criterion, self).__init__()\n self.classifier = torch.nn.Linear(opt.network_feature_dim, 4, bias=False).to(opt.device)\n self.lr = opt.lr * 10\n self.name = 'imrot'\n\n\n def forward(self, feature_batch, imrot_labels):\n \"\"\"\n Args:\n batch: torch.Tensor: Input of embeddings with size (BS x DIM)\n labels: nparray/list: For each element of the batch assigns a class [0,...,C-1], shape: (BS x 1)\n \"\"\"\n pred_batch = self.classifier(feature_batch)\n loss = torch.nn.CrossEntropyLoss()(pred_batch, imrot_labels.to(pred_batch.device))\n return loss\n" ]
[ [ "torch.nn.functional.normalize", "torch.nn.Parameter", "torch.nn.Softplus", "torch.randn", "torch.tensor", "torch.arange", "torch.logsumexp" ], [ "torch.nn.Linear", "torch.nn.CrossEntropyLoss" ] ]
TangJiahui/AC215-Advanced_Practical_Data_Science
[ "904e595630ddb9b2348889ce0d2cc777bb90f071" ]
[ "mushroom-app/api-service/api/tracker.py" ]
[ "\nimport os\nimport asyncio\nfrom glob import glob\nimport json\nimport pandas as pd\n\nimport tensorflow as tf\nfrom google.cloud import storage\n\n\ngcp_project = os.environ[\"GCP_PROJECT\"]\nbucket_name = \"ac215-mushroom-app-models-small\"\nlocal_experiments_path = \"/persistent/experiments\"\n\n# Setup experiments folder\nif not os.path.exists(local_experiments_path):\n os.mkdir(local_experiments_path)\n\n\ndef download_blob(bucket_name, source_blob_name, destination_file_name):\n \"\"\"Downloads a blob from the bucket.\"\"\"\n\n storage_client = storage.Client(project=gcp_project)\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n\ndef download_experiment_metrics():\n # Get all model metrics\n models_metrics_list = tf.io.gfile.glob(\n \"gs://\"+bucket_name+\"/*/*/*_model_metrics.json\")\n\n timestamp = 0\n\n for metrics_file in models_metrics_list:\n path_splits = metrics_file.split(\"/\")\n user_email = path_splits[3]\n experiment = path_splits[4]\n local_metrics_file = path_splits[-1]\n\n local_metrics_file = os.path.join(\n local_experiments_path, user_email, experiment, local_metrics_file)\n\n if not os.path.exists(local_metrics_file):\n print(\"Copying:\", metrics_file, local_metrics_file)\n\n # Ensure user directory exists\n os.makedirs(os.path.join(\n local_experiments_path, user_email), exist_ok=True)\n os.makedirs(os.path.join(\n local_experiments_path, user_email, experiment), exist_ok=True)\n\n metrics_file = metrics_file.replace(\n \"gs://\"+bucket_name+\"/\", \"\")\n # Download the metric json file\n download_blob(bucket_name, metrics_file,\n local_metrics_file)\n\n file_timestamp = os.path.getmtime(local_metrics_file)\n if file_timestamp > timestamp:\n timestamp = file_timestamp\n\n return timestamp\n\n\ndef agg_experiments():\n print(\"Aggregate all experiments across users\")\n\n # Get Experiments accross users\n models_metrics_list = glob(\n local_experiments_path+\"/*/*/*_model_metrics.json\")\n\n all_models_metrics = []\n for mm_file in models_metrics_list:\n path_splits = mm_file.split(\"/\")\n\n with open(mm_file) as json_file:\n model_metrics = json.load(json_file)\n model_metrics[\"user\"] = path_splits[-3]\n model_metrics[\"experiment\"] = path_splits[-2]\n model_metrics[\"model_name\"] = path_splits[-1].replace(\n \"_model_metrics.json\", \"\")\n all_models_metrics.append(model_metrics)\n\n # Convert to dataframe and save as csv\n df = pd.DataFrame(all_models_metrics)\n df.to_csv(local_experiments_path+\"/all_models_metrics.csv\", index=False)\n\n\ndef compute_leaderboard():\n print(\"Compute Leaderboard and find best model\")\n df = pd.read_csv(local_experiments_path+\"/all_models_metrics.csv\")\n print(\"Shape:\", df.shape)\n print(df.head())\n\n # Group by users\n # Find best model for user (by accuracy)\n leaderboard = df.sort_values(\n by=['accuracy'], ascending=False).groupby('user').head(1).reset_index(drop=True)\n print(\"Shape:\", leaderboard.shape)\n print(leaderboard.head())\n\n # Save a csv with leaderboard.csv\n leaderboard.to_csv(local_experiments_path+\"/leaderboard.csv\", index=False)\n\n # Find the overall best model across users\n best_model = leaderboard.iloc[0].to_dict()\n # Create a json file best_model.json\n with open(os.path.join(local_experiments_path, \"best_model.json\"), \"w\") as json_file:\n json_file.write(json.dumps(best_model))\n\n\ndef download_best_models():\n print(\"Download leaderboard models and artifacts\")\n try:\n\n df = 
pd.read_csv(local_experiments_path+\"/leaderboard.csv\")\n print(\"Shape:\", df.shape)\n print(df.head())\n\n for index, row in df.iterrows():\n print(row[\"user\"], row[\"experiment\"], row[\"model_name\"])\n\n download_file = os.path.join(\n row[\"user\"], row[\"experiment\"], row[\"model_name\"]+\".hdf5\")\n download_blob(bucket_name, download_file,\n os.path.join(local_experiments_path, download_file))\n\n download_file = os.path.join(\n row[\"user\"], row[\"experiment\"], row[\"model_name\"]+\"_train_history.json\")\n download_blob(bucket_name, download_file,\n os.path.join(local_experiments_path, download_file))\n\n # Data details\n download_file = os.path.join(\n row[\"user\"], row[\"experiment\"], \"data_details.json\")\n download_blob(bucket_name, download_file,\n os.path.join(local_experiments_path, download_file))\n except:\n print(\"Error in download_best_models\")\n\n\nclass TrackerService:\n def __init__(self):\n self.timestamp = 0\n\n async def track(self):\n while True:\n await asyncio.sleep(60)\n print(\"Tracking experiments...\")\n\n # Download new model metrics\n timestamp = download_experiment_metrics()\n\n if timestamp > self.timestamp:\n # Aggregate all experiments across users\n agg_experiments()\n\n # Compute Leaderboard and find best model\n compute_leaderboard()\n\n # Download best model\n download_best_models()\n" ]
[ [ "pandas.read_csv", "tensorflow.io.gfile.glob", "pandas.DataFrame" ] ]
kentaroy47/UniTrack
[ "e18aece2a9046225ec3b3dd595a35490d594c699" ]
[ "tracker/mot/box.py" ]
[ "###################################################################\n# File Name: box.py\n# Author: Zhongdao Wang\n# mail: wcd17@mails.tsinghua.edu.cn\n# Created Time: Fri Jan 29 15:16:53 2021\n###################################################################\n\nimport torch\nfrom torchvision import ops\n\nfrom .basetrack import STrack\nfrom .multitracker import AssociationTracker\nfrom utils.box import scale_box, scale_box_input_size, xywh2xyxy, tlbr_to_tlwh\n\n\nclass BoxAssociationTracker(AssociationTracker):\n def __init__(self, opt):\n super(BoxAssociationTracker, self).__init__(opt)\n\n def extract_emb(self, img, obs):\n feat = self.app_model(img.unsqueeze(0).to(self.opt.device).float())\n scale = [feat.shape[-1]/self.opt.img_size[0],\n feat.shape[-2]/self.opt.img_size[1]]\n obs_feat = scale_box(scale, obs).to(self.opt.device)\n obs_feat = [obs_feat[:, :4], ]\n ret = ops.roi_align(feat, obs_feat, self.opt.feat_size).detach().cpu()\n return ret\n\n def prepare_obs(self, img, img0, obs):\n obs = torch.from_numpy(obs[obs[:, 4] > self.opt.conf_thres]).float()\n if len(obs) > 0:\n obs = xywh2xyxy(obs)\n obs = scale_box(self.opt.img_size, obs)\n embs = self.extract_emb(img, obs)\n obs = scale_box_input_size(self.opt.img_size, obs, img0.shape)\n\n if obs.shape[1] == 5:\n detections = [STrack(tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f,\n self.buffer_size, use_kalman=self.opt.use_kalman)\n for (tlbrs, f) in zip(obs, embs)]\n elif obs.shape[1] == 6:\n detections = [STrack(tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f,\n self.buffer_size, category=tlbrs[5],\n use_kalman=self.opt.use_kalman)\n for (tlbrs, f) in zip(obs, embs)]\n else:\n raise ValueError(\n 'Shape of observations should be [n, 5] or [n, 6].')\n else:\n detections = []\n return detections\n" ]
[ [ "torch.from_numpy" ] ]
InkGenius/MLA
[ "c0e73eff9923f38b5c2c3f4f643d0da6084b372e" ]
[ "clustering/nmi.py" ]
[ "# coding=utf-8\nimport numpy as np\nimport math\n\n\ndef compute_cost(A, B):\n # len(A) should be equal to len(B)\n total = len(A)\n A_ids = set(A)\n B_ids = set(B)\n\n # Mutual information\n MI = 0\n eps = 1.4e-45\n for idA in A_ids:\n for idB in B_ids:\n idAOccur = np.where(A == idA)\n idBOccur = np.where(B == idB)\n idABOccur = np.intersect1d(idAOccur, idBOccur)\n px = 1.0 * len(idAOccur[0])/total\n py = 1.0 * len(idBOccur[0])/total\n pxy = 1.0 * len(idABOccur)/total\n MI = MI + pxy * math.log(pxy/(px*py)+eps, 2)\n\n # Normalized Mutual information\n Hx = 0\n for idA in A_ids:\n idAOccurCount = 1.0 * len(np.where(A == idA)[0])\n Hx = Hx - (idAOccurCount/total) * math.log(idAOccurCount/total+eps, 2)\n\n Hy = 0\n for idB in B_ids:\n idBOccurCount = 1.0 * len(np.where(B == idB)[0])\n Hy = Hy - (idBOccurCount/total)*math.log(idBOccurCount/total+eps, 2)\n MIhat = 2.0*MI/(Hx+Hy)\n return MIhat\n\nif __name__ == '__main__':\n A = np.array([1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3])\n B = np.array([1,2,1,1,1,1,1,2,2,2,2,3,1,1,3,3,3])\n print(compute_cost(A,B))" ]
[ [ "numpy.intersect1d", "numpy.array", "numpy.where" ] ]
eedeleon/pytorch-dp
[ "0a5404f700f67a3e823bf64cf267fbb2d6d95f7a" ]
[ "torchdp/test/per_sample_gradient_clip_test.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport unittest\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchdp import PerSampleGradientClipper\nfrom torchvision import transforms\nfrom torchvision.datasets import FakeData\n\n\nclass SampleConvNet(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 16, 8, 3)\n self.conv2 = nn.Conv1d(16, 32, 3, 1)\n self.convf = nn.Conv1d(32, 32, 1, 1)\n for p in self.convf.parameters():\n p.requires_grad = False\n self.fc1 = nn.Linear(23, 17)\n self.fc2 = nn.Linear(32 * 17, 10)\n\n def forward(self, x):\n # x of shape [B, 1, 28, 28]\n x = F.relu(self.conv1(x)) # -> [B, 16, 10, 10]\n x = F.max_pool2d(x, 2, 2) # -> [B, 16, 5, 5]\n x = x.view(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]) # -> [B, 16, 25]\n x = F.relu(self.conv2(x)) # -> [B, 32, 23]\n x = self.convf(x) # -> [B, 32, 23]\n x = self.fc1(x) # -> [B, 32, 17]\n x = x.view(-1, x.shape[-2] * x.shape[-1]) # -> [B, 32 * 17]\n x = self.fc2(x) # -> [B, 10]\n return x\n\n def name(self):\n return \"SampleConvNet\"\n\n\nclass PerSampleGradientClipper_test(unittest.TestCase):\n def setUp(self):\n self.DATA_SIZE = 64\n self.criterion = nn.CrossEntropyLoss()\n\n self.setUp_data()\n self.setUp_original_model()\n self.setUp_clipped_model(clip_value=0.003, run_clipper_step=True)\n\n def setUp_data(self):\n self.ds = FakeData(\n size=self.DATA_SIZE,\n image_size=(1, 35, 35),\n num_classes=10,\n transform=transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]\n ),\n )\n self.dl = DataLoader(self.ds, batch_size=self.DATA_SIZE)\n\n def setUp_original_model(self):\n self.original_model = SampleConvNet()\n for x, y in self.dl:\n logits = self.original_model(x)\n loss = self.criterion(logits, y)\n loss.backward() # puts grad in self.original_model.parameters()\n self.original_grads_norms = torch.stack(\n [\n p.grad.norm()\n for p in self.original_model.parameters()\n if p.requires_grad\n ],\n dim=-1,\n )\n\n def setUp_clipped_model(self, clip_value=0.003, run_clipper_step=True):\n # Deep copy\n self.clipped_model = SampleConvNet() # create the structure\n self.clipped_model.load_state_dict(self.original_model.state_dict()) # fill it\n\n # Intentionally clipping to a very small value\n self.clipper = PerSampleGradientClipper(self.clipped_model, clip_value)\n for x, y in self.dl:\n logits = self.clipped_model(x)\n loss = self.criterion(logits, y)\n loss.backward() # puts grad in self.clipped_model.parameters()\n if run_clipper_step:\n self.clipper.step()\n self.clipped_grads_norms = torch.stack(\n [p.grad.norm() for p in self.clipped_model.parameters() if p.requires_grad],\n dim=-1,\n )\n\n def test_clipped_grad_norm_is_smaller(self):\n \"\"\"\n Test that grad are clipped and their value changes\n \"\"\"\n for original_layer_norm, clipped_layer_norm in zip(\n self.original_grads_norms, self.clipped_grads_norms\n ):\n self.assertLess(float(clipped_layer_norm), float(original_layer_norm))\n\n def test_clipped_grad_norms_not_zero(self):\n \"\"\"\n Test that grads aren't killed by clipping\n \"\"\"\n allzeros = torch.zeros_like(self.clipped_grads_norms)\n self.assertFalse(torch.allclose(self.clipped_grads_norms, allzeros))\n\n def test_clipping_to_high_value_does_nothing(self):\n self.setUp_clipped_model(\n clip_value=9999, run_clipper_step=True\n ) # should be a no-op\n self.assertTrue(\n torch.allclose(self.original_grads_norms, 
self.clipped_grads_norms)\n )\n\n def test_grad_norms_untouched_without_clip_step(self):\n \"\"\"\n Test that grad are not clipped until clipper.step() is called\n \"\"\"\n self.setUp_clipped_model(clip_value=0.003, run_clipper_step=False)\n self.assertTrue(\n torch.allclose(self.original_grads_norms, self.clipped_grads_norms)\n )\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.Conv2d", "torch.utils.data.DataLoader", "torch.zeros_like", "torch.nn.Linear", "torch.nn.Conv1d", "torch.allclose", "torch.nn.functional.max_pool2d" ] ]
SubmissionSNIPit/SNIP-it
[ "0358ec4e7d94a08be73d5d7710f9608684efc861" ]
[ "models/trainers/TickTock.py" ]
[ "import torch\nimport torch.nn as nn\n\nfrom models.networks.assisting_layers.GateDecoratorLayers import GatedBatchNorm\nfrom models.trainers.DefaultTrainer import DefaultTrainer\nfrom utils.constants import OPTIMS\nfrom utils.model_utils import find_right_model\n\n\nclass TickTock(DefaultTrainer):\n\n \"\"\"\n Our interpretation/implementation of TickTock schedule of their paper\n Gate Decorator: Global Filter Pruning Method for Accelerating Deep Convolutional Neural Networks\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.ticks_done = 0\n self.tocks_done = 0\n self.lam = 1\n\n def _forward_pass(self,\n *args,\n train=True):\n \"\"\" implementation of a forward pass \"\"\"\n\n if train:\n self._optimizer_gates.zero_grad()\n accuracy, loss, out = super()._forward_pass(*args, train=train)\n _, __, at_tocks = self.get_state()\n if at_tocks:\n accumulated = torch.zeros([1], device=self._device)\n for name, module in self._model.named_modules():\n if isinstance(module, GatedBatchNorm):\n accumulated += self.lam * module.gate.data.sum()\n loss += accumulated\n return accuracy, loss, out\n\n def _epoch_iteration(self,\n *args):\n \"\"\" implementation of an epoch \"\"\"\n\n self._optimizer_gates = find_right_model(OPTIMS, self._arguments.optimizer,\n params=[param for name, param in self._model.named_parameters() if\n \"gate\" in name],\n lr=self._arguments.learning_rate,\n weight_decay=self._arguments.l2_reg)\n\n at_ft, at_ticks, at_tocks = self.get_state()\n\n # noinspection PyArgumentList\n assert torch.BoolTensor([at_ft, at_ticks, at_tocks]).sum() == 1\n\n super()._epoch_iteration(*args)\n\n if at_ticks:\n self.ticks_done += 1\n at_ft, at_ticks, at_tocks = self.get_state()\n if at_ft:\n self.ticks_done, self.tocks_done = -1, -1\n\n if at_tocks:\n self.tocks_done += 1\n if self.tocks_done == 10:\n self.ticks_done, self.tocks_done = 0, 0\n\n def get_state(self):\n at_ticks = self.ticks_done < 10 and self.tocks_done == 0\n at_tocks = self.ticks_done == 10 and self.tocks_done < 10\n at_ft = self._arguments.pruning_limit <= self._model.structural_sparsity\n return at_ft, at_ticks, at_tocks\n\n def _is_pruning_time(self, epoch):\n\n return self.get_state()[1]\n\n def _backward_pass(self, loss):\n \"\"\" implementation of a backward pass \"\"\"\n\n loss.backward()\n self._model.insert_noise_for_gradient(self._arguments.grad_noise)\n if self._arguments.grad_clip > 0:\n torch.nn.utils.clip_grad_norm_(self._model.parameters(), self._arguments.grad_clip)\n at_ft, at_ticks, at_tocks = self.get_state()\n opt = self._optimizer_gates if at_ticks else self._optimizer\n opt.step()\n\n def train(self):\n\n # convert to gated batchnorms\n for name, module in self._model.named_modules():\n if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):\n new_module = GatedBatchNorm(module, device=self._device)\n names = name.split(\".\")\n last_name = names.pop(-1)\n parent = self._model\n for left in names:\n parent = parent._modules[left]\n parent._modules[last_name] = new_module\n\n self._optimizer = find_right_model(OPTIMS, self._arguments.optimizer,\n params=[param for name, param in self._model.named_parameters() if\n \"gate\" not in name],\n lr=self._arguments.learning_rate,\n weight_decay=self._arguments.l2_reg)\n\n self._optimizer_gates = find_right_model(OPTIMS, self._arguments.optimizer,\n params=[param for name, param in self._model.named_parameters() if\n \"gate\" in name],\n lr=self._arguments.learning_rate,\n weight_decay=self._arguments.l2_reg)\n\n 
super().train()\n" ]
[ [ "torch.BoolTensor", "torch.zeros" ] ]
nv-research-israel/On-Learning-Sets-of-Symmetric-Elements
[ "3a72f3287a66f04388fd713a5546f9496e5ae5ed", "3a72f3287a66f04388fd713a5546f9496e5ae5ed" ]
[ "color_matching_exp/layers.py", "image_selection_exp/layers.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n\nclass SetDropout(nn.Module):\n def __init__(self,p_drop):\n super().__init__()\n self.drop = nn.Dropout2d(p=p_drop)\n\n def forward(self, x):\n b,n,c,h,w = x.size()\n x = x.view(b*n,c,h,w)\n x = self.drop(x)\n x = x.view(b, n, c, h, w)\n return x\n\n\nclass Conv2dSAittala(nn.Module):\n def __init__(self,in_channels, out_channels, kernel_size,padding=1):\n super().__init__()\n self.out_channels = out_channels\n self.conv = nn.Conv2d(in_channels, int(out_channels/2), kernel_size, padding=padding)\n torch.nn.init.xavier_normal_(self.conv.weight)\n self.bn = nn.BatchNorm2d(num_features=out_channels)\n\n def forward(self, x):\n b, n, c, h, w = x.size()\n x = self.conv(x.view(n * b, c, h, w))\n x = x.view(b, n, int(self.out_channels/2), h, w)\n x_max = x.max(dim=1, keepdim=True)[0]\n x = torch.cat([x, x_max.repeat(1,n,1,1,1)],dim=2)\n x = x.view(b * n, self.out_channels, h, w)\n x = self.bn(x)\n x = x.view(b, n, self.out_channels, h, w)\n return x\n\n\nclass Conv2dSridhar(nn.Module):\n def __init__(self,in_channels, out_channels, kernel_size,padding=1):\n super().__init__()\n self.out_channels = out_channels\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding)\n torch.nn.init.xavier_normal_(self.conv.weight)\n self.bn = nn.BatchNorm2d(num_features=out_channels)\n\n def forward(self, x):\n b, n, c, h, w = x.size()\n x = self.conv(x.view(n * b, c, h, w))\n x = x.view(b, n, self.out_channels, h, w)\n x_mean = x.mean(dim=1, keepdim=True)\n x = x - x_mean\n x = x.view(b * n, self.out_channels, h, w)\n x = self.bn(x)\n x = x.view(b, n, self.out_channels, h, w)\n return x\n\n\nclass Conv2dSiamese(nn.Module):\n def __init__(self,in_channels, out_channels, kernel_size,padding=1):\n super().__init__()\n self.out_channels = out_channels\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding)\n torch.nn.init.xavier_normal_(self.conv.weight)\n self.bn = nn.BatchNorm2d(num_features=out_channels)\n\n\n def forward(self, x):\n b,n,c,h,w = x.size()\n x = x.view(b*n,c,h,w)\n x = self.bn(self.conv(x))\n x = x.view(b, n, self.out_channels, h, w)\n return x\n\n\nclass Conv2dDeepSym(nn.Module):\n def __init__(self,in_channels, out_channels, kernel_size,padding=0,use_max=0):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n 
self.use_max = use_max\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding)\n self.conv_s = nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding)\n self.bn = nn.BatchNorm2d(num_features=out_channels)\n self.bns = nn.BatchNorm2d(num_features=out_channels)\n torch.nn.init.xavier_normal_(self.conv.weight)\n torch.nn.init.xavier_normal_(self.conv_s.weight)\n\n def forward(self, x):\n b, n, c, h, w = x.size()\n x1 = self.bn(self.conv(x.view(n * b, c, h, w)))\n if self.use_max:\n x2 = self.bns(self.conv_s(torch.max(x, dim=1, keepdim=False)[0]))\n else:\n x2 = self.bns(self.conv_s(torch.sum(x, dim=1, keepdim=False)))\n x2 = x2.view(b, 1, h, w, self.out_channels).repeat(1, n, 1, 1, 1).view(b * n, self.out_channels, h, w)\n x = x1 + x2\n x = x.view(b, n, self.out_channels, h, w)\n return x\n\n\nclass ReluSets(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n b,n,c,h,w = x.size()\n x = x.view(b*n,c,h,w)\n x = F.relu(x)\n x = x.view(b, n, c, h, w)\n return x\n\n\nclass SetMaxPool2d(nn.Module):\n def __init__(self,stride):\n self.stride = stride\n super().__init__()\n\n def forward(self, x):\n b,n,c,h,w = x.size()\n x = x.view(b*n,c,h,w)\n x = F.max_pool2d(x,kernel_size = 2,stride=self.stride)\n x = x.view(b, n, c, int(h/self.stride), int(w/self.stride))\n return x\n\n\nclass SetUpsample(nn.Module):\n def __init__(self,scale_factor):\n self.scale_factor = scale_factor\n super().__init__()\n\n def forward(self, x):\n b, n, c, h, w = x.size()\n x = x.view(b * n, c, h, w)\n x = F.upsample(x, scale_factor=self.scale_factor, mode='bilinear', align_corners=True)\n x = x.view(b, n, c, self.scale_factor*h, self.scale_factor*w)\n return x\n\n\nclass DeepSetsBlock(nn.Module):\n def __init__(self,channels=(256, 128, 1)):\n super(DeepSetsBlock, self).__init__()\n # layers\n self.channels = channels\n self.fc_1 = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[0])\n self.fc_2 = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[1])\n self.fc_3 = torch.nn.Linear(in_features=self.channels[1], out_features=self.channels[2])\n self.fc_1s = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[0])\n self.fc_2s = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[1])\n self.fc_3s = torch.nn.Linear(in_features=self.channels[1], out_features=self.channels[2])\n\n self.bn1 = torch.nn.BatchNorm1d(channels[0])\n self.bn2 = torch.nn.BatchNorm1d(channels[1])\n self.bn3 = torch.nn.BatchNorm1d(channels[2])\n\n self.bn1s = torch.nn.BatchNorm1d(channels[0])\n self.bn2s = torch.nn.BatchNorm1d(channels[1])\n self.bn3s = torch.nn.BatchNorm1d(channels[2])\n\n # initializations\n torch.nn.init.xavier_normal_(self.fc_1.weight)\n torch.nn.init.xavier_normal_(self.fc_2.weight)\n torch.nn.init.xavier_normal_(self.fc_3.weight)\n torch.nn.init.xavier_normal_(self.fc_1s.weight)\n torch.nn.init.xavier_normal_(self.fc_2s.weight)\n torch.nn.init.xavier_normal_(self.fc_3s.weight)\n\n def forward(self, x):\n b, n, c = x.size()\n x_col = x.view(b * n, c)\n x1 = self.bn1(self.fc_1(x_col))\n x2 = self.fc_1s(torch.max(x, dim=1, keepdim=False)[0])\n x2 = self.bn1s(x2.view(b,1,self.channels[0]).repeat(1,n,1).view(b*n,self.channels[0]))\n x = x1 + x2\n x = F.relu(x)\n x = x.view(b,n,self.channels[0])\n\n x_col = x.view(b * n, self.channels[0])\n x1 = self.bn2(self.fc_2(x_col))\n x2 = self.fc_2s(torch.max(x, dim=1, keepdim=False)[0])\n x2 = self.bn2s(x2.view(b, 1, self.channels[1]).repeat(1, n, 
1).view(b * n, self.channels[1]))\n x = x1 + x2\n x = F.relu(x)\n x = x.view(b, n, self.channels[1])\n\n x_col = x.view(b * n, self.channels[1])\n x1 = self.bn3(self.fc_3(x_col))\n x2 = self.fc_3s(torch.max(x, dim=1, keepdim=False)[0])\n x2 = self.bn3s(x2.view(b, 1, self.channels[1]).repeat(1, n, 1).view(b * n, self.channels[2]))\n x = x1 + x2\n x = F.relu(x)\n x = x.view(b, n, self.channels[2])\n return x\n\n\nclass DeepSetsBlockSiamese(nn.Module):\n def __init__(self, channels=(256, 128, 1)):\n super(DeepSetsBlockSiamese, self).__init__()\n use_bn = False\n # layers\n self.channels = channels\n self.fc_1 = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[0])\n self.fc_2 = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[1])\n self.fc_3 = torch.nn.Linear(in_features=self.channels[1], out_features=self.channels[2])\n\n self.bn1 = torch.nn.BatchNorm1d(channels[0])\n self.bn2 = torch.nn.BatchNorm1d(channels[1])\n self.bn3 = torch.nn.BatchNorm1d(channels[2])\n\n # initializations\n torch.nn.init.xavier_normal_(self.fc_1.weight)\n torch.nn.init.xavier_normal_(self.fc_2.weight)\n torch.nn.init.xavier_normal_(self.fc_3.weight)\n\n def forward(self, x):\n b, n, c = x.size()\n x_col = x.view(b * n, c)\n x = self.bn1(self.fc_1(x_col))\n x = F.relu(x)\n x = x.view(b, n, self.channels[0])\n\n x_col = x.view(b * n, self.channels[0])\n x = self.bn2(self.fc_2(x_col))\n x = F.relu(x)\n x = x.view(b, n, self.channels[1])\n\n x_col = x.view(b * n, self.channels[1])\n x = self.bn3(self.fc_3(x_col))\n x = F.relu(x)\n x = x.view(b, n, self.channels[2])\n return x\n\n\nclass MLPBlock(nn.Module):\n def __init__(self, channels=(256, 128, 1)):\n super(MLPBlock, self).__init__()\n n = 6\n # layers\n self.channels = channels\n self.fc_1 = torch.nn.Linear(in_features=n*self.channels[0], out_features=n*self.channels[0])\n self.fc_2 = torch.nn.Linear(in_features=n*self.channels[0], out_features=n*self.channels[1])\n self.fc_3 = torch.nn.Linear(in_features=n*self.channels[1], out_features=n*self.channels[2])\n\n self.bn1 = torch.nn.BatchNorm1d(n*channels[0])\n self.bn2 = torch.nn.BatchNorm1d(n*channels[1])\n self.bn3 = torch.nn.BatchNorm1d(n*channels[2])\n\n # initializations\n torch.nn.init.xavier_normal_(self.fc_1.weight)\n torch.nn.init.xavier_normal_(self.fc_2.weight)\n torch.nn.init.xavier_normal_(self.fc_3.weight)\n\n def forward(self, x):\n b, n, c = x.size()\n x = x.view(b, n*c)\n x = self.bn1(self.fc_1(x))\n x = F.relu(x)\n x = self.bn2(self.fc_2(x))\n x = F.relu(x)\n x = self.bn3(self.fc_3(x))\n x = F.relu(x)\n x = x.view(b, n, self.channels[2])\n return x\n", "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n def forward(self, x):\n return x\n\n\nclass MLP(nn.Module):\n def __init__(self, channels,args):\n super(MLP, self).__init__()\n use_bn = 0\n self.channels = channels\n self.fc_1 = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[0])\n self.fc_2 = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[1])\n self.fc_3 = torch.nn.Linear(in_features=self.channels[1], out_features=self.channels[2])\n self.drop1 = torch.nn.Dropout(p=args.drop_rate_mlp)\n self.drop2 = torch.nn.Dropout(p=args.drop_rate_mlp)\n if use_bn:\n self.bn1 = torch.nn.BatchNorm1d(256)\n self.bn2 = torch.nn.BatchNorm1d(128)\n self.bn1s = torch.nn.BatchNorm1d(256)\n self.bn2s = torch.nn.BatchNorm1d(128)\n else:\n self.bn1 = Identity()\n self.bn2 = Identity()\n self.bn1s = Identity()\n self.bn2s = Identity()\n # initializations\n torch.nn.init.xavier_normal_(self.fc_1.weight)\n torch.nn.init.xavier_normal_(self.fc_2.weight)\n torch.nn.init.xavier_normal_(self.fc_3.weight)\n\n\n def forward(self, x):\n x = self.drop1(F.elu(self.bn1(self.fc_1(x))))\n x = self.drop2(F.elu(self.bn2(self.fc_2(x))))\n x = self.fc_3(x)\n return x\n\n\n\nclass ConvBlock(nn.Module):\n def __init__(self, channels,args):\n super(ConvBlock, self).__init__()\n self.channels = channels\n # layers\n self.conv_1 = torch.nn.Conv2d(in_channels = channels[0], out_channels = channels[1], kernel_size = 3)\n self.conv_2 = torch.nn.Conv2d(in_channels = channels[1], out_channels = channels[1], kernel_size = 3)\n self.conv_3 = torch.nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=3)\n self.bn1 = torch.nn.BatchNorm2d(num_features=channels[1])\n self.bn2 = torch.nn.BatchNorm2d(num_features=channels[1])\n self.bn3 = torch.nn.BatchNorm2d(num_features=channels[2])\n self.drop1 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n self.drop2 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n self.drop3 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n\n # initializations\n torch.nn.init.xavier_normal_(self.conv_1.weight)\n torch.nn.init.xavier_normal_(self.conv_2.weight)\n torch.nn.init.xavier_normal_(self.conv_3.weight)\n\n def forward(self, x):\n b,n,c,h,w = x.size()\n x = x.view(n*b,c,h,w)\n x = self.drop1(F.elu(self.bn1(self.conv_1(x))))\n x = self.drop2(F.elu(self.bn2(self.conv_2(x))))\n x = self.drop3(F.elu(self.bn3(self.conv_3(x))))\n return x\n\n\nclass DeepSetsBlock(nn.Module):\n def __init__(self,args,channels=(256, 128, 1)):\n super(DeepSetsBlock, self).__init__()\n use_bn = False\n # layers\n self.channels = channels\n self.fc_1 = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[0])\n self.fc_2 = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[1])\n self.fc_3 = torch.nn.Linear(in_features=self.channels[1], out_features=self.channels[2])\n self.fc_1s = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[0])\n self.fc_2s = torch.nn.Linear(in_features=self.channels[0], out_features=self.channels[1])\n self.fc_3s = torch.nn.Linear(in_features=self.channels[1], 
out_features=self.channels[2])\n self.drop1 = torch.nn.Dropout(p=args.drop_rate_mlp)\n self.drop2 = torch.nn.Dropout(p=args.drop_rate_mlp)\n if use_bn:\n self.bn1 = torch.nn.BatchNorm1d(256)\n self.bn2 = torch.nn.BatchNorm1d(128)\n self.bn1s = torch.nn.BatchNorm1d(256)\n self.bn2s = torch.nn.BatchNorm1d(128)\n else:\n self.bn1 = Identity()\n self.bn2 = Identity()\n self.bn1s = Identity()\n self.bn2s = Identity()\n # initializations\n torch.nn.init.xavier_normal_(self.fc_1.weight)\n torch.nn.init.xavier_normal_(self.fc_2.weight)\n torch.nn.init.xavier_normal_(self.fc_3.weight)\n torch.nn.init.xavier_normal_(self.fc_1s.weight)\n torch.nn.init.xavier_normal_(self.fc_2s.weight)\n torch.nn.init.xavier_normal_(self.fc_3s.weight)\n\n def forward(self, x):\n b, n, c = x.size()\n x_col = x.view(b * n, c)\n x1 = self.bn1(self.fc_1(x_col))\n x2 = self.fc_1s(torch.max(x, dim=1, keepdim=False)[0])\n x2 = self.bn1s(x2.view(b,1,self.channels[0]).repeat(1,n,1).view(b*n,self.channels[0]))\n x = x1 + x2\n x = self.drop1(F.elu(x))\n x = x.view(b,n,self.channels[0])\n\n x_col = x.view(b * n, self.channels[0])\n x1 = self.bn2(self.fc_2(x_col))\n x2 = self.fc_2s(torch.max(x, dim=1, keepdim=False)[0])\n x2 = self.bn2s(x2.view(b, 1, self.channels[1]).repeat(1, n, 1).view(b * n, self.channels[1]))\n x = x1 + x2\n x = self.drop2(F.elu(x))\n x = x.view(b, n, self.channels[1])\n\n x_col = x.view(b * n, self.channels[1])\n x1 = self.fc_3(x_col)\n x2 = self.fc_3s(torch.max(x, dim=1, keepdim=False)[0])\n x2 = x2.view(b, 1, self.channels[2]).repeat(1, n, 1).view(b * n, self.channels[2])\n x = x1 + x2\n x = x.view(b, n,self.channels[2])\n return x\n\n\nclass DeepSymmetricConvBlock(nn.Module):\n def __init__(self, channels,args):\n super(DeepSymmetricConvBlock, self).__init__()\n self.channels = channels\n self.use_max = args.use_max\n # layers\n self.conv_1 = torch.nn.Conv2d(in_channels = channels[0], out_channels = channels[1], kernel_size = 3)\n self.conv_2 = torch.nn.Conv2d(in_channels = channels[1], out_channels = channels[1], kernel_size = 3)\n self.conv_3 = torch.nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=3)\n self.bn1 = torch.nn.BatchNorm2d(num_features=channels[1])\n self.bn2 = torch.nn.BatchNorm2d(num_features=channels[1])\n self.bn3 = torch.nn.BatchNorm2d(num_features=channels[2])\n self.conv_1s = torch.nn.Conv2d(in_channels=channels[0], out_channels=channels[1], kernel_size=3)\n self.conv_2s = torch.nn.Conv2d(in_channels=channels[1], out_channels=channels[1], kernel_size=3)\n self.conv_3s = torch.nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=3)\n self.bn1s = torch.nn.BatchNorm2d(num_features=channels[1])\n self.bn2s = torch.nn.BatchNorm2d(num_features=channels[1])\n self.bn3s = torch.nn.BatchNorm2d(num_features=channels[2])\n self.drop1 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n self.drop2 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n self.drop3 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n\n # initializations\n torch.nn.init.xavier_normal_(self.conv_1.weight)\n torch.nn.init.xavier_normal_(self.conv_2.weight)\n torch.nn.init.xavier_normal_(self.conv_3.weight)\n torch.nn.init.xavier_normal_(self.conv_1s.weight)\n torch.nn.init.xavier_normal_(self.conv_2s.weight)\n torch.nn.init.xavier_normal_(self.conv_3s.weight)\n\n def forward(self, x):\n b,n,c,h,w = x.size()\n x1 = self.bn1(self.conv_1(x.view(n*b,c,h,w)))\n if self.use_max:\n x2 = self.bn1s(self.conv_1s(torch.max(x, dim=1,keepdim=False)[0]))\n else:\n x2 = self.bn1s(self.conv_1s(torch.sum(x, 
dim=1,keepdim=False)))\n\n x2 = x2.unsqueeze(1).repeat(1,n,1,1,1).view(b*n,self.channels[1],h-2,w-2) # bug fix\n x = x1 + x2\n x = self.drop1(F.elu(x))\n x = x.view(b, n,self.channels[1],h-2,w-2)\n\n b, n, c, h, w = x.size()\n x1 = self.bn2(self.conv_2(x.view(n * b, c, h, w)))\n if self.use_max:\n x2 = self.bn2s(self.conv_2s(torch.max(x, dim=1, keepdim=False)[0]))\n else:\n x2 = self.bn2s(self.conv_2s(torch.sum(x, dim=1, keepdim=False)))\n\n x2 = x2.unsqueeze(1).repeat(1, n, 1, 1, 1).view(b * n, self.channels[1], h - 2,w - 2)\n x = x1 + x2\n x = self.drop2(F.elu(x))\n x = x.view(b, n,self.channels[1], h - 2, w - 2)\n\n b, n, c, h, w = x.size()\n x1 = self.bn3(self.conv_3(x.view(n * b, c, h, w)))\n if self.use_max:\n x2 = self.bn3s(self.conv_3s(torch.max(x, dim=1, keepdim=False)[0]))\n else:\n x2 = self.bn3s(self.conv_3s(torch.sum(x, dim=1, keepdim=False)))\n\n x2 = x2.unsqueeze(1).repeat(1, n, 1, 1, 1).view(b * n, self.channels[2], h - 2,w - 2)\n x = x1 + x2\n x = self.drop3(F.elu(x))\n return x\n\n\nclass SridharConvBlock(nn.Module):\n def __init__(self, channels,args):\n super(SridharConvBlock, self).__init__()\n self.channels = channels\n # layers\n self.conv_1 = torch.nn.Conv2d(in_channels = channels[0], out_channels = channels[1], kernel_size = 3)\n self.conv_2 = torch.nn.Conv2d(in_channels = channels[1], out_channels = channels[1], kernel_size = 3)\n self.conv_3 = torch.nn.Conv2d(in_channels=channels[1], out_channels=channels[2], kernel_size=3)\n self.bn1 = torch.nn.BatchNorm2d(num_features=channels[1])\n self.bn2 = torch.nn.BatchNorm2d(num_features=channels[1])\n self.bn3 = torch.nn.BatchNorm2d(num_features=channels[2])\n self.drop1 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n self.drop2 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n self.drop3 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n\n # initializations\n torch.nn.init.xavier_normal_(self.conv_1.weight)\n torch.nn.init.xavier_normal_(self.conv_2.weight)\n torch.nn.init.xavier_normal_(self.conv_3.weight)\n\n def forward(self, x):\n b,n,c,h,w = x.size()\n x = self.conv_1(x.view(n*b,c,h,w))\n x = x.view(b, n,self.channels[1],h-2,w-2)\n x_mean = x.mean(dim=1,keepdim=True)\n x = x-x_mean\n x = x.view(b*n,self.channels[1],h-2,w-2)\n x = self.bn1(x)\n x = self.drop1(F.elu(x))\n x = x.view(b,n,self.channels[1],h-2,w-2)\n\n b, n, c, h, w = x.size()\n x = self.conv_2(x.view(n * b, c, h, w))\n x = x.view(b, n, self.channels[1], h - 2, w - 2)\n x_mean = x.mean(dim=1, keepdim=True)\n x = x - x_mean\n x = x.view(b * n, self.channels[1], h - 2, w - 2)\n x = self.bn2(x)\n x = self.drop2(F.elu(x))\n x = x.view(b, n, self.channels[1], h - 2, w - 2)\n\n b, n, c, h, w = x.size()\n x = self.conv_3(x.view(n * b, c, h, w))\n x = x.view(b, n, self.channels[2], h - 2, w - 2)\n x_mean = x.mean(dim=1, keepdim=True)\n x = x - x_mean\n x = x.view(b * n, self.channels[2], h - 2, w - 2)\n x = self.bn3(x)\n x = self.drop3(F.elu(x))\n return x\n\n\nclass AittalaConvBlock(nn.Module):\n def __init__(self, channels,args):\n super(AittalaConvBlock, self).__init__()\n self.channels = channels\n # layers\n self.conv_1 = torch.nn.Conv2d(in_channels = channels[0], out_channels = channels[1], kernel_size = 3)\n self.conv_2 = torch.nn.Conv2d(in_channels = 2*channels[1], out_channels = channels[1], kernel_size = 3)\n self.conv_3 = torch.nn.Conv2d(in_channels= 2*channels[1], out_channels = channels[2], kernel_size=3)\n self.bn1 = torch.nn.BatchNorm2d(num_features=2*channels[1])\n self.bn2 = torch.nn.BatchNorm2d(num_features=2*channels[1])\n self.bn3 = 
torch.nn.BatchNorm2d(num_features=2*channels[2])\n self.drop1 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n self.drop2 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n self.drop3 = torch.nn.Dropout2d(p=args.drop_rate_conv)\n\n # initializations\n torch.nn.init.xavier_normal_(self.conv_1.weight)\n torch.nn.init.xavier_normal_(self.conv_2.weight)\n torch.nn.init.xavier_normal_(self.conv_3.weight)\n\n def forward(self, x):\n\n b,n,c,h,w = x.size()\n x = self.conv_1(x.view(n*b,c,h,w))\n x = x.view(b, n,self.channels[1],h-2,w-2)\n x_max = x.max(dim=1,keepdim=True)[0]\n x_max = x_max.repeat(1,n,1,1,1)\n x = torch.cat([x, x_max],dim=2)\n x = x.view(b*n,2*self.channels[1],h-2,w-2)\n x = self.bn1(x)\n x = self.drop1(F.elu(x))\n x = x.view(b,n,2*self.channels[1],h-2,w-2)\n\n b, n, c, h, w = x.size()\n x = self.conv_2(x.view(n * b, c, h, w))\n x = x.view(b, n, self.channels[1], h - 2, w - 2)\n x_max = x.max(dim=1, keepdim=True)[0]\n x_max = x_max.repeat(1, n, 1, 1, 1)\n x = torch.cat([x, x_max], dim=2)\n x = x.view(b * n, 2 * self.channels[1], h - 2, w - 2)\n x = self.bn2(x)\n x = self.drop2(F.elu(x))\n x = x.view(b, n, 2 * self.channels[1], h - 2, w - 2)\n\n b, n, c, h, w = x.size()\n x = self.conv_3(x.view(n * b, c, h, w))\n x = x.view(b, n, self.channels[2], h - 2, w - 2)\n x_max = x.max(dim=1, keepdim=True)[0]\n x_max = x_max.repeat(1, n, 1, 1, 1)\n x = torch.cat([x, x_max], dim=2)\n x = x.view(b * n, 2 * self.channels[2], h - 2, w - 2)\n x = self.bn3(x)\n x = self.drop3(F.elu(x))\n return x\n" ]
[ [ "torch.nn.functional.upsample", "torch.nn.BatchNorm1d", "torch.nn.Dropout2d", "torch.max", "torch.nn.Conv2d", "torch.nn.init.xavier_normal_", "torch.sum", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.BatchNorm2d", "torch.nn.functional.max_pool2d" ], [ "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.nn.Dropout2d", "torch.max", "torch.cat", "torch.nn.Conv2d", "torch.nn.init.xavier_normal_", "torch.sum", "torch.nn.Linear", "torch.nn.BatchNorm2d", "torch.nn.functional.elu" ] ]
JorgeSaNel/Predicci-n-de-valores-y-gesti-n-de-cartera-en-el-mercado-burs-til
[ "02660a9d141e8e82533cfc115f0dcc787c8661b3" ]
[ "py/import_data_stock_prediction.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"1_Import_Data_Stock_Prediction.ipynb\n\nAutomatically generated by Colaboratory.\n\nOriginal file is located at\n https://colab.research.google.com/drive/16M9YfgtSjMARmvqPtTB3nMXsUoMhH3wb\n\"\"\"\n\n# Descomentar esta línea si se quiere ejecutar en local\n# !pip install yfinance\n\n# Importando librerías\nimport numpy as np\nimport matplotlib. pyplot as plt\nimport pandas as pd\nimport yfinance as yf\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nimport datetime as dt\nimport seaborn as sns\nimport matplotlib.dates as mdates\n\n\"\"\"## Funciones\"\"\"\n\ndef GetStockDataByTicker(tickerSymbol, start_date, end_date):\n # Descarga de datos\n tickerData = yf.Ticker(tickerSymbol)\n\n # Obtiene el precio histórico de la compañía\n tickerDf = tickerData.history(period='1d', start=start_date, end=end_date, auto_adjust=False)\n tickerDf = tickerDf.dropna().sort_index(ascending=False)\n\n # Se modifica el valor 'Adj Close' teniendo en cuenta los Splits y Dividendos de cada compañía, y se crea el valor 'Adj Open'\n tickerDf = ChangeStockSplit(tickerDf)\n\n # Se calculan las medias de las últimas 20-50-100-200 semanas con este nuevo valor.\n SMA_20 = 20\n SMA_50 = 50\n SMA_100 = 100\n SMA_200 = 200\n SMAs=[SMA_20, SMA_50, SMA_100, SMA_200]\n tickerDf = CalculateSMA(tickerDf, SMAs)\n\n # Se calcula la EMA de los últimos 10 días\n tickerDf = CalculateEMA(tickerDf, 10)\n\n return tickerDf\n\ndef GetStockInformationByTicker(tickerSymbol, start_date, end_date):\n tickerDf = GetStockDataByTicker(tickerSymbol, start_date, end_date)\n\n companyInformation = pd.DataFrame(columns=['Ticker', 'Short Name', 'Sector', 'Industry', 'Recommendation', 'Recommendation Mean', 'Target Low Price', 'Target High Price', 'Target Mean Price', 'Current Price', '%', 'Volume', 'Average Volume', 'EBITDA', 'EBITDA Margins', 'Fecha Actualización', '52 Week Change', 'SMA20', 'SMA50', 'SMA100', 'SMA200'])\n\n #get data on this ticker\n tickerData = yf.Ticker(tickerSymbol)\n companyList = {'Ticker': tickerData.info['symbol'], \n 'Short Name': tickerData.info['shortName'], \n 'Sector': tickerData.info['sector'], \n 'Industry': tickerData.info['industry'], \n 'Recommendation': tickerData.info['recommendationKey'], \n 'Recommendation Mean': tickerData.info['recommendationMean'],\n 'Target Low Price': tickerData.info['targetLowPrice'],\n 'Target High Price': tickerData.info['targetHighPrice'],\n 'Target Mean Price': tickerData.info['targetMeanPrice'], \n 'Current Price': tickerData.info['currentPrice'],\n '%': 0,\n 'Volume': tickerData.info['volume'], \n 'Average Volume': tickerData.info['averageVolume'],\n 'EBITDA': tickerData.info['ebitda'],\n 'EBITDA Margins': tickerData.info['ebitdaMargins'],\n 'Fecha Actualización': datetime.now().strftime('%Y-%m-%d %H:%M'),\n '52 Week Change': tickerData.info['52WeekChange'],\n 'SMA20': round(tickerDf.iloc[-1]['SMA_20'], 2),\n 'SMA50': round(tickerDf.iloc[-1]['SMA_50'], 2),\n 'SMA100': round(tickerDf.iloc[-1]['SMA_100'], 2),\n 'SMA200': round(tickerDf.iloc[-1]['SMA_200'], 2),\n 'EMA10': round(tickerDf.iloc[-1]['EMA_10'], 2)}\n companyInformation = companyInformation.append(companyList, ignore_index=True)\n\n return companyInformation\n\ndef CalculateSMA(data, SMAs):\n data = data.sort_index(ascending=True)\n for i in SMAs:\n data[\"SMA_\" + str(i)] = data['Adj Close'].rolling(window=i).mean()\n \n return data\n\ndef CalculateEMA(data, days):\n data['EMA_' + str(days)] = data['Adj Close'].ewm(span=days, adjust=False).mean()\n\n 
return data\n\ndef Graph_StockEvolutionByTime(tickerSymbol, stockEvolution, last_days=0):\n if last_days != 0:\n stockEvolution = stockEvolution.tail(last_days)\n \n fig, ax = plt.subplots(figsize=(14,8))\n\n import matplotlib.dates as mdates\n my_year_month_fmt = mdates.DateFormatter('%m-%Y')\n\n ax.plot(stockEvolution['Adj Close'], label='Price')\n \n # SMA Representation\n ax.plot(stockEvolution['SMA_20'], label = '20-days SMA')\n ax.plot(stockEvolution['SMA_50'], label = '50-days SMA')\n ax.plot(stockEvolution['SMA_100'], label = '100-days SMA')\n ax.plot(stockEvolution['SMA_200'], label = '200-days SMA')\n\n # EMA Representation\n ax.plot(stockEvolution['EMA_10'], label = '10-days EMA')\n\n ax.legend(loc='best')\n ax.set_ylabel('Precio en $')\n ax.set_xlabel('Evolución en el Tiempo')\n title = ('Evolución ajustada de ' + tickerSymbol + ' en el tiempo')\n ax.set_title(title)\n ax.xaxis.set_major_formatter(my_year_month_fmt)\n\ndef ChangeStockSplit(adj_TickerDf):\n adj_TickerDf = adj_TickerDf.sort_index(ascending=False)\n new_split = 1\n for i in range(len(adj_TickerDf)-1):\n first_change = False;\n div = adj_TickerDf['Adj Close'].iloc[i+1] / adj_TickerDf['Adj Close'].iloc[i]\n\n # Se calculan los nuevos valores\n adj_TickerDf.loc[adj_TickerDf.index[i], 'Adj Open'] = adj_TickerDf['Open'].iloc[i] / new_split\n adj_TickerDf.loc[adj_TickerDf.index[i], 'Adj Close'] = adj_TickerDf['Adj Close'].iloc[i] / new_split\n\n if (round(div) > 1):\n new_split = round(div)\n \n # Se cambia el valor del último registro ya que no se está recorriendo en el bucle\n adj_TickerDf.loc[adj_TickerDf.index[-1], 'Adj Open'] = adj_TickerDf['Open'].iloc[-1] / new_split\n adj_TickerDf.loc[adj_TickerDf.index[-1], 'Adj Close'] = adj_TickerDf['Close'].iloc[-1] / new_split\n\n return adj_TickerDf\n\ndef Graph_StockEvolutionWithHighLowValues(tickerDf, tickerSymbol):\n price_close = pd.Series(tickerDf['Adj Close'].values, index=tickerDf.index)\n close_ma = price_close.rolling(2).mean()\n close_mstd = price_close.rolling(2).std()\n price_open = pd.Series(tickerDf['Adj Open'].values, index=tickerDf.index)\n open_ma = price_open.rolling(2).mean()\n open_mstd = price_open.rolling(2).std()\n\n fig, axs = plt.subplots(1, 2, figsize=(18,8), sharey=True)\n\n for nn, ax in enumerate(axs):\n prices = [price_close, price_open]\n colors = [\"cornflowerblue\", \"forestgreen\"]\n means = [close_ma, open_ma]\n stds = [close_mstd, open_mstd]\n \n locator = mdates.AutoDateLocator(minticks=3, maxticks=20)\n formatter = mdates.ConciseDateFormatter(locator)\n \n ax.xaxis.set_major_locator(locator)\n ax.xaxis.set_major_formatter(formatter)\n ax.set_xlabel(\"Date\")\n ax.plot(prices[nn].index, prices[nn], colors[nn])\n ax.fill_between(stds[nn].index, means[nn] - 2 * stds[nn], means[nn] + 2 * stds[nn], color=\"grey\", alpha=0.3)\n\n axs[0].set_ylabel(\"Precio de Cierre\")\n axs[1].set_ylabel(\"Precio de Apertura\")\n\n title = (\"Desviación típica de los precios de Apertura y Cierre en el último año para la compañía \" + tickerSymbol)\n fig.suptitle(title)\n\n\"\"\"## Descarga de Datos\"\"\"\n\n# tickerSymbol = 'AAPL'\n\n# # Obtenemos los datos desde el día actual hasta hace 5 años\n# five_years_ago = datetime.now() - relativedelta(years=5)\n# start_date = five_years_ago.strftime('%Y-%m-%d')\n# end_date = datetime.now().strftime('%Y-%m-%d')\n\n# tickerDf = GetStockDataByTicker(tickerSymbol, start_date, end_date)\n\n# GetStockInformationByTicker(tickerSymbol, start_date, end_date)\n\n# \"\"\"## Gráficas de la acción\"\"\"\n\n# 
Graph_StockEvolutionByTime(tickerSymbol, tickerDf, 180)\n\n# one_year_ago_df = tickerDf.loc[tickerDf.index >= datetime.now() - relativedelta(years=1)]\n# Graph_StockEvolutionWithHighLowValues(one_year_ago_df, tickerSymbol)\n\n# corr = tickerDf.drop(['Dividends', 'Stock Splits'], axis=1)\n# plt.subplots(figsize=(12,8))\n# sns.heatmap(corr.corr(), cmap = 'RdGy', annot=True)" ]
[ [ "matplotlib.dates.DateFormatter", "pandas.Series", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.dates.ConciseDateFormatter", "matplotlib.dates.AutoDateLocator" ] ]
MikeMork/tracepy
[ "42e1e9a43f79b266a22dc07ba76ec215673e427f" ]
[ "tracepy/transforms.py" ]
[ "# Authors: Gavin Niendorf <gavinniendorf@gmail.com>\n#\n# Functions for transforming between reference frames.\n#\n# License: MIT\n\nimport numpy as np\nfrom numpy import cos, sin\n\ndef gen_rot(ang):\n \"\"\"Returns a rotation matrix from 3 rotation angles.\n\n Note\n ----\n np.matrix is deprecated and needs to be changed to a\n normal np.array but I remember difficulties with the\n regular np.array. Needs to be fixed eventually.\n\n Parameters\n ----------\n ang : np.array of length 3\n Euler angles alpha, beta, gamma in the lab frame.\n\n Returns\n -------\n np.matrix((3,3))\n Returns the rotation matrix.\n\n \"\"\"\n\n alpha, beta, gamma = ang\n R_11 = cos(alpha)*cos(gamma)+sin(alpha)*sin(beta)*sin(gamma)\n R_12 = -cos(beta)*sin(gamma)\n R_13 = -sin(alpha)*cos(gamma)+cos(alpha)*sin(beta)*sin(gamma)\n R_21 = cos(alpha)*sin(gamma)-sin(alpha)*sin(beta)*cos(gamma)\n R_22 = cos(beta)*cos(gamma)\n R_23 = -sin(alpha)*sin(gamma)-cos(alpha)*sin(beta)*cos(gamma)\n R_31 = sin(alpha)*cos(beta)\n R_32 = sin(beta)\n R_33 = cos(alpha)*cos(beta)\n R = np.matrix([[R_11, R_12, R_13],\\\n [R_21, R_22, R_23],\\\n [R_31, R_32, R_33]])\n return R\n\ndef transform(R, surface, points, D=None):\n \"\"\"Transforms points into the reference frame of surface.\n\n Note\n ----\n Arrays are flattened before return if they only have one row.\n\n Parameters\n ----------\n R : np.matrix((3,3))\n Rotation matrix for surface.\n surface : geometry object\n Surface whos reference frame to transform into.\n points : 2d np.array\n Point for each row that will be transformed.\n D (optional): 2d np.array\n Direction for each row that will be transformed.\n\n Returns\n -------\n tran_points : 2d np.array\n Points in the transformed reference frame.\n tran_D (optional): 2d np.array\n Directions in the transformed reference frame.\n\n \"\"\"\n\n tran_points = np.array(np.dot(R, (points-surface.P).T).T)\n if D is not None:\n tran_D = np.array(np.dot(R, D.T).T)\n if len(tran_points) == 1:\n tran_points = tran_points.flatten()\n tran_D = tran_D.flatten()\n return tran_points, tran_D\n if len(tran_points) == 1:\n tran_points = tran_points.flatten()\n return tran_points\n\ndef lab_frame(R, surface, points, D=None):\n \"\"\"Transforms points into the lab frame.\n\n Note\n ----\n Arrays are flattened before return if they only have one row.\n\n Parameters\n ----------\n R : np.matrix((3,3))\n Rotation matrix for surface.\n surface : geometry object\n Surface whos reference frame to transform from.\n points : 2d np.array\n Point for each row that will be transformed.\n D (optional): 2d np.array\n Direction for each row that will be transformed.\n\n Returns\n -------\n lab_points : 2d np.array\n Points in the lab reference frame.\n lab_D (optional): 2d np.array\n Directions in the lab reference frame.\n\n \"\"\"\n\n lab_points = np.array(np.dot(R.T, points.T).T)+surface.P\n if D is not None:\n lab_D = np.array(np.dot(R.T, D.T).T)\n if len(lab_points) == 1:\n lab_points = lab_points.flatten()\n lab_D = lab_D.flatten()\n return lab_points, lab_D\n if len(lab_points) == 1:\n lab_points = lab_points.flatten()\n return lab_points\n" ]
[ [ "numpy.matrix", "numpy.dot", "numpy.cos", "numpy.sin" ] ]
jinfenglin/SparkTrace
[ "19cbb1cc9dfccf0746b49c80c9b22e7bf933263a" ]
[ "scripts/query2.py" ]
[ "import math\n\nimport pandas as pd\n\nfrom Preprocessor import Preprocessor\nfrom VSM import VSM\n\n\ndef get_req(query, reqs):\n query = query.lower()\n query_tokens = query.split()\n preprocessor = Preprocessor()\n docs = []\n for x in reqs.values():\n if not isinstance(x, str):\n x = \"\"\n tmp = \" \".join(preprocessor.get_tokens(x))\n docs.append(tmp)\n vsm = VSM(\"en\")\n vsm.build_model(docs)\n scores = []\n for i, id in enumerate(reqs):\n req_tokens = docs[i].split()\n score = vsm._get_doc_similarity(query_tokens, req_tokens)\n scores.append(score)\n df = pd.DataFrame()\n df[\"req_id\"] = reqs.keys()\n df[\"scores\"] = scores\n return df\n\n\nif __name__ == \"__main__\":\n query = {\n \"commu_query\": \"Communication messaging messages hl7 protocol synchronous asynchronous tcp transmit send transmission receive interface memory router exchange transport API\",\n \"archi_query \": \"Platform system database client server distributed SQL SOA\",\n \"security\": \"access author user inform ensure data authenticate secure system malicious prevent incorrect product ar\"\n }\n # links = pd.read_csv(\"vist_req_code_raw.csv\")\n # links = links[links[\"scores\"] > 0]\n # links = links.groupby(\"req_id\")[\"req_id\", \"code_id\", \"scores\"].apply(\n # lambda x: x.nlargest(5, columns=['scores'])).reset_index(\n # drop=True)\n # links.to_csv(\"vet_links.csv\")\n reqs = pd.read_csv(\"package_requirement.csv\")\n\n req_dict = {}\n for id, content in zip(reqs[\"req_id\"], reqs[\"req_content\"]):\n req_dict[id] = content\n links = pd.read_csv(\"vet_links.csv\")\n link_dict = dict()\n for code_id, req_id in zip(links[\"code_id\"], links[\"req_id\"]):\n codes = link_dict.get(req_id, set())\n codes.add(code_id)\n link_dict[req_id] = codes\n\n for q in query:\n res = []\n df = pd.DataFrame()\n req_rel_scores = get_req(query[q], req_dict)\n req_rel_scores = req_rel_scores.sort_values(by=[\"scores\"], ascending=False)\n req_rel_scores.to_csv(q + \"_req_relevance.csv\")\n for id, score in zip(req_rel_scores[\"req_id\"], req_rel_scores[\"scores\"]):\n if score > 0.1:\n if id in link_dict:\n code = link_dict[id]\n res.extend(code)\n df = pd.DataFrame()\n df[\"query_related_code\"] = res\n df.to_csv(q + \"_related_code_files.csv\")\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
panpiort8/MolBERT
[ "b312700733d6f14ba5bf90347adb59b0d99be65d" ]
[ "molbert/apps/base.py" ]
[ "import logging\nimport pprint\nfrom abc import ABC\nfrom argparse import ArgumentParser, Namespace\n\nimport torch\nfrom pytorch_lightning import Trainer, seed_everything\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\nfrom molbert.apps.args import get_default_parser\nfrom molbert.models.base import MolbertModel\n\nlogging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')\nlogger = logging.getLogger(__name__)\n\n\nclass BaseMolbertApp(ABC):\n @staticmethod\n def load_model_weights(model: MolbertModel, checkpoint_file: str) -> MolbertModel:\n \"\"\"\n PL `load_from_checkpoint` seems to fail to reload model weights. This function loads them manually.\n See: https://github.com/PyTorchLightning/pytorch-lightning/issues/525\n \"\"\"\n logger.info(f'Loading model weights from {checkpoint_file}')\n checkpoint = torch.load(checkpoint_file, map_location=lambda storage, loc: storage)\n\n # load weights from checkpoint, strict=False allows to ignore some weights\n # e.g. weights of a head that was used during pretraining but isn't present during finetuning\n # and also allows to missing keys in the checkpoint, e.g. heads that are used for finetuning\n # but weren't present during pretraining\n model.load_state_dict(checkpoint['state_dict'], strict=False)\n return model\n\n def run(self, args=None):\n args = self.parse_args(args)\n seed_everything(args.seed)\n\n pprint.pprint('args')\n pprint.pprint(args.__dict__)\n pprint.pprint('*********************')\n\n checkpoint_callback = ModelCheckpoint(monitor='valid_loss', verbose=True, save_last=True)\n\n logger.info(args)\n\n lr_logger = LearningRateLogger()\n\n trainer = Trainer(\n default_root_dir=args.default_root_dir,\n progress_bar_refresh_rate=args.progress_bar_refresh_rate,\n min_epochs=args.min_epochs,\n max_epochs=args.max_epochs,\n val_check_interval=args.val_check_interval,\n limit_val_batches=args.limit_val_batches,\n gpus=args.gpus,\n distributed_backend=args.distributed_backend,\n row_log_interval=1,\n amp_level=args.amp_level,\n precision=args.precision,\n num_nodes=args.num_nodes,\n tpu_cores=args.tpu_cores,\n accumulate_grad_batches=args.accumulate_grad_batches,\n checkpoint_callback=checkpoint_callback,\n resume_from_checkpoint=args.resume_from_checkpoint,\n fast_dev_run=args.fast_dev_run,\n callbacks=[lr_logger],\n )\n\n model = self.get_model(args)\n logger.info(f'Start Training model {model}')\n\n logger.info('')\n trainer.fit(model)\n logger.info('Training loop finished.')\n\n return trainer\n\n def parse_args(self, args) -> Namespace:\n \"\"\"\n Parse command line arguments\n \"\"\"\n parser = get_default_parser()\n parser = self.add_parser_arguments(parser)\n return parser.parse_args(args=args)\n\n @staticmethod\n def get_model(args) -> MolbertModel:\n raise NotImplementedError\n\n @staticmethod\n def add_parser_arguments(parser: ArgumentParser) -> ArgumentParser:\n \"\"\"\n Adds model specific options to the default parser\n \"\"\"\n raise NotImplementedError\n" ]
[ [ "torch.load" ] ]
ModelTC/mqbench-paper
[ "8d25a3b63c0cde4d904f77439fc435b49b0b33d4" ]
[ "prototype/utils/misc.py" ]
[ "import os\nimport logging\nimport torch\nimport spring.linklink as link\nfrom collections import defaultdict\nimport numpy as np\ntry:\n from sklearn.metrics import precision_score, recall_score, f1_score\nexcept ImportError:\n print('Import metrics failed!')\n\nfrom .dist import simple_group_split\nimport yaml\nfrom easydict import EasyDict\n\n_logger = None\n_logger_fh = None\n_logger_names = []\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, length=0):\n self.length = length\n self.reset()\n\n def reset(self):\n if self.length > 0:\n self.history = []\n else:\n self.count = 0\n self.sum = 0.0\n self.val = 0.0\n self.avg = 0.0\n\n def reduce_update(self, tensor, num=1):\n link.allreduce(tensor)\n self.update(tensor.item(), num=num)\n\n def update(self, val, num=1):\n if self.length > 0:\n # currently assert num==1 to avoid bad usage, refine when there are some explict requirements\n assert num == 1\n self.history.append(val)\n if len(self.history) > self.length:\n del self.history[0]\n\n self.val = self.history[-1]\n self.avg = np.mean(self.history)\n else:\n self.val = val\n self.sum += val*num\n self.count += num\n self.avg = self.sum / self.count\n\n\ndef makedir(path):\n if link.get_rank() == 0 and not os.path.exists(path):\n os.makedirs(path)\n link.barrier()\n\n\ndef parse_config(config_file):\n with open(config_file) as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n # config = yaml.safe_load(f)\n config = EasyDict(config)\n return config\n\n\nclass RankFilter(logging.Filter):\n def filter(self, record):\n return False\n\n\ndef get_bn(config):\n if config.use_sync_bn:\n group_size = config.kwargs.group_size\n var_mode = config.kwargs.var_mode\n if group_size == 1:\n bn_group = None\n else:\n world_size, rank = link.get_world_size(), link.get_rank()\n assert world_size % group_size == 0\n bn_group = simple_group_split(\n world_size, rank, world_size // group_size)\n\n del config.kwargs['group_size']\n config.kwargs.group = bn_group\n config.kwargs.var_mode = (\n link.syncbnVarMode_t.L1 if var_mode == 'L1' else link.syncbnVarMode_t.L2)\n\n def BNFunc(*args, **kwargs):\n return link.nn.SyncBatchNorm2d(*args, **kwargs, **config.kwargs)\n\n return BNFunc\n else:\n def BNFunc(*args, **kwargs):\n return torch.nn.BatchNorm2d(*args, **kwargs, **config.kwargs)\n return BNFunc\n\n\n\ndef create_logger(log_file, level=logging.INFO):\n global _logger, _logger_fh\n if _logger is None:\n _logger = logging.getLogger()\n formatter = logging.Formatter(\n '[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s] %(message)s')\n fh = logging.FileHandler(log_file)\n fh.setFormatter(formatter)\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n _logger.setLevel(level)\n _logger.addHandler(fh)\n _logger.addHandler(sh)\n _logger_fh = fh\n else:\n _logger.removeHandler(_logger_fh)\n _logger.setLevel(level)\n\n return _logger\n\n\ndef get_logger(name, level=logging.INFO):\n global _logger_names\n logger = logging.getLogger(name)\n # logger.parent = None\n if name in _logger_names:\n return logger\n\n _logger_names.append(name)\n if link.get_rank() > 0:\n logger.addFilter(RankFilter())\n\n return logger\n\n\ndef param_group_all(model, config):\n logger = get_logger(__name__)\n pgroup_normal = []\n pgroup = {'bn_w': [], 'bn_b': [], 'conv_b': [], 'linear_b': []}\n names = {'bn_w': [], 'bn_b': [], 'conv_b': [], 'linear_b': []}\n if 'conv_dw_w' in config:\n pgroup['conv_dw_w'] = []\n names['conv_dw_w'] = []\n if 
'conv_dw_b' in config:\n pgroup['conv_dw_b'] = []\n names['conv_dw_b'] = []\n if 'conv_dense_w' in config:\n pgroup['conv_dense_w'] = []\n names['conv_dense_w'] = []\n if 'conv_dense_b' in config:\n pgroup['conv_dense_b'] = []\n names['conv_dense_b'] = []\n if 'linear_w' in config:\n pgroup['linear_w'] = []\n names['linear_w'] = []\n\n names_all = []\n type2num = defaultdict(lambda: 0)\n for name, m in model.named_modules():\n if isinstance(m, torch.nn.Conv2d):\n if m.bias is not None:\n if 'conv_dw_b' in pgroup and m.groups == m.in_channels:\n pgroup['conv_dw_b'].append(m.bias)\n names_all.append(name+'.bias')\n names['conv_dw_b'].append(name+'.bias')\n type2num[m.__class__.__name__+'.bias(dw)'] += 1\n elif 'conv_dense_b' in pgroup and m.groups == 1:\n pgroup['conv_dense_b'].append(m.bias)\n names_all.append(name+'.bias')\n names['conv_dense_b'].append(name+'.bias')\n type2num[m.__class__.__name__+'.bias(dense)'] += 1\n else:\n pgroup['conv_b'].append(m.bias)\n names_all.append(name+'.bias')\n names['conv_b'].append(name+'.bias')\n type2num[m.__class__.__name__+'.bias'] += 1\n if 'conv_dw_w' in pgroup and m.groups == m.in_channels:\n pgroup['conv_dw_w'].append(m.weight)\n names_all.append(name+'.weight')\n names['conv_dw_w'].append(name+'.weight')\n type2num[m.__class__.__name__+'.weight(dw)'] += 1\n elif 'conv_dense_w' in pgroup and m.groups == 1:\n pgroup['conv_dense_w'].append(m.weight)\n names_all.append(name+'.weight')\n names['conv_dense_w'].append(name+'.weight')\n type2num[m.__class__.__name__+'.weight(dense)'] += 1\n\n elif isinstance(m, torch.nn.Linear):\n if m.bias is not None:\n pgroup['linear_b'].append(m.bias)\n names_all.append(name+'.bias')\n names['linear_b'].append(name+'.bias')\n type2num[m.__class__.__name__+'.bias'] += 1\n if 'linear_w' in pgroup:\n pgroup['linear_w'].append(m.weight)\n names_all.append(name+'.weight')\n names['linear_w'].append(name+'.weight')\n type2num[m.__class__.__name__+'.weight'] += 1\n elif (isinstance(m, torch.nn.BatchNorm2d)\n or isinstance(m, torch.nn.BatchNorm1d)\n or isinstance(m, link.nn.SyncBatchNorm2d)):\n if m.weight is not None:\n pgroup['bn_w'].append(m.weight)\n names_all.append(name+'.weight')\n names['bn_w'].append(name+'.weight')\n type2num[m.__class__.__name__+'.weight'] += 1\n if m.bias is not None:\n pgroup['bn_b'].append(m.bias)\n names_all.append(name+'.bias')\n names['bn_b'].append(name+'.bias')\n type2num[m.__class__.__name__+'.bias'] += 1\n\n for name, p in model.named_parameters():\n if name not in names_all:\n pgroup_normal.append(p)\n\n param_groups = [{'params': pgroup_normal}]\n for ptype in pgroup.keys():\n if ptype in config.keys():\n param_groups.append({'params': pgroup[ptype], **config[ptype]})\n else:\n param_groups.append({'params': pgroup[ptype]})\n\n logger.info(ptype)\n for k, v in param_groups[-1].items():\n if k == 'params':\n logger.info(' params: {}'.format(len(v)))\n else:\n logger.info(' {}: {}'.format(k, v))\n\n for ptype, pconf in config.items():\n logger.info('names for {}({}): {}'.format(\n ptype, len(names[ptype]), names[ptype]))\n\n return param_groups, type2num\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return 
res\n\n\ndef load_state_model(model, state):\n\n logger = get_logger(__name__)\n logger.info('======= loading model state... =======')\n\n model.load_state_dict(state, strict=False)\n\n state_keys = set(state.keys())\n model_keys = set(model.state_dict().keys())\n missing_keys = model_keys - state_keys\n for k in missing_keys:\n logger.warn(f'missing key: {k}')\n\n\ndef load_state_optimizer(optimizer, state):\n\n logger = get_logger(__name__)\n logger.info('======= loading optimizer state... =======')\n\n optimizer.load_state_dict(state)\n\n\ndef modify_state(state, config):\n if hasattr(config, 'key'):\n for key in config['key']:\n if key == 'optimizer':\n state.pop(key)\n elif key == 'last_iter':\n state['last_iter'] = 0\n elif key == 'ema':\n state.pop('ema')\n\n if hasattr(config, 'model'):\n for module in config['model']:\n state['model'].pop(module)\n return state\n\n\ndef mixup_data(x, y, alpha=1.0):\n '''Returns mixed inputs, pairs of targets, and lambda'''\n if alpha > 0:\n lam = np.random.beta(alpha, alpha)\n else:\n lam = 1\n\n batch_size = x.size()[0]\n index = torch.randperm(batch_size).cuda()\n\n mixed_x = lam * x + (1 - lam) * x[index, :]\n y_a, y_b = y, y[index]\n return mixed_x, y_a, y_b, lam\n\n\ndef mix_criterion(criterion, pred, y_a, y_b, lam):\n return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\n\n\ndef rand_bbox(size, lam):\n W = size[2]\n H = size[3]\n cut_rat = np.sqrt(1. - lam)\n cut_w = np.int(W * cut_rat)\n cut_h = np.int(H * cut_rat)\n\n # uniform\n cx = np.random.randint(W)\n cy = np.random.randint(H)\n\n bbx1 = np.clip(cx - cut_w // 2, 0, W)\n bby1 = np.clip(cy - cut_h // 2, 0, H)\n bbx2 = np.clip(cx + cut_w // 2, 0, W)\n bby2 = np.clip(cy + cut_h // 2, 0, H)\n\n return bbx1, bby1, bbx2, bby2\n\n\ndef cutmix_data(input, target, alpha=0.0):\n lam = np.random.beta(alpha, alpha)\n rand_index = torch.randperm(input.size()[0]).cuda()\n\n target_a = target\n target_b = target[rand_index]\n\n # generate mixed sample\n bbx1, bby1, bbx2, bby2 = rand_bbox(input.size(), lam)\n input[:, :, bbx1:bbx2, bby1:bby2] = input[rand_index, :, bbx1:bbx2, bby1:bby2]\n # adjust lambda to exactly match pixel ratio\n lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (input.size()[-1] * input.size()[-2]))\n return input, target_a, target_b, lam\n" ]
[ [ "numpy.random.beta", "numpy.sqrt", "numpy.clip", "torch.randperm", "numpy.int", "numpy.mean", "torch.nn.BatchNorm2d", "numpy.random.randint" ] ]
ChenjunZou/deep-rl-tensorflow
[ "95d3e2dde77d4a7a393ec418fe3537094d08c2ba" ]
[ "agents/experience.py" ]
[ "\"\"\"Modification of https://github.com/tambetm/simple_dqn/blob/master/src/replay_memory.py\"\"\"\n\nimport random\nimport numpy as np\n\nclass Experience(object):\n def __init__(self, data_format, batch_size, history_length, memory_size, observation_dims):\n self.data_format = data_format\n self.batch_size = batch_size\n self.history_length = history_length\n self.memory_size = memory_size\n\n self.actions = np.empty(self.memory_size, dtype=np.uint8)\n self.rewards = np.empty(self.memory_size, dtype=np.int8)\n self.observations = np.empty([self.memory_size] + observation_dims, dtype=np.uint8)\n self.terminals = np.empty(self.memory_size, dtype=np.bool)\n\n # pre-allocate prestates and poststates for minibatch\n self.prestates = np.empty([self.batch_size, self.history_length] + observation_dims, dtype = np.float16)\n self.poststates = np.empty([self.batch_size, self.history_length] + observation_dims, dtype = np.float16)\n\n self.count = 0\n self.current = 0\n\n def add(self, observation, reward, action, terminal):\n self.actions[self.current] = action\n self.rewards[self.current] = reward\n self.observations[self.current, ...] = observation\n self.terminals[self.current] = terminal\n self.count = max(self.count, self.current + 1)\n self.current = (self.current + 1) % self.memory_size\n\n def sample(self):\n indexes = []\n while len(indexes) < self.batch_size:\n while True:\n index = random.randint(self.history_length, self.count - 1)\n if index >= self.current and index - self.history_length < self.current:\n continue\n if self.terminals[(index - self.history_length):index].any():\n continue\n break\n \n self.prestates[len(indexes), ...] = self.retreive(index - 1)\n self.poststates[len(indexes), ...] = self.retreive(index)\n indexes.append(index)\n\n actions = self.actions[indexes]\n rewards = self.rewards[indexes]\n terminals = self.terminals[indexes]\n\n if self.data_format == 'NHWC' and len(self.prestates.shape) == 4:\n return np.transpose(self.prestates, (0, 2, 3, 1)), actions, \\\n rewards, np.transpose(self.poststates, (0, 2, 3, 1)), terminals\n else:\n return self.prestates, actions, rewards, self.poststates, terminals\n\n def retreive(self, index):\n index = index % self.count\n if index >= self.history_length - 1:\n return self.observations[(index - (self.history_length - 1)):(index + 1), ...]\n else:\n indexes = [(index - i) % self.count for i in reversed(range(self.history_length))]\n return self.observations[indexes, ...]\n" ]
[ [ "numpy.empty", "numpy.transpose" ] ]
ktavabi/autoreject
[ "9571e1f35497bc5484af8f2cfafcc3c91b6b6f18" ]
[ "autoreject/tests/test_utils.py" ]
[ "# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>\n# License: BSD-3-Clause\n\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\nimport pytest\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.bem import _check_origin\nfrom mne import io\n\nfrom autoreject.utils import clean_by_interp, interpolate_bads\nfrom autoreject.utils import _interpolate_bads_eeg\nimport mne.channels.interpolation\n\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nraw = io.read_raw_fif(raw_fname, preload=False)\nraw.crop(0, 15)\nraw.del_proj()\n\nevoked_fname = data_path + '/MEG/sample/sample_audvis-ave.fif'\nevoked = mne.read_evokeds(evoked_fname, condition='Left Auditory',\n baseline=(None, 0))\n\n\ndef test_utils():\n \"\"\"Test utils.\"\"\"\n event_id = {'Visual/Left': 3}\n tmin, tmax = -0.2, 0.5\n events = mne.find_events(raw)\n picks = mne.pick_channels(raw.info['ch_names'],\n ['MEG 2443', 'MEG 2442', 'MEG 2441'])\n epochs = mne.Epochs(raw, events, event_id, tmin, tmax,\n picks=picks, baseline=(None, 0),\n reject=None, preload=True)\n\n this_epoch = epochs.copy()\n assert this_epoch.info['bads'] == ['MEG 2443']\n epochs_clean = clean_by_interp(this_epoch)\n assert this_epoch.info['bads'] == ['MEG 2443']\n assert_array_equal(this_epoch.get_data(), epochs.get_data())\n pytest.raises(AssertionError, assert_array_equal, epochs_clean.get_data(),\n this_epoch.get_data())\n\n picks_meg = mne.pick_types(evoked.info, meg='grad', eeg=False, exclude=[])\n picks_eeg = mne.pick_types(evoked.info, meg=False, eeg=True, exclude=[])\n picks_bad_meg = mne.pick_channels(evoked.ch_names, include=['MEG 2443'])\n picks_bad_eeg = mne.pick_channels(evoked.ch_names, include=['EEG 053'])\n evoked_orig = evoked.copy()\n for picks, picks_bad in zip([picks_meg, picks_eeg],\n [picks_bad_meg, picks_bad_eeg]):\n evoked_autoreject = interpolate_bads(evoked, picks=picks,\n reset_bads=False)\n evoked.interpolate_bads(reset_bads=False)\n assert_array_equal(evoked.data[picks_bad],\n evoked_autoreject.data[picks_bad])\n pytest.raises(AssertionError, assert_array_equal,\n evoked_orig.data[picks_bad], evoked.data[picks_bad])\n\n # test that autoreject EEG interpolation code behaves the same as MNE\n evoked_ar = evoked_orig.copy()\n evoked_mne = evoked_orig.copy()\n\n origin = _check_origin('auto', evoked_ar.info)\n _interpolate_bads_eeg(evoked_ar, picks=None)\n mne.channels.interpolation._interpolate_bads_eeg(evoked_mne, origin=origin)\n assert_array_almost_equal(evoked_ar.data, evoked_mne.data)\n\n\ndef test_interpolate_bads():\n \"\"\"Test interpolate bads.\"\"\"\n event_id = None\n events = mne.find_events(raw)\n tmin, tmax = -0.2, 0.5\n for ii, ch_name in enumerate(raw.info['ch_names'][:14]):\n raw.set_channel_types({ch_name: 'bio'})\n raw.rename_channels({ch_name: 'BIO%02d' % ii})\n\n picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=False)\n epochs = mne.Epochs(raw, events, event_id, tmin, tmax,\n baseline=(None, 0), decim=10,\n reject=None, preload=True)[:10]\n epochs.info['bads'] = ['MEG 2212']\n interpolate_bads(epochs, picks)\n" ]
[ [ "numpy.testing.assert_array_equal", "numpy.testing.assert_array_almost_equal" ] ]
exile79/my_ml_service
[ "e912cb169b817fd8dfd490e1ffbdc71727a34c29" ]
[ "backend/server/apps/endpoints/views.py" ]
[ "from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework import viewsets\nfrom rest_framework import mixins\n\nfrom apps.endpoints.models import Endpoint\nfrom apps.endpoints.serializers import EndpointSerializer\n\nfrom apps.endpoints.models import MLAlgorithm\nfrom apps.endpoints.serializers import MLAlgorithmSerializer\n\nfrom apps.endpoints.models import MLAlgorithmStatus\nfrom apps.endpoints.serializers import MLAlgorithmStatusSerializer\n\nfrom apps.endpoints.models import MLRequest\nfrom apps.endpoints.serializers import MLRequestSerializer\n\nimport json\nfrom numpy.random import rand\nfrom rest_framework import views, status\nfrom rest_framework.response import Response\nfrom apps.ml.registry import MLRegistry\nfrom server.wsgi import registry\nfrom django.db import transaction\nfrom apps.endpoints.models import ABTest\nfrom apps.endpoints.serializers import ABTestSerializer\nfrom django.db.models import F\nimport datetime\n\n\nclass EndpointViewSet(\n mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet\n):\n serializer_class = EndpointSerializer\n queryset = Endpoint.objects.all()\n\n\nclass MLAlgorithmViewSet(\n mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet\n):\n serializer_class = MLAlgorithmSerializer\n queryset = MLAlgorithm.objects.all()\n\n\ndef deactivate_other_statuses(instance):\n old_statuses = MLAlgorithmStatus.objects.filter(parent_mlalgorithm=instance.parent_mlalgorithm,\n created_at__lt=instance.created_at,\n active=True)\n for i in range(len(old_statuses)):\n old_statuses[i].active = False\n MLAlgorithmStatus.objects.bulk_update(old_statuses, [\"active\"])\n\n\nclass MLAlgorithmStatusViewSet(\n mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet,\n mixins.CreateModelMixin\n):\n serializer_class = MLAlgorithmStatusSerializer\n queryset = MLAlgorithmStatus.objects.all()\n\n def perform_create(self, serializer):\n try:\n with transaction.atomic():\n instance = serializer.save(active=True)\n # set active=False for other statuses\n deactivate_other_statuses(instance)\n\n except Exception as e:\n raise APIException(str(e))\n\n\nclass MLRequestViewSet(\n mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet,\n mixins.UpdateModelMixin\n):\n serializer_class = MLRequestSerializer\n queryset = MLRequest.objects.all()\n\n\nclass PredictView(views.APIView):\n def post(self, request, endpoint_name, format=None):\n\n algorithm_status = self.request.query_params.get(\n \"status\", \"production\")\n algorithm_version = self.request.query_params.get(\"version\")\n\n algs = MLAlgorithm.objects.filter(\n parent_endpoint__name=endpoint_name, status__status=algorithm_status, status__active=True)\n\n if algorithm_version is not None:\n algs = algs.filter(version=algorithm_version)\n\n if len(algs) == 0:\n return Response(\n {\"status\": \"Error\", \"message\": \"ML algorithm is not available\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n if len(algs) != 1 and algorithm_status != \"ab_testing\":\n return Response(\n {\"status\": \"Error\", \"message\": \"ML algorithm selection is ambiguous. 
Please specify algorithm version.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n alg_index = 0\n if algorithm_status == \"ab_testing\":\n alg_index = 0 if rand() < 0.5 else 1\n\n algorithm_object = registry.endpoints[algs[alg_index].id]\n prediction = algorithm_object.compute_prediction(request.data)\n\n label = prediction[\"label\"] if \"label\" in prediction else \"error\"\n ml_request = MLRequest(\n input_data=json.dumps(request.data),\n full_response=prediction,\n response=label,\n feedback=\"\",\n parent_mlalgorithm=algs[alg_index],\n )\n ml_request.save()\n\n prediction[\"request_id\"] = ml_request.id\n\n return Response(prediction)\n\n\nclass ABTestViewSet(\n mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet,\n mixins.CreateModelMixin, mixins.UpdateModelMixin\n):\n serializer_class = ABTestSerializer\n queryset = ABTest.objects.all()\n\n def perform_create(self, serializer):\n try:\n with transaction.atomic():\n instance = serializer.save()\n # update status for first algorithm\n\n status_1 = MLAlgorithmStatus(status=\"ab_testing\",\n created_by=instance.created_by,\n parent_mlalgorithm=instance.parent_mlalgorithm_1,\n active=True)\n status_1.save()\n deactivate_other_statuses(status_1)\n # update status for second algorithm\n status_2 = MLAlgorithmStatus(status=\"ab_testing\",\n created_by=instance.created_by,\n parent_mlalgorithm=instance.parent_mlalgorithm_2,\n active=True)\n status_2.save()\n deactivate_other_statuses(status_2)\n\n except Exception as e:\n raise APIException(str(e))\n\n\nclass StopABTestView(views.APIView):\n def post(self, request, ab_test_id, format=None):\n\n try:\n ab_test = ABTest.objects.get(pk=ab_test_id)\n\n if ab_test.ended_at is not None:\n return Response({\"message\": \"AB Test already finished.\"})\n\n date_now = datetime.datetime.now()\n # alg #1 accuracy\n all_responses_1 = MLRequest.objects.filter(\n parent_mlalgorithm=ab_test.parent_mlalgorithm_1, created_at__gt=ab_test.created_at, created_at__lt=date_now).count()\n correct_responses_1 = MLRequest.objects.filter(\n parent_mlalgorithm=ab_test.parent_mlalgorithm_1, created_at__gt=ab_test.created_at, created_at__lt=date_now, response=F('feedback')).count()\n accuracy_1 = correct_responses_1 / float(all_responses_1)\n print(all_responses_1, correct_responses_1, accuracy_1)\n\n # alg #2 accuracy\n all_responses_2 = MLRequest.objects.filter(\n parent_mlalgorithm=ab_test.parent_mlalgorithm_2, created_at__gt=ab_test.created_at, created_at__lt=date_now).count()\n correct_responses_2 = MLRequest.objects.filter(\n parent_mlalgorithm=ab_test.parent_mlalgorithm_2, created_at__gt=ab_test.created_at, created_at__lt=date_now, response=F('feedback')).count()\n accuracy_2 = correct_responses_2 / float(all_responses_2)\n print(all_responses_2, correct_responses_2, accuracy_2)\n\n # select algorithm with higher accuracy\n alg_id_1, alg_id_2 = ab_test.parent_mlalgorithm_1, ab_test.parent_mlalgorithm_2\n # swap\n if accuracy_1 < accuracy_2:\n alg_id_1, alg_id_2 = alg_id_2, alg_id_1\n\n status_1 = MLAlgorithmStatus(status=\"production\",\n created_by=ab_test.created_by,\n parent_mlalgorithm=alg_id_1,\n active=True)\n status_1.save()\n deactivate_other_statuses(status_1)\n # update status for second algorithm\n status_2 = MLAlgorithmStatus(status=\"testing\",\n created_by=ab_test.created_by,\n parent_mlalgorithm=alg_id_2,\n active=True)\n status_2.save()\n deactivate_other_statuses(status_2)\n\n summary = \"Algorithm #1 accuracy: {}, Algorithm #2 accuracy: {}\".format(\n accuracy_1, 
accuracy_2)\n ab_test.ended_at = date_now\n ab_test.summary = summary\n ab_test.save()\n\n except Exception as e:\n return Response({\"status\": \"Error\", \"message\": str(e)},\n status=status.HTTP_400_BAD_REQUEST\n )\n return Response({\"message\": \"AB Test finished.\", \"summary\": summary})\n" ]
[ [ "numpy.random.rand" ] ]
mCodingLLC/magnum-bindings
[ "5994150a68a216621582f76ecf394d1b42758abc" ]
[ "src/python/corrade/test/test_containers_numpy.py" ]
[ "#\n# This file is part of Magnum.\n#\n# Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,\n# 2020, 2021 Vladimír Vondruš <mosra@centrum.cz>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n#\n\nimport unittest\n\nfrom corrade import containers\nimport test_stridedarrayview\n\ntry:\n import numpy as np\nexcept ModuleNotFoundError:\n raise unittest.SkipTest(\"numpy not installed\")\n\nclass StridedArrayViewCustomType(unittest.TestCase):\n # short and mutable_int tested in test_containers, as for those memoryview\n # works well... well, for one dimension it does\n\n def test_mutable_vector3d(self):\n a = test_stridedarrayview.MutableContainer3d()\n self.assertEqual(type(a.view), containers.MutableStridedArrayView2D)\n self.assertEqual(a.view.format, 'ddd')\n self.assertEqual(a.list, [\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]\n ])\n a.view[0][1] = [-765.6581, 3.5, 1.125]\n a.view[1][2] = [4.666, 0.25, -7.5]\n self.assertEqual(a.list, [\n [0.0, 0.0, 0.0],\n [-765.6581, 3.5, 1.125],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [4.666, 0.25, -7.5]\n ])\n\n # memoryview ... doesn't understand the type. 
HAH\n mav = memoryview(a.view[0])\n with self.assertRaisesRegex(NotImplementedError, \"unsupported format ddd\"):\n self.assertEqual(mav[1], [-765.6581, 3.5, 1.125])\n\n # Test that numpy understands the type and has changes reflected\n av = np.array(a.view, copy=False)\n a.view[1][0] = [-3.33, 1.0, 0.0]\n # Converting to a tuple, otherwise numpy always compares to False\n self.assertEqual(tuple(av[1][0]), (-3.33, 1.0, 0.0))\n self.assertEqual(tuple(av[1][1]), (0.0, 0.0, 0.0))\n self.assertEqual(tuple(av[1][2]), (4.666, 0.25, -7.5))\n\n # And the other way around as well\n av[1][1] = (1.0, 0.125, 1.125)\n self.assertEqual(a.list, [\n [0.0, 0.0, 0.0],\n [-765.6581, 3.5, 1.125],\n [0.0, 0.0, 0.0],\n [-3.33, 1.0, 0.0],\n [1.0, 0.125, 1.125],\n [4.666, 0.25, -7.5]\n ])\n\n def test_mutable_long_float(self):\n a = test_stridedarrayview.MutableContainerlf()\n self.assertEqual(type(a.view), containers.MutableStridedArrayView2D)\n self.assertEqual(a.view.format, 'Qf')\n self.assertEqual(a.list, [\n (0, 0.0),\n (0, 0.0),\n (0, 0.0),\n (0, 0.0),\n (0, 0.0),\n (0, 0.0)\n ])\n a.view[0][1] = (7656581356781257, 1.125)\n a.view[1][2] = (4666025, -7.5)\n self.assertEqual(a.list, [\n (0, 0.0),\n (7656581356781257, 1.125),\n (0, 0.0),\n (0, 0.0),\n (0, 0.0),\n (4666025, -7.5)\n ])\n\n # memoryview ... doesn't understand the type. HAH\n mav = memoryview(a.view[0])\n with self.assertRaisesRegex(NotImplementedError, \"unsupported format Qf\"):\n self.assertEqual(mav[1], (7656581356781257, 1.125))\n\n # Test that numpy understands the type and has changes reflected\n av = np.array(a.view, copy=False)\n a.view[1][0] = (333106832, 0.0)\n # Converting to a tuple, otherwise numpy always compares to False\n self.assertEqual(tuple(av[1][0]), (333106832, 0.0))\n self.assertEqual(tuple(av[1][1]), (0, 0.0))\n self.assertEqual(tuple(av[1][2]), (4666025, -7.5))\n\n # And the other way around as well\n av[1][1] = (1001, 1.125)\n self.assertEqual(a.list, [\n (0, 0.0),\n (7656581356781257, 1.125),\n (0, 0.0),\n (333106832, 0.0),\n (1001, 1.125),\n (4666025, -7.5)\n ])\n" ]
[ [ "numpy.array" ] ]
xmuyzz/IVContrast
[ "f3100e54f1808e1a796acd97ef5d23d0a2fd4f6c" ]
[ "src/go_model/old_version/call_backs2.py" ]
[ "#----------------------------------------------------------------------\n# Deep learning for classification for contrast CT;\n# Transfer learning using Google Inception V3;\n#-------------------------------------------------------------------------------------------\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom tensorflow.keras.callbacks import LearningRateScheduler\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.callbacks import TensorBoard\n\n\n# ----------------------------------------------------------------------------------\n# scheduler\n# ----------------------------------------------------------------------------------\ndef scheduler(epoch, lr):\n if epoch < 10:\n return lr\n else:\n return lr * tf.math.exp(-0.1)\n\n# ----------------------------------------------------------------------------------\n# scheduler\n# ----------------------------------------------------------------------------------\ndef callbacks(log_dir):\n \n check_point = ModelCheckpoint(\n filepath=os.path.join(log_dir, 'model.{epoch:02d}-{val_loss:.2f}.h5'),\n monitor='val_acc',\n verbose=1,\n save_best_model_only=True,\n save_weights_only=False,\n mode='max'\n )\n \n tensor_board = TensorBoard(\n log_dir=log_dir,\n histogram_freq=0,\n write_graph=True,\n write_images=False,\n update_freq='epoch',\n profile_batch=2,\n embeddings_freq=0,\n embeddings_metadata=None\n )\n\n early_stopping = EarlyStopping(\n monitor='val_loss',\n min_delta=0,\n patience=10,\n verbose=0,\n mode='auto',\n baseline=None,\n restore_best_weights=False\n )\n \n my_callbacks = [\n #ModelSave(),\n #early_stopping,\n #LearningRateScheduler(shcheduler),\n #check_point,\n tensor_board\n ]\n\n return my_callbacks\n\n\n \n\n \n" ]
[ [ "tensorflow.keras.callbacks.TensorBoard", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.math.exp" ] ]
baajur/catalyst
[ "a35297ecab8d1a6c2f00b6435ea1d6d37ec9f441" ]
[ "catalyst/core/callbacks/metrics.py" ]
[ "from typing import Any, Callable, Dict, List, Union\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nimport logging\n\nimport torch\n\nfrom catalyst.core import utils\nfrom catalyst.core.callback import Callback, CallbackNode, CallbackOrder\nfrom catalyst.core.runner import IRunner\nfrom catalyst.tools import meters\n\nlogger = logging.getLogger(__name__)\n\n\nclass _MetricCallback(ABC, Callback):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n\n def __init__(\n self,\n prefix: str,\n input_key: Union[str, List[str], Dict[str, str]] = \"targets\",\n output_key: Union[str, List[str], Dict[str, str]] = \"logits\",\n multiplier: float = 1.0,\n **metrics_kwargs,\n ):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(order=CallbackOrder.Metric, node=CallbackNode.All)\n self.prefix = prefix\n # self.metric_fn = partial(metric_fn, **metrics_kwargs)\n self.input_key = input_key\n self.output_key = output_key\n self.multiplier = multiplier\n self.metrics_kwargs = metrics_kwargs\n\n self._get_input = utils.get_dictkey_auto_fn(self.input_key)\n self._get_output = utils.get_dictkey_auto_fn(self.output_key)\n kv_types = (dict, tuple, list, type(None))\n\n is_value_input = (\n isinstance(self.input_key, str) and self.input_key != \"__all__\"\n )\n is_value_output = (\n isinstance(self.output_key, str) and self.output_key != \"__all__\"\n )\n is_kv_input = (\n isinstance(self.input_key, kv_types) or self.input_key == \"__all__\"\n )\n is_kv_output = (\n isinstance(self.output_key, kv_types)\n or self.output_key == \"__all__\"\n )\n\n # @TODO: fix to only KV usage\n if hasattr(self, \"_compute_metric\"):\n pass # overridden in descendants\n elif is_value_input and is_value_output:\n self._compute_metric = self._compute_metric_value\n elif is_kv_input and is_kv_output:\n self._compute_metric = self._compute_metric_key_value\n else:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def metric_fn(self):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n pass\n\n def _compute_metric_value(self, runner: IRunner):\n output = self._get_output(runner.output, self.output_key)\n input = self._get_input(runner.input, self.input_key)\n\n metric = self.metric_fn(output, input, **self.metrics_kwargs)\n return metric\n\n def _compute_metric_key_value(self, runner: IRunner):\n output = self._get_output(runner.output, self.output_key)\n input = self._get_input(runner.input, self.input_key)\n\n metric = self.metric_fn(**output, **input, **self.metrics_kwargs)\n return metric\n\n def on_batch_end(self, runner: IRunner) -> None:\n \"\"\"Computes the metric and add it to batch metrics.\"\"\"\n metric = self._compute_metric(runner) * self.multiplier\n runner.batch_metrics[self.prefix] = metric\n\n\nclass MetricCallback(_MetricCallback):\n \"\"\"A callback that returns single metric on `runner.on_batch_end`.\"\"\"\n\n def __init__(\n self,\n prefix: str,\n metric_fn: Callable,\n input_key: Union[str, List[str], Dict[str, str]] = \"targets\",\n output_key: Union[str, List[str], Dict[str, str]] = \"logits\",\n multiplier: float = 1.0,\n **metric_kwargs,\n ):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(\n prefix=prefix,\n input_key=input_key,\n output_key=output_key,\n multiplier=multiplier,\n **metric_kwargs,\n )\n self.metric = metric_fn\n\n @property\n def metric_fn(self):\n \"\"\"@TODO: Docs. 
Contribution is welcome.\"\"\"\n return self.metric\n\n\nclass MultiMetricCallback(MetricCallback):\n \"\"\"A callback that returns multiple metrics on `runner.on_batch_end`.\"\"\"\n\n def __init__(\n self,\n prefix: str,\n metric_fn: Callable,\n list_args: List,\n input_key: Union[str, List[str], Dict[str, str]] = \"targets\",\n output_key: Union[str, List[str], Dict[str, str]] = \"logits\",\n multiplier: float = 1.0,\n **metrics_kwargs,\n ):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(\n prefix=prefix,\n metric_fn=metric_fn,\n input_key=input_key,\n output_key=output_key,\n multiplier=multiplier,\n **metrics_kwargs,\n )\n self.list_args = list_args\n\n def on_batch_end(self, runner: IRunner) -> None:\n \"\"\"Batch end hook.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n metrics_ = self._compute_metric(runner)\n\n for arg, metric in zip(self.list_args, metrics_):\n if isinstance(arg, int):\n key = f\"{self.prefix}{arg:02}\"\n else:\n key = f\"{self.prefix}_{arg}\"\n runner.batch_metrics[key] = metric * self.multiplier\n\n\nclass MetricAggregationCallback(Callback):\n \"\"\"A callback to aggregate several metrics in one value.\"\"\"\n\n def __init__(\n self,\n prefix: str,\n metrics: Union[str, List[str], Dict[str, float]] = None,\n mode: str = \"mean\",\n multiplier: float = 1.0,\n ) -> None:\n \"\"\"\n Args:\n prefix (str): new key for aggregated metric.\n metrics (Union[str, List[str], Dict[str, float]]): If not None,\n it aggregates only the values from the metric by these keys.\n for ``weighted_sum`` aggregation it must be a Dict[str, float].\n mode (str): function for aggregation.\n Must be either ``sum``, ``mean`` or ``weighted_sum``.\n multiplier (float): scale factor for the aggregated metric.\n \"\"\"\n super().__init__(\n order=CallbackOrder.MetricAggregation, node=CallbackNode.All\n )\n\n if prefix is None or not isinstance(prefix, str):\n raise ValueError(\"prefix must be str\")\n\n if mode in (\"sum\", \"mean\"):\n if metrics is not None and not isinstance(metrics, list):\n raise ValueError(\n \"For `sum` or `mean` mode the metrics must be \"\n \"None or list or str (not dict)\"\n )\n elif mode in (\"weighted_sum\", \"weighted_mean\"):\n if metrics is None or not isinstance(metrics, dict):\n raise ValueError(\n \"For `weighted_sum` or `weighted_mean` mode \"\n \"the metrics must be specified \"\n \"and must be a dict\"\n )\n else:\n raise NotImplementedError(\n \"mode must be `sum`, `mean` \"\n \"or `weighted_sum` or `weighted_mean`\"\n )\n\n if isinstance(metrics, str):\n metrics = [metrics]\n\n self.prefix = prefix\n self.metrics = metrics\n self.mode = mode\n self.multiplier = multiplier\n\n if mode in (\"sum\", \"weighted_sum\", \"weighted_mean\"):\n self.aggregation_fn = (\n lambda x: torch.sum(torch.stack(x)) * multiplier\n )\n if mode == \"weighted_mean\":\n weights_sum = sum(metrics.items())\n self.metrics = {\n key: weight / weights_sum\n for key, weight in metrics.items()\n }\n elif mode == \"mean\":\n self.aggregation_fn = (\n lambda x: torch.mean(torch.stack(x)) * multiplier\n )\n\n def _preprocess(self, metrics: Any) -> List[float]:\n if self.metrics is not None:\n if self.mode == \"weighted_sum\":\n result = [\n metrics[key] * value for key, value in self.metrics.items()\n ]\n else:\n result = [metrics[key] for key in self.metrics]\n else:\n result = list(metrics.values())\n return result\n\n def on_batch_end(self, runner: IRunner) -> None:\n \"\"\"Computes the metric and add it to the metrics.\n\n Args:\n runner (IRunner): 
current runner\n \"\"\"\n metrics = self._preprocess(runner.batch_metrics)\n metric = self.aggregation_fn(metrics)\n runner.batch_metrics[self.prefix] = metric\n\n\nclass MetricManagerCallback(Callback):\n \"\"\"\n Prepares metrics for logging, transferring values from PyTorch to numpy.\n \"\"\"\n\n def __init__(self):\n \"\"\"@TODO: Docs. Contribution is welcome.\"\"\"\n super().__init__(\n order=CallbackOrder.Logging - 1, node=CallbackNode.All,\n )\n self.meters: Dict[str, meters.AverageValueMeter] = None\n\n @staticmethod\n def _to_single_value(value: Any) -> float:\n if hasattr(value, \"item\"):\n value = value.item()\n\n value = float(value)\n return value\n\n @staticmethod\n def _process_metrics(metrics: Dict[str, Any]):\n output = {}\n for key, value in metrics.items():\n value = utils.get_distributed_mean(value)\n value = MetricManagerCallback._to_single_value(value)\n output[key] = value\n return output\n\n def on_epoch_start(self, runner: IRunner) -> None:\n \"\"\"Epoch start hook.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n runner.epoch_metrics = defaultdict(None)\n\n def on_loader_start(self, runner: IRunner) -> None:\n \"\"\"Loader start hook.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n runner.loader_metrics = defaultdict(None)\n self.meters = defaultdict(meters.AverageValueMeter)\n\n def on_loader_end(self, runner: IRunner) -> None:\n \"\"\"Loader end hook.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n for key, value in self.meters.items():\n value = value.mean\n runner.loader_metrics[key] = value\n for key, value in runner.loader_metrics.items():\n runner.epoch_metrics[f\"{runner.loader_name}_{key}\"] = value\n\n def on_batch_start(self, runner: IRunner) -> None:\n \"\"\"Batch start hook.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n runner.batch_metrics = defaultdict(None)\n\n def on_batch_end(self, runner: IRunner) -> None:\n \"\"\"Batch end hook.\n\n Args:\n runner (IRunner): current runner\n \"\"\"\n runner.batch_metrics = self._process_metrics(runner.batch_metrics)\n for key, value in runner.batch_metrics.items():\n self.meters[key].add(value, runner.batch_size)\n\n\n__all__ = [\n \"_MetricCallback\",\n \"MetricCallback\",\n \"MultiMetricCallback\",\n \"MetricAggregationCallback\",\n \"MetricManagerCallback\",\n]\n" ]
[ [ "torch.stack" ] ]
SCUT-AILab/CRN_tvqa
[ "0680ed828208ec8c104965438fa0b1cd2010df1f" ]
[ "pythia/scripts/features/extract_resnet152_feat.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\r\nfrom __future__ import (absolute_import, division, print_function,\r\n unicode_literals)\r\n\r\nimport argparse\r\nimport os\r\nfrom glob import glob\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torchvision.models as models\r\nimport torchvision.transforms as transforms\r\nfrom PIL import Image\r\nfrom torch.autograd import Variable\r\n\r\nTARGET_IMAGE_SIZE = [448, 448]\r\nCHANNEL_MEAN = [0.485, 0.456, 0.406]\r\nCHANNEL_STD = [0.229, 0.224, 0.225]\r\ndata_transforms = transforms.Compose(\r\n [\r\n transforms.Resize(TARGET_IMAGE_SIZE),\r\n transforms.ToTensor(),\r\n transforms.Normalize(CHANNEL_MEAN, CHANNEL_STD),\r\n ]\r\n)\r\n\r\nuse_cuda = torch.cuda.is_available()\r\n\r\n# NOTE feat path \"https://download.pytorch.org/models/resnet152-b121ed2d.pth\"\r\nRESNET152_MODEL = models.resnet152(pretrained=True)\r\nRESNET152_MODEL.eval()\r\n\r\nif use_cuda:\r\n RESNET152_MODEL = RESNET152_MODEL.cuda()\r\n\r\n\r\nclass ResNet152FeatModule(nn.Module):\r\n def __init__(self):\r\n super(ResNet152FeatModule, self).__init__()\r\n modules = list(RESNET152_MODEL.children())[:-2]\r\n self.feature_module = nn.Sequential(*modules)\r\n\r\n def forward(self, x):\r\n return self.feature_module(x)\r\n\r\n\r\n_resnet_module = ResNet152FeatModule()\r\nif use_cuda:\r\n _resnet_module = _resnet_module.cuda()\r\n\r\n\r\ndef extract_image_feat(img_file):\r\n img = Image.open(img_file).convert(\"RGB\")\r\n img_transform = data_transforms(img)\r\n # make sure grey scale image is processed correctly\r\n if img_transform.shape[0] == 1:\r\n img_transform = img_transform.expand(3, -1, -1)\r\n img_var = Variable(img_transform.unsqueeze(0))\r\n if use_cuda:\r\n img_var = img_var.cuda()\r\n\r\n img_feat = _resnet_module(img_var)\r\n return img_feat\r\n\r\n\r\ndef get_image_id(image_name):\r\n image_id = int(image_name.split(\".\")[0].split(\"_\")[-1])\r\n return image_id\r\n\r\n\r\ndef extract_dataset_pool5(image_dir, save_dir, total_group, group_id, ext_filter):\r\n image_list = glob(image_dir + \"/*.\" + ext_filter)\r\n if not os.path.exists(save_dir):\r\n os.makedirs(save_dir)\r\n\r\n for n_im, impath in enumerate(image_list):\r\n if (n_im + 1) % 100 == 0:\r\n print(\"processing %d / %d\" % (n_im + 1, len(image_list)))\r\n image_name = os.path.basename(impath)\r\n image_id = get_image_id(image_name)\r\n if image_id % total_group != group_id:\r\n continue\r\n\r\n feat_name = image_name.replace(ext_filter, \"npy\")\r\n save_path = os.path.join(save_dir, feat_name)\r\n tmp_lock = save_path + \".lock\"\r\n\r\n if os.path.exists(save_path) and not os.path.exists(tmp_lock):\r\n continue\r\n if not os.path.exists(tmp_lock):\r\n os.makedirs(tmp_lock)\r\n\r\n # pool5_val = extract_image_feat(impath).permute(0, 2, 3, 1)\r\n try:\r\n pool5_val = extract_image_feat(impath).permute(0, 2, 3, 1)\r\n except:\r\n print(\"error for\" + image_name)\r\n continue\r\n\r\n feat = pool5_val.data.cpu().numpy()\r\n np.save(save_path, feat)\r\n os.rmdir(tmp_lock)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--total_group\", type=int, default=1)\r\n parser.add_argument(\"--group_id\", type=int, default=0)\r\n parser.add_argument(\"--data_dir\", type=str, required=True)\r\n parser.add_argument(\"--out_dir\", type=str, required=True)\r\n parser.add_argument(\"--image_ext\", type=str, default=\"jpg\")\r\n\r\n args = parser.parse_args()\r\n\r\n extract_dataset_pool5(\r\n args.data_dir, args.out_dir, 
args.total_group, args.group_id, args.image_ext\r\n )\r\n" ]
[ [ "torch.nn.Sequential", "numpy.save", "torch.cuda.is_available" ] ]
IOdevelop/composite
[ "878cc8319d4b988f0205969b41f58235f9926514" ]
[ "onadata/apps/api/tools.py" ]
[ "import os\n\nfrom datetime import datetime\nimport numpy as np\nimport inspect\nimport re\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.files.storage import get_storage_class\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites.models import Site\nfrom django.db.models import Q\nfrom django.http import HttpResponseNotFound\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\nfrom django.shortcuts import get_object_or_404\nfrom taggit.forms import TagField\nfrom rest_framework import exceptions\nimport rest_framework.views as rest_framework_views\nfrom registration.models import RegistrationProfile\n\nfrom onadata.apps.api.models.organization_profile import OrganizationProfile\nfrom onadata.apps.api.models.project import Project\nfrom onadata.apps.api.models.project_xform import ProjectXForm\nfrom onadata.apps.api.models.team import Team\nfrom onadata.apps.main.forms import QuickConverter\nfrom onadata.apps.main.models import UserProfile\nfrom onadata.apps.logger.models.xform import XForm\nfrom onadata.apps.viewer.models.parsed_instance import datetime_from_str\nfrom onadata.libs.data.query import get_field_records\nfrom onadata.libs.data.query import get_numeric_fields\nfrom onadata.libs.utils.logger_tools import publish_form\nfrom onadata.libs.utils.logger_tools import response_with_mimetype_and_name\nfrom onadata.libs.utils.user_auth import check_and_set_form_by_id\nfrom onadata.libs.utils.user_auth import check_and_set_form_by_id_string\nfrom onadata.libs.data.statistics import _chk_asarray\nfrom onadata.libs.permissions import get_object_users_with_permissions,\\\n get_role_in_org\nfrom onadata.libs.permissions import OwnerRole, ReadOnlyRole\nfrom onadata.libs.permissions import ROLES\n\nDECIMAL_PRECISION = 2\n\n\ndef _get_first_last_names(name):\n name_split = name.split()\n first_name = name_split[0]\n last_name = u''\n if len(name_split) > 1:\n last_name = u' '.join(name_split[1:])\n return first_name, last_name\n\n\ndef _get_id_for_type(record, mongo_field):\n date_field = datetime_from_str(record[mongo_field])\n mongo_str = '$' + mongo_field\n\n return {\"$substr\": [mongo_str, 0, 10]} if isinstance(date_field, datetime)\\\n else mongo_str\n\n\ndef get_accessible_forms(owner=None, shared_form=False, shared_data=False):\n xforms = XForm.objects.filter()\n\n if shared_form and not shared_data:\n xforms = xforms.filter(shared=True)\n elif (shared_form and shared_data) or \\\n (owner == 'public' and not shared_form and not shared_data):\n xforms = xforms.filter(Q(shared=True) | Q(shared_data=True))\n elif not shared_form and shared_data:\n xforms = xforms.filter(shared_data=True)\n\n if owner != 'public':\n xforms = xforms.filter(user__username=owner)\n\n return xforms.distinct()\n\n\ndef create_organization(name, creator):\n \"\"\"\n Organization created by a user\n - create a team, OwnerTeam with full permissions to the creator\n - Team(name='Owners', organization=organization).save()\n\n \"\"\"\n organization = User.objects.create(username=name)\n organization_profile = OrganizationProfile.objects.create(\n user=organization, creator=creator)\n return organization_profile\n\n\ndef create_organization_object(org_name, creator, attrs={}):\n '''Creates an OrganizationProfile object without saving to the database'''\n name = attrs.get('name', org_name)\n first_name, last_name 
= _get_first_last_names(name)\n email = attrs.get('email', u'')\n new_user = User(username=org_name, first_name=first_name,\n last_name=last_name, email=email, is_active=True)\n new_user.save()\n registration_profile = RegistrationProfile.objects.create_profile(new_user)\n if email:\n site = Site.objects.get(pk=settings.SITE_ID)\n registration_profile.send_activation_email(site)\n profile = OrganizationProfile(\n user=new_user, name=name, creator=creator,\n created_by=creator,\n city=attrs.get('city', u''),\n country=attrs.get('country', u''),\n organization=attrs.get('organization', u''),\n # home_page=attrs.get('home_page', u''),\n # twitter=attrs.get('twitter', u'')\n )\n return profile\n\n\ndef create_organization_team(organization, name, permission_names=[]):\n organization = organization.user \\\n if isinstance(organization, OrganizationProfile) else organization\n team = Team.objects.create(organization=organization, name=name)\n content_type = ContentType.objects.get(\n app_label='api', model='organizationprofile')\n if permission_names:\n # get permission objects\n perms = Permission.objects.filter(\n codename__in=permission_names, content_type=content_type)\n if perms:\n team.permissions.add(*tuple(perms))\n return team\n\n\ndef get_organization_members_team(organization):\n \"\"\"Get organization members team\n create members team if it does not exist and add organization owner\n to the members team\"\"\"\n try:\n team = Team.objects.get(\n name=u'%s#%s' % (organization.user.username, 'members'))\n except Team.DoesNotExist:\n team = create_organization_team(organization, 'members')\n add_user_to_team(team, organization.user)\n\n return team\n\n\ndef remove_user_from_organization(organization, user):\n \"\"\"Remove a user from an organization\"\"\"\n team = get_organization_members_team(organization)\n remove_user_from_team(team, user)\n\n\ndef remove_user_from_team(team, user):\n user.groups.remove(team)\n\n\ndef add_user_to_organization(organization, user):\n \"\"\"Add a user to an organization\"\"\"\n team = get_organization_members_team(organization)\n add_user_to_team(team, user)\n\n\ndef add_user_to_team(team, user):\n user.groups.add(team)\n\n\ndef get_organization_members(organization):\n \"\"\"Get members team user queryset\"\"\"\n team = get_organization_members_team(organization)\n\n return team.user_set.all()\n\n\ndef create_organization_project(organization, project_name, created_by):\n \"\"\"Creates a project for a given organization\n :param organization: User organization\n :param project_name\n :param created_by: User with permissions to create projects within the\n organization\n\n :returns: a Project instance\n \"\"\"\n profile = OrganizationProfile.objects.get(user=organization)\n\n if not profile.is_organization_owner(created_by):\n return None\n\n project = Project.objects.create(name=project_name,\n organization=organization,\n created_by=created_by)\n\n return project\n\n\ndef add_team_to_project(team, project):\n \"\"\"Adds a team to a project\n\n :param team:\n :param project:\n\n :returns: True if successful or project has already been added to the team\n \"\"\"\n if isinstance(team, Team) and isinstance(project, Project):\n if not team.projects.filter(pk=project.pk):\n team.projects.add(project)\n return True\n return False\n\n\ndef add_xform_to_project(xform, project, creator):\n \"\"\"Adds an xform to a project\"\"\"\n # remove xform from any previous relation to a project\n xform.projectxform_set.all().delete()\n\n # make new connection\n instance = 
ProjectXForm.objects.create(\n xform=xform, project=project, created_by=creator)\n instance.save()\n\n # check if the project is a public and make the form public\n if project.shared != xform.shared:\n xform.shared = project.shared\n xform.shared_data = project.shared\n xform.save()\n\n for perm in get_object_users_with_permissions(project):\n user = perm['user']\n\n if user != creator:\n ReadOnlyRole.add(user, xform)\n else:\n OwnerRole.add(user, xform)\n\n return instance\n\n\ndef publish_xlsform(request, user, existing_xform=None):\n '''\n If `existing_xform` is specified, that form will be overwritten with the\n new XLSForm\n '''\n if not request.user.has_perm(\n 'can_add_xform',\n UserProfile.objects.get_or_create(user=user)[0]\n ):\n raise exceptions.PermissionDenied(\n detail=_(u\"User %(user)s has no permission to add xforms to \"\n \"account %(account)s\" % {'user': request.user.username,\n 'account': user.username}))\n if existing_xform and not request.user.has_perm(\n 'change_xform', existing_xform):\n raise exceptions.PermissionDenied(\n detail=_(u\"User %(user)s has no permission to change this \"\n \"form.\" % {'user': request.user.username, })\n )\n\n def set_form():\n form = QuickConverter(request.POST, request.FILES)\n if existing_xform:\n return form.publish(user, existing_xform.id_string)\n else:\n return form.publish(user)\n\n return publish_form(set_form)\n\n\ndef publish_project_xform(request, project):\n def set_form():\n form = QuickConverter(request.POST, request.FILES)\n\n return form.publish(project.organization)\n\n xform = None\n\n if 'formid' in request.data:\n xform = get_object_or_404(XForm, pk=request.data.get('formid'))\n else:\n xform = publish_form(set_form)\n\n if isinstance(xform, XForm):\n add_xform_to_project(xform, project, request.user)\n\n return xform\n\n\ndef mode(a, axis=0):\n \"\"\"\n Adapted from\n https://github.com/scipy/scipy/blob/master/scipy/stats/stats.py#L568\n \"\"\"\n a, axis = _chk_asarray(a, axis)\n scores = np.unique(np.ravel(a)) # get ALL unique values\n testshape = list(a.shape)\n testshape[axis] = 1\n oldmostfreq = np.zeros(testshape)\n oldcounts = np.zeros(testshape)\n for score in scores:\n template = (a == score)\n counts = np.expand_dims(np.sum(template, axis), axis)\n mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)\n oldcounts = np.maximum(counts, oldcounts)\n oldmostfreq = mostfrequent\n return mostfrequent, oldcounts\n\n\ndef get_median_for_field(field, xform):\n return np.median(get_field_records(field, xform))\n\n\ndef get_median_for_numeric_fields_in_form(xform, field=None):\n data = {}\n for field_name in [field] if field else get_numeric_fields(xform):\n median = get_median_for_field(field_name, xform)\n data.update({field_name: median})\n return data\n\n\ndef get_mean_for_field(field, xform):\n return np.mean(get_field_records(field, xform))\n\n\ndef get_mean_for_numeric_fields_in_form(xform, field):\n data = {}\n for field_name in [field] if field else get_numeric_fields(xform):\n mean = get_mean_for_field(field_name, xform)\n data.update({field_name: round(mean, DECIMAL_PRECISION)})\n return data\n\n\ndef get_mode_for_field(field, xform):\n a = np.array(get_field_records(field, xform))\n m, count = mode(a)\n return m\n\n\ndef get_mode_for_numeric_fields_in_form(xform, field=None):\n data = {}\n for field_name in [field] if field else get_numeric_fields(xform):\n mode = get_mode_for_field(field_name, xform)\n data.update({field_name: round(mode, DECIMAL_PRECISION)})\n return data\n\n\ndef 
get_min_max_range_for_field(field, xform):\n a = np.array(get_field_records(field, xform))\n _max = np.max(a)\n _min = np.min(a)\n _range = _max - _min\n return _min, _max, _range\n\n\ndef get_min_max_range(xform, field=None):\n data = {}\n for field_name in [field] if field else get_numeric_fields(xform):\n _min, _max, _range = get_min_max_range_for_field(field_name, xform)\n data[field_name] = {'max': _max, 'min': _min, 'range': _range}\n return data\n\n\ndef get_all_stats(xform, field=None):\n data = {}\n for field_name in [field] if field else get_numeric_fields(xform):\n _min, _max, _range = get_min_max_range_for_field(field_name, xform)\n mode = get_mode_for_field(field_name, xform)\n mean = get_mean_for_field(field_name, xform)\n median = get_median_for_field(field_name, xform)\n data[field_name] = {\n 'mean': round(mean, DECIMAL_PRECISION),\n 'median': median,\n 'mode': round(mode, DECIMAL_PRECISION),\n 'max': _max,\n 'min': _min,\n 'range': _range\n }\n return data\n\n\ndef get_xform(formid, request, username=None):\n try:\n formid = int(formid)\n except ValueError:\n username = username is None and request.user.username\n xform = check_and_set_form_by_id_string(username, formid, request)\n else:\n xform = check_and_set_form_by_id(int(formid), request)\n\n if not xform:\n raise exceptions.PermissionDenied(_(\n \"You do not have permission to view data from this form.\"))\n\n return xform\n\n\ndef get_user_profile_or_none(username):\n profile = None\n\n try:\n user = User.objects.get(username=username)\n except User.DoesNotExist:\n pass\n else:\n profile, created = UserProfile.objects.get_or_create(user=user)\n\n return profile\n\n\ndef add_tags_to_instance(request, instance):\n class TagForm(forms.Form):\n tags = TagField()\n\n form = TagForm(request.data)\n\n if form.is_valid():\n tags = form.cleaned_data.get('tags', None)\n\n if tags:\n for tag in tags:\n instance.instance.tags.add(tag)\n instance.save()\n\n\ndef get_media_file_response(metadata):\n if metadata.data_file:\n file_path = metadata.data_file.name\n filename, extension = os.path.splitext(file_path.split('/')[-1])\n extension = extension.strip('.')\n dfs = get_storage_class()()\n\n if dfs.exists(file_path):\n response = response_with_mimetype_and_name(\n metadata.data_file_type,\n filename, extension=extension, show_date=False,\n file_path=file_path, full_mime=True)\n\n return response\n else:\n return HttpResponseNotFound()\n else:\n return HttpResponseRedirect(metadata.data_value)\n\n\ndef check_inherit_permission_from_project(xform_id, user):\n if xform_id == 'public':\n return\n\n try:\n int(xform_id)\n except ValueError:\n return\n\n # get the project_xform\n projects_xform = ProjectXForm.objects.filter(xform=xform_id)\n\n if not projects_xform:\n return\n\n # get and compare the project role to the xform role\n project_role = get_role_in_org(user, projects_xform[0].project)\n xform_role = get_role_in_org(user, projects_xform[0].xform)\n\n # if diff set the project role to the xform\n if xform_role != project_role:\n _set_xform_permission(project_role, user, projects_xform[0].xform)\n\n\ndef _set_xform_permission(role, user, xform):\n role_class = ROLES.get(role)\n\n if role_class:\n role_class.add(user, xform)\n\ndef get_view_name(view_cls, suffix=None):\n ''' Override Django REST framework's name for the base API class '''\n # The base API class should inherit directly from APIView. 
We can't use\n # issubclass() because ViewSets also inherit (indirectly) from APIView.\n try:\n if inspect.getmro(view_cls)[1] is rest_framework_views.APIView:\n return 'KoBo Api' # awkward capitalization for consistency\n except KeyError:\n pass\n return rest_framework_views.get_view_name(view_cls, suffix)\n\ndef get_view_description(view_cls, html=False):\n ''' Replace example.com in Django REST framework's default API description\n with the domain name of the current site '''\n domain = Site.objects.get_current().domain\n description = rest_framework_views.get_view_description(view_cls,\n html)\n # description might not be a plain string: e.g. it could be a SafeText\n # to prevent further HTML escaping\n original_type = type(description)\n description = original_type(re.sub(\n '(https*)://example.com',\n '\\\\1://{}'.format(domain),\n description\n ))\n return description\n" ]
[ [ "numpy.maximum", "numpy.sum", "numpy.min", "numpy.max", "numpy.ravel", "numpy.zeros", "numpy.where" ] ]
JahodaPaul/FIT_CTU
[ "2d96f18c7787ddfe340a15a36da6eea910225461" ]
[ "Thesis - Autonomous Car Chasing/RC_Version/SemanticSegmentation.py" ]
[ "import numpy as np\nimport math\n\nclass SemanticSegmentation:\n def __init__(self):\n self.counter = 0\n self.imageWidth = 0\n self.imageHeight = 0\n\n self.CoordRectangles = [] #y,x\n\n self.lastN = 5\n self.lastNX = []\n self.lastNY = []\n self.exponentialMovingAverageX = 0\n self.exponentialMovingAverageY = 0\n self.alpha = 0.5\n\n self.size_w = 0\n self.size_h = 0\n\n self.bboxInARow = 0\n\n def EuclidianDistance(self,x1,x2,y1,y2):\n return math.sqrt((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2))\n\n\n def BresenhamLineSample(self,arr, k):\n if k >= len(arr):\n return arr\n else:\n x0 = 0\n x1 = k - 1\n y0 = 0\n y1 = len(arr) - (k + 1)\n\n dx = x1 - x0\n dy = abs(y1 - y0)\n D = 2 * dy - dx\n y = y0\n res = []\n counter = 0\n\n for x in range(x0, x1 + 1):\n res.append(arr[counter])\n counter += 1\n while D > 0 and (x != x1 or y != y1):\n y = y + (1 if y1 >= y0 else -1)\n counter += 1\n D = D - 2 * dx\n D = D + 2 * dy\n return res\n\n def BresenhamLine(self,x0, y0, x1, y1):\n counter = 0\n if x0 > x1:\n tmpX = x1\n tmpY = y1\n x1 = x0\n x0 = tmpX\n y1 = y0\n y0 = tmpY\n\n coords = []\n dx = x1 - x0\n dy = abs(y1 - y0)\n D = 2 * dy - dx\n y = y0\n\n for x in range(x0, x1 + 1):\n # if counter%3==0:\n coords.append([x, y])\n counter += 1\n while D > 0 and (x!=x1 or y != y1):\n y = y + (1 if y1 >= y0 else -1)\n # if counter % 3 == 0:\n coords.append([x, y])\n counter += 1\n D = D - 2 * dx\n D = D + 2 * dy\n return coords\n\n def LimitAngles(self,angle):\n return min(max(angle,-175),175)\n\n def GetPercentage(self,middleX,xCoord, currentPredictedX, otherSide=False):\n overallDistX = abs(currentPredictedX - middleX)\n distFromMiddle = abs(middleX - xCoord)\n percentage = distFromMiddle / overallDistX\n # percentage = percentage * percentage\n if otherSide:\n percentage = -1 * percentage\n return percentage\n\n def FindClosestRect(self,x,y):\n smallestVal = 10000000\n smallestIndex = 0\n for i in range(len(self.CoordRectangles)):\n if abs(self.CoordRectangles[i][0] - y) + abs(self.CoordRectangles[i][1] - x) < smallestVal:\n smallestVal = abs(self.CoordRectangles[i][0] - y) + abs(self.CoordRectangles[i][1] - x)\n smallestIndex = i\n return smallestIndex\n\n def FindPossibleAngle(self,bbox,maxAngle,drivableIndexes,width,height):\n x_Middle = 0\n y_Middle = 0\n\n n_cols = 10\n n_rows = 10\n self.size_w = width // n_cols\n self.size_h = height // n_rows\n\n rows = height // self.size_h\n cols = width // self.size_w\n if len(self.CoordRectangles) == 0:\n for i in range(rows):\n for j in range(cols):\n self.CoordRectangles.append([i * self.size_h + (self.size_h//2),j * self.size_w + (self.size_w//2)])\n\n\n if len(bbox) != 0:\n # print(bbox)\n # points = [[int(bbox[i, 0]), int(bbox[i, 1])] for i in range(len(bbox))]\n x_Middle = (bbox[0] + bbox[2]) // 2\n y_Middle = int(bbox[3]) # max\n self.lastNX.append(x_Middle)\n self.lastNY.append(y_Middle)\n if len(self.lastNX) > self.lastN:\n self.lastNX = self.lastNX[1:]\n self.lastNY = self.lastNY[1:]\n\n if self.bboxInARow == 0:\n self.bboxInARow = 1\n self.lastNX.append(x_Middle)\n self.lastNY.append(y_Middle)\n if len(self.lastNX) > self.lastN:\n self.lastNX = self.lastNX[1:]\n self.lastNY = self.lastNY[1:]\n\n alpha = self.alpha if len(self.lastNX) > 1 else 1\n self.exponentialMovingAverageX = alpha * x_Middle + (1 - alpha) * self.exponentialMovingAverageX\n self.exponentialMovingAverageY = alpha * y_Middle + (1 - alpha) * self.exponentialMovingAverageY\n\n self.imageHeight = height\n self.imageWidth = width\n\n if self.counter >= 30:\n #Find bounding box\n if 
len(bbox) != 0:\n x_Middle = (bbox[0] + bbox[2]) // 2\n y_Middle = int(bbox[3]) # max\n else:\n if len(self.lastNX) >= 2:\n self.bboxInARow = 0\n x_Middle = 2 * self.lastNX[-1] - self.lastNX[-2] # simple extrapolation\n y_Middle = 2 * self.lastNY[-1] - self.lastNY[-2] # simple extrapolation\n\n self.exponentialMovingAverageX = self.alpha * x_Middle + (1 - self.alpha) * self.exponentialMovingAverageX\n self.exponentialMovingAverageY = self.alpha * y_Middle + (1 - self.alpha) * self.exponentialMovingAverageY\n\n self.lastNX.append(x_Middle)\n self.lastNY.append(y_Middle)\n if len(self.lastNX) > self.lastN:\n self.lastNX = self.lastNX[1:]\n self.lastNY = self.lastNY[1:]\n x_Middle = self.exponentialMovingAverageX\n y_Middle = self.exponentialMovingAverageY\n y_Middle += self.size_h\n\n closestRectIndex = self.FindClosestRect(x_Middle, y_Middle)\n tmp = closestRectIndex%10\n\n coords = self.BresenhamLine(self.imageWidth // 2, self.imageHeight - 1, self.CoordRectangles[closestRectIndex][1], self.CoordRectangles[closestRectIndex][0])\n coords = self.BresenhamLineSample(coords,8)\n possible = True\n for i in range(len(coords)):\n closestRectIndex = self.FindClosestRect(coords[i][0], coords[i][1])\n if drivableIndexes[closestRectIndex] == 0:\n possible = False\n if possible:\n # Can drive straight\n return maxAngle, drivableIndexes\n else:\n # Need to find another path\n closestRectIndex = self.FindClosestRect(x_Middle, y_Middle)\n line = closestRectIndex//10 #TODO if the number of rectangles changes\n if line == 9:\n return 0, drivableIndexes\n drivability = []\n closeness = []\n\n goodnessScore = []\n mostDrivableIndex = 0\n minn = 0; maxx = 10\n if tmp < 4:\n minn = tmp\n elif tmp > 5:\n maxx = tmp\n for j in range(minn,maxx):\n closestRectIndex = line*10+j\n coords = self.BresenhamLine(self.imageWidth // 2, self.imageHeight - 1,self.CoordRectangles[closestRectIndex][1],self.CoordRectangles[closestRectIndex][0])\n coords = self.BresenhamLineSample(coords, 8)\n\n\n current = 0\n for i in range(len(coords)):\n closestRectIndex = self.FindClosestRect(coords[i][0], coords[i][1])\n if drivableIndexes[closestRectIndex] == 1:\n current += 1\n drivability.append(current)\n closeness.append(self.EuclidianDistance(self.CoordRectangles[closestRectIndex][1],x_Middle,self.CoordRectangles[closestRectIndex][0],y_Middle))\n closeness = np.array(closeness)/float(np.max(closeness))\n closeness = 1.0 - closeness\n for i in range(len(drivability)):\n goodnessScore.append(closeness[i]+float(drivability[i]))\n mostDrivableIndex = line*10 + minn+np.argmax(goodnessScore)\n\n percentage = self.GetPercentage(self.imageWidth//2,self.CoordRectangles[mostDrivableIndex][1],x_Middle)\n # Trick to see if the drivable x coordinate and extrapoled X coordinate are on the same side of the image\n if (self.CoordRectangles[mostDrivableIndex][1] - self.imageWidth//2) * (x_Middle - self.imageWidth//2) < 0:\n percentage = percentage*-1\n return self.LimitAngles(maxAngle*percentage), drivableIndexes\n else:\n self.counter += 1\n return self.LimitAngles(maxAngle), []\n return self.LimitAngles(maxAngle), drivableIndexes" ]
[ [ "numpy.max", "numpy.array", "numpy.argmax" ] ]
piiq/SlicerNotebooks
[ "c02ccab403723aa48fa205a171b54cbdf08ed9a6" ]
[ "SlicerPlayground/playground_utils.py" ]
[ "\"\"\"\nUtility fuctions for Slicer Playground.\n\nThese functions are copies from notebooks where they were created to enable reuse.\n\"\"\"\nimport numpy as np\nimport vtk\nimport slicer\nfrom emoji import UNICODE_EMOJI\n\n\ndef create_np_text_img(text: str, size: tuple = (128, 128),\n font_size: int = 24, emoj_size: int = 64) -> np.ndarray:\n \"\"\"\n Create a numpy text image.\n\n Creates a text-on-background image and returns it as a flat 3D numpy array.\n\n Check font paths when copying this function.\n The font paths should point to actual true-type font files on the disk.\n\n :param text: Input unicode text.\n :type text: str\n :param size: Target image size (optional).\n :type size: tuple\n :param font_size: Font size of the text (optional).\n :type font_size: int\n\n :returns: Flat 3D numpy array containing pixel values.\n :rtype: np.ndarray\n \"\"\"\n from PIL import Image, ImageDraw, ImageFont\n if bool(set(text).intersection(UNICODE_EMOJI)):\n font_path = \"/System/Library/Fonts/Apple Color Emoji.ttc\"\n font = ImageFont.truetype(font_path, emoj_size)\n else:\n font_path = \"/System/Library/Fonts/Microsoft/Arial Black.ttf\"\n font = ImageFont.truetype(font_path, font_size)\n\n text_width, text_height = font.getsize(text)\n\n text_image = Image.new('I', size, \"black\")\n draw = ImageDraw.Draw(text_image)\n draw.text((text_width/2, text_height/2), text, 'white', font)\n return np.asarray(text_image).reshape(*size, 1)\n\n\ndef show_slice_in_slice_view(volumeNode: slicer.vtkMRMLScalarVolumeNode,\n sliceNum: int = 0,\n sliceView: str = 'Red'):\n \"\"\"\n Render a numpy image on slice view.\n\n :param volumeNode: The volume node\n :type volumeNode: vtkMRMLScalarVolumeNode\n :param sliceNum: The number of the slice that we want to show. Optional. 
Defaults to 0.\n :type sliceNum: int\n :param sliceView: One of default slice views ('Red', 'Green', 'Yellow')\n :type sliceView: str\n \"\"\"\n sliceViewWidget = slicer.app.layoutManager().sliceWidget(sliceView)\n sliceWidgetLogic = sliceViewWidget.sliceLogic()\n sliceWidgetLogic.GetSliceCompositeNode().SetBackgroundVolumeID(volumeNode.GetID())\n sliceWidgetLogic.FitSliceToAll()\n sliceWidgetLogic.SetSliceOffset(sliceNum)\n pass\n\n\ndef fit_slice_view(sliceView: str = 'all'):\n \"\"\"\n Fit slice field of view to data.\n\n :param sliceView: Either one of default slice views ['Red', 'Green', 'Yellow'] or 'all'.\n :type sliceView: str\n \"\"\"\n if sliceView == 'all':\n sliceView = slicer.app.layoutManager().sliceViewNames()\n elif sliceView in ['Red', 'Yellow', 'Green']:\n sliceView = [sliceView]\n for sv in sliceView:\n sliceViewWidget = slicer.app.layoutManager().sliceWidget(sv)\n sliceWidgetLogic = sliceViewWidget.sliceLogic()\n sliceWidgetLogic.FitSliceToAll()\n pass\n\n\ndef log_image_info(volume: slicer.vtkMRMLScalarVolumeNode):\n \"\"\"Log basic image information to console.\"\"\"\n print(f'Volume name: {volume.GetName()}')\n print(f'Origin: {volume.GetOrigin()}')\n print(f'Spacing: {volume.GetSpacing()}')\n print(f'Dimensions: {volume.GetImageData().GetDimensions()}\\n')\n\n\ndef layout_3_volumes(volumeList: list):\n \"\"\"Prepare 3x3 layout with 3 volumes in slice views.\"\"\"\n ORIENTATIONS = [\"Axial\", \"Sagittal\", \"Coronal\"]\n THREE_BY_THREE_SLICES = [['Red', 'Yellow', 'Green'],\n ['Slice4', 'Slice5', 'Slice6'],\n ['Slice7', 'Slice8', 'Slice9']]\n for volumeIndex in range(3):\n inputVolumeNode = slicer.mrmlScene.GetFirstNodeByName(volumeList[volumeIndex])\n for view in THREE_BY_THREE_SLICES[volumeIndex]:\n show_slice_in_slice_view(volumeNode=inputVolumeNode,\n sliceView=view)\n sliceWidgetNode = slicer.app.layoutManager().sliceWidget(view).mrmlSliceNode()\n sliceWidgetNode.SetOrientation(ORIENTATIONS[THREE_BY_THREE_SLICES[volumeIndex].index(view)])\n\n\ndef create_seed_geometry(seedPositions: list, seedSize: int) -> vtk.vtkPolyData:\n \"\"\"\n Create spheres at given positions.\n\n :param seedPositions: A list of lists of seed coordinates [r, a, s]\n :type seedPositions: list\n :param seedSize: The sphere diameter\n :type seedSize: int\n\n :returns: A vtk filter that has vtkPolyData as output\n :rtype: vtkPolyData\n \"\"\"\n seedGeometry = vtk.vtkAppendPolyData()\n for position in seedPositions:\n seed = vtk.vtkSphereSource()\n seed.SetCenter(position)\n seed.SetRadius(seedSize)\n seed.Update()\n seedGeometry.AddInputData(seed.GetOutput())\n seedGeometry.Update()\n return seedGeometry\n\n\ndef rotate_x(geometry: vtk.vtkPolyData,\n angle: int,\n centerPoint: list) -> vtk.vtkTransformPolyDataFilter:\n \"\"\"\n Rotate vtkPolyData axainst X axis.\n\n Generates a transform filter\n and applies rotation to a vtkPolyData input.\n\n :param geometry: The geometry\n :type geometry: vtkPolyData\n :param angle: The angle (degrees)\n :type angle: int\n :param centerPoint: Coordinates of the PolyData center\n :type centerPoint: list\n\n :returns: A vtk filter that has the rotated data as output.\n :rtype: vtkTransformPolyDataFilter\n \"\"\"\n transform = vtk.vtkTransform()\n transform.Translate(centerPoint[0], centerPoint[1], centerPoint[2])\n transform.RotateX(angle)\n transform.Translate(-centerPoint[0], -centerPoint[1], -centerPoint[2])\n transformFilter = vtk.vtkTransformPolyDataFilter()\n transformFilter.SetTransform(transform)\n 
transformFilter.SetInputConnection(geometry.GetOutputPort())\n transformFilter.Update()\n return transformFilter\n" ]
[ [ "numpy.asarray" ] ]
saalfeldlab/gunpowder-stardist
[ "eb099c7a095d6f5adb0cb3e1626cbb2bd6bf260c" ]
[ "setup.py" ]
[ "import os\nfrom setuptools import find_packages, setup, Extension\nfrom numpy.distutils.misc_util import get_numpy_include_dirs\n\nNAME = \"gpstardist\"\nDESCRIPTION = \"Gunpowder node for stardist computation\"\nURL = \"https://github.com/saalfeldlab/gunpowder-stardist\"\nEMAIL = \"heinrichl@janelia.hhmi.org\"\nAUTHOR = \"Larissa Heinrich\"\nREQUIRES_PYTHON = \">=3.6\"\nVERSION = \"0.1.dev1\"\n\nREQUIRED = [\n \"tensorflow_gpu<1.15\",\n \"numpy\",\n \"stardist\",\n \"gunpowder\"\n]\n\nEXTRAS = {\n 'examples': ['raster_geometry', 'zarr']\n}\n\nDEPENDENCY_LINKS = [\n]\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, \"README.md\"), \"r\") as f:\n LONG_DESCRIPTION = \"\\n\" + f.read()\n\nwith open(os.path.join(here, \"ACKNOWLEDGMENTS\"), \"r\") as f:\n LONG_DESCRIPTION += \"\\n\\n\"\n LONG_DESCRIPTION += f.read()\n\n\nEXTENSION = Extension(\n 'gpstardist.lib.stardist3d_custom',\n sources=['gpstardist/lib/stardist3d_custom.cpp', 'gpstardist/lib/stardist3d_custom_impl.cpp'] ,\n extra_compile_args = ['-std=c++11'],\n include_dirs=get_numpy_include_dirs(),\n )\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n author=AUTHOR,\n author_email=EMAIL,\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(),\n ext_modules=[EXTENSION,],\n entry_points={},\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n license=\"BSD-2-Clause\",\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n)\n" ]
[ [ "numpy.distutils.misc_util.get_numpy_include_dirs" ] ]
tweigel-dev/CSIKit
[ "6c8037dddca8ed87b8e1f86d0e0255f95c8a86d1" ]
[ "tests/matlab/nexmon/test_nexmon.py" ]
[ "from CSIKit.reader import NEXBeamformReader\n\nimport numpy as np\n\nimport errno\nimport glob\nimport os\nimport scipy.io\n\n\nclass InconsistentOutputError(RuntimeError):\n def __init__(self, arg):\n self.args = arg\n\ndef test_nexmon_matlab_consistency():\n\n example_dir = os.environ[\"NEX_TEST_EXAMPLE_DIR\"]\n mat_dir = os.environ[\"NEX_TEST_MAT_DIR\"]\n\n if not os.path.isdir(example_dir):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), example_dir)\n\n if not os.path.isdir(mat_dir):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), mat_dir)\n\n # Instantiate reader.\n reader = NEXBeamformReader()\n\n # Load all files from data/nexmon.\n example_files = sorted(glob.glob(os.path.join(example_dir, \"*.pcap\")))\n matlab_files = sorted(glob.glob(os.path.join(mat_dir, \"*.mat\")))\n\n test_count, success_count = 0, 0\n\n for pcap_path, mat_path in zip(example_files, matlab_files):\n\n # Check if the accompanying .mat file exists.\n # pcap_file/mat_file are absolute paths.\n # os.path.basename returns the last part of the path including the file extension.\n # We need the filename without extension, so we use the first element in os.path.splitext.\n pcap_filename = os.path.splitext(os.path.basename(pcap_path))[0]\n mat_filename = os.path.splitext(os.path.basename(mat_path))[0]\n\n if pcap_filename != mat_filename:\n print(\"Unknown PCAP file found in examples: {}\".format(pcap_filename))\n print(\"No accompanying MAT file found. Ensure one has been generated for sanity testing.\")\n print(\"Exiting.\")\n exit(1)\n\n mat_dict = scipy.io.loadmat(mat_path)\n mat_csibuff = mat_dict[\"csi_buff\"]\n\n # MATLAB's complex double comes out to an np.complex128.\n # We use complex64.\n if mat_csibuff.dtype == np.complex128:\n mat_csibuff = mat_csibuff.astype(np.complex64)\n\n pcap_csidata = reader.read_file(pcap_path)\n pcap_csiframes = [x.csi_matrix for x in pcap_csidata.frames]\n pcap_csibuff = np.array(pcap_csiframes, dtype=np.complex64)\n\n # The matrices produced by the MATLAB code do not include singular dimensions.\n # CSIKit includes these for consistency between different CSI matrices.\n # For comparison, we'll remove this with np.squeeze.\n\n pcap_csibuff = np.squeeze(pcap_csibuff)\n\n if not np.allclose(mat_csibuff, pcap_csibuff, atol=1e-8):\n raise InconsistentOutputError(\"Stored MATLAB output does not match CSIKit's generated matrices.\")\n\n # if np.allclose(mat_csibuff, pcap_csibuff, atol=1e-8):\n # print(\"Test Successful: {}\".format(pcap_filename))\n # success_count += 1\n # else:\n # print(\"Test Failed: {}\".format(pcap_filename))\n\n test_count += 1\n\n # print(\"Nexmon Tests complete: {}/{} successful.\".format(success_count, test_count))\n" ]
[ [ "numpy.squeeze", "numpy.array", "numpy.allclose" ] ]
knebiolo/emergent
[ "a6edb20c9907d4122d165eecaeeff33782d24a48" ]
[ "scripts/simple_sim.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 17 21:21:42 2021\n\n@author: KNebiolo\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nfrom matplotlib.patches import Polygon\nfrom matplotlib import colors\nfrom matplotlib.collections import PatchCollection\nimport os\nimport numpy as np\nimport time\nfrom emergent import emergent\nimport pandas as pd\n\n# identify project workspace and parameters\ninputWS = r\"D:\\ABM Simulations\\Data\"\noutputWS = r\"D:\\ABM Simulations\\Output\"\nproj_dir = r\"D:\\ABM Simulations\"\nsimName = 'fack'\nn_frames = 3600 # Define length of simulation, each frame is 1 second of model time\nn_agents = 20\nresultsDB = emergent.dBase(outputWS,simName)\ndbDir = os.path.join(outputWS,simName + '.db')\n\n# Create obstacle dataframes\nobsWS = os.path.join(inputWS,'obstacles')\nobs = emergent.obstacles(obsWS)\nlandWS = os.path.join(inputWS,'land')\nland = emergent.obstacles(landWS)\nobs = obs.append(land)\n\n# Create Origins Dataframe\norigins = pd.read_csv(os.path.join(inputWS,'origin.csv'))\norigins = origins[origins.OBJECTID != 5]\nnodes = origins.OBJECTID.values\n\n# Create Destinations Dataframe\ndestinations = pd.read_csv(os.path.join(inputWS,'destination.csv'))\nfuck\n# start up simulation initialization function\nstart_up = emergent.simulation(proj_dir,simName, n_frames,n_agents,obs, land, origins, destinations)\n\nfuck\n\n# create simulation environment\nfig = plt.figure()\nfig.set_size_inches(14,7,True)\nax = fig.add_subplot(111)\npatches = []\nfor i in agents:\n rotPos = i.shapePos()\n polygon = Polygon(rotPos[:,:2], color = 'b', fill = True, closed = True)\n patches.append(polygon)\np = PatchCollection(patches)\ndestL = []\nfor i in agents:\n destL.append(np.ndarray.tolist(i.dest))\ndestL = np.array(destL)\ndel i\n\nax.plot(destL[:,0],destL[:,1], 'gv')\nax.add_collection(p)\nax.add_collection(f)\nax.set_xlim([726678,769259])\nax.set_ylim([4556396,4586487])\ntext99 = ax.text(727000,4557000,'time: ', fontsize = 8)\n\n# run a simulation!\nt0 = time.time()\nani = animation.FuncAnimation(fig, shippy.simulate, init_func = shippy.sim_init, frames = n_frames, interval = 100, fargs = (agents, 1000000000, n_frames)).save(os.path.join(outputWS,simName + '.mp4'))#, writer = writer, dpi = dpi)\nplt.show()\nt1 = time.time()\nprint (\"animation complete in %s seconds, output saved\"%(round(t1 - t0,3)))\n\n" ]
[ [ "matplotlib.collections.PatchCollection", "matplotlib.animation.FuncAnimation", "numpy.ndarray.tolist", "numpy.array", "matplotlib.patches.Polygon", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
dominicpanarello/inky
[ "fa59811bb5a9aad9eac82da3e58b6dc2ead75b8c" ]
[ "library/inky/inky_uc8159.py" ]
[ "\"\"\"Inky e-Ink Display Driver.\"\"\"\nimport time\nimport struct\n\ntry:\n from PIL import Image\nexcept ImportError:\n Image = None\n\nfrom . import eeprom\n\ntry:\n import numpy\nexcept ImportError:\n raise ImportError('This library requires the numpy module\\nInstall with: sudo apt install python-numpy')\n\nBLACK = 0\nWHITE = 1\nGREEN = 2\nBLUE = 3\nRED = 4\nYELLOW = 5\nORANGE = 6\nCLEAN = 7\n\nDESATURATED_PALETTE = [\n [0, 0, 0],\n [255, 255, 255],\n [0, 255, 0],\n [0, 0, 255],\n [255, 0, 0],\n [255, 255, 0],\n [255, 140, 0],\n [255, 255, 255]\n]\n\nSATURATED_PALETTE = [\n [57, 48, 57],\n [255, 255, 255],\n [58, 91, 70],\n [61, 59, 94],\n [156, 72, 75],\n [208, 190, 71],\n [177, 106, 73],\n [255, 255, 255]\n]\n\nRESET_PIN = 27\nBUSY_PIN = 17\nDC_PIN = 22\n\nMOSI_PIN = 10\nSCLK_PIN = 11\nCS0_PIN = 8\n\nUC8159_PSR = 0x00\nUC8159_PWR = 0x01\nUC8159_POF = 0x02\nUC8159_PFS = 0x03\nUC8159_PON = 0x04\nUC8159_BTST = 0x06\nUC8159_DSLP = 0x07\nUC8159_DTM1 = 0x10\nUC8159_DSP = 0x11\nUC8159_DRF = 0x12\nUC8159_IPC = 0x13\nUC8159_PLL = 0x30\nUC8159_TSC = 0x40\nUC8159_TSE = 0x41\nUC8159_TSW = 0x42\nUC8159_TSR = 0x43\nUC8159_CDI = 0x50\nUC8159_LPD = 0x51\nUC8159_TCON = 0x60\nUC8159_TRES = 0x61\nUC8159_DAM = 0x65\nUC8159_REV = 0x70\nUC8159_FLG = 0x71\nUC8159_AMV = 0x80\nUC8159_VV = 0x81\nUC8159_VDCS = 0x82\nUC8159_PWS = 0xE3\nUC8159_TSSET = 0xE5\n\n_SPI_CHUNK_SIZE = 4096\n_SPI_COMMAND = 0\n_SPI_DATA = 1\n\n_RESOLUTION = {\n (600, 448): (600, 448, 0, 0, 0)\n}\n\n\nclass Inky:\n \"\"\"Inky e-Ink Display Driver.\"\"\"\n\n BLACK = 0\n WHITE = 1\n GREEN = 2\n BLUE = 3\n RED = 4\n YELLOW = 5\n ORANGE = 6\n CLEAN = 7\n\n WIDTH = 600\n HEIGHT = 448\n\n def __init__(self, resolution=(600, 448), colour='multi', cs_pin=CS0_PIN, dc_pin=DC_PIN, reset_pin=RESET_PIN, busy_pin=BUSY_PIN, h_flip=False, v_flip=False, spi_bus=None, i2c_bus=None, gpio=None): # noqa: E501\n \"\"\"Initialise an Inky Display.\n\n :param resolution: (width, height) in pixels, default: (600, 448)\n :param colour: one of red, black or yellow, default: black\n :param cs_pin: chip-select pin for SPI communication\n :param dc_pin: data/command pin for SPI communication\n :param reset_pin: device reset pin\n :param busy_pin: device busy/wait pin\n :param h_flip: enable horizontal display flip, default: False\n :param v_flip: enable vertical display flip, default: False\n\n \"\"\"\n self._spi_bus = spi_bus\n self._i2c_bus = i2c_bus\n\n if resolution not in _RESOLUTION.keys():\n raise ValueError('Resolution {}x{} not supported!'.format(*resolution))\n\n self.resolution = resolution\n self.width, self.height = resolution\n self.border_colour = WHITE\n self.cols, self.rows, self.rotation, self.offset_x, self.offset_y = _RESOLUTION[resolution]\n\n if colour not in ('multi'):\n raise ValueError('Colour {} is not supported!'.format(colour))\n\n self.colour = colour\n self.eeprom = eeprom.read_eeprom(i2c_bus=i2c_bus)\n self.lut = colour\n\n # The EEPROM is used to disambiguate the variants of wHAT and pHAT\n # 1 Red pHAT (High-Temp)\n # 2 Yellow wHAT (1_E)\n # 3 Black wHAT (1_E)\n # 4 Black pHAT (Normal)\n # 5 Yellow pHAT (DEP0213YNS75AFICP)\n # 6 Red wHAT (Regular)\n # 7 Red wHAT (High-Temp)\n # 8 Red wHAT (DEPG0420RWS19AF0HP)\n # 10 BW pHAT (ssd1608) (DEPG0213BNS800F13CP)\n # 11 Red pHAT (ssd1608)\n # 12 Yellow pHAT (ssd1608)\n # if self.eeprom is not None:\n # # Only support new-style variants\n # if self.eeprom.display_variant not in (10, 11, 12):\n # raise RuntimeError('This driver is not compatible with your board.')\n # if self.eeprom.width != 
self.width or self.eeprom.height != self.height:\n # pass\n # # TODO flash correct heights to new EEPROMs\n # # raise ValueError('Supplied width/height do not match Inky: {}x{}'.format(self.eeprom.width, self.eeprom.height))\n\n self.buf = numpy.zeros((self.rows, self.cols), dtype=numpy.uint8)\n\n self.dc_pin = dc_pin\n self.reset_pin = reset_pin\n self.busy_pin = busy_pin\n self.cs_pin = cs_pin\n try:\n self.cs_channel = [8, 7].index(cs_pin)\n except ValueError:\n self.cs_channel = 0\n self.h_flip = h_flip\n self.v_flip = v_flip\n\n self._gpio = gpio\n self._gpio_setup = False\n\n self._luts = None\n\n def _palette_blend(self, saturation, dtype='uint8'):\n saturation = float(saturation)\n palette = []\n for i in range(7):\n rs, gs, bs = [c * saturation for c in SATURATED_PALETTE[i]]\n rd, gd, bd = [c * (1.0 - saturation) for c in DESATURATED_PALETTE[i]]\n if dtype == 'uint8':\n palette += [int(rs + rd), int(gs + gd), int(bs + bd)]\n if dtype == 'uint24':\n palette += [(int(rs + rd) << 16) | (int(gs + gd) << 8) | int(bs + bd)]\n if dtype == 'uint8':\n palette += [255, 255, 255]\n if dtype == 'uint24':\n palette += [0xffffff]\n return palette\n\n def setup(self):\n \"\"\"Set up Inky GPIO and reset display.\"\"\"\n if not self._gpio_setup:\n if self._gpio is None:\n try:\n import RPi.GPIO as GPIO\n self._gpio = GPIO\n except ImportError:\n raise ImportError('This library requires the RPi.GPIO module\\nInstall with: sudo apt install python-rpi.gpio')\n self._gpio.setmode(self._gpio.BCM)\n self._gpio.setwarnings(False)\n self._gpio.setup(self.cs_pin, self._gpio.OUT, initial=self._gpio.HIGH)\n self._gpio.setup(self.dc_pin, self._gpio.OUT, initial=self._gpio.LOW, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.reset_pin, self._gpio.OUT, initial=self._gpio.HIGH, pull_up_down=self._gpio.PUD_OFF)\n self._gpio.setup(self.busy_pin, self._gpio.IN, pull_up_down=self._gpio.PUD_OFF)\n\n if self._spi_bus is None:\n import spidev\n self._spi_bus = spidev.SpiDev()\n\n self._spi_bus.open(0, self.cs_channel)\n self._spi_bus.no_cs = True\n self._spi_bus.max_speed_hz = 3000000\n\n self._gpio_setup = True\n\n self._gpio.output(self.reset_pin, self._gpio.LOW)\n time.sleep(0.1)\n self._gpio.output(self.reset_pin, self._gpio.HIGH)\n time.sleep(0.1)\n\n self._busy_wait()\n\n # Resolution Setting\n # 10bit horizontal followed by a 10bit vertical resolution\n # we'll let struct.pack do the work here and send 16bit values\n # life is too short for manual bit wrangling\n self._send_command(\n UC8159_TRES,\n struct.pack(\">HH\", self.width, self.height))\n\n # Panel Setting\n # 0b11000000 = Resolution select, 0b00 = 640x480, our panel is 0b11 = 600x448\n # 0b00100000 = LUT selection, 0 = ext flash, 1 = registers, we use ext flash\n # 0b00010000 = Ignore\n # 0b00001000 = Gate scan direction, 0 = down, 1 = up (default)\n # 0b00000100 = Source shift direction, 0 = left, 1 = right (default)\n # 0b00000010 = DC-DC converter, 0 = off, 1 = on\n # 0b00000001 = Soft reset, 0 = Reset, 1 = Normal (Default)\n self._send_command(\n UC8159_PSR,\n [\n 0b11101111, # See above for more magic numbers\n 0x08 # display_colours == UC8159_7C\n ]\n )\n\n # Power Settings\n self._send_command(\n UC8159_PWR,\n [\n (0x06 << 3) | # ??? 
- not documented in UC8159 datasheet\n (0x01 << 2) | # SOURCE_INTERNAL_DC_DC\n (0x01 << 1) | # GATE_INTERNAL_DC_DC\n (0x01), # LV_SOURCE_INTERNAL_DC_DC\n 0x00, # VGx_20V\n 0x23, # UC8159_7C\n 0x23 # UC8159_7C\n ]\n )\n\n # Set the PLL clock frequency to 50Hz\n # 0b11000000 = Ignore\n # 0b00111000 = M\n # 0b00000111 = N\n # PLL = 2MHz * (M / N)\n # PLL = 2MHz * (7 / 4)\n # PLL = 2,800,000 ???\n self._send_command(UC8159_PLL, [0x3C]) # 0b00111100\n\n # Send the TSE register to the display\n self._send_command(UC8159_TSE, [0x00]) # Colour\n\n # VCOM and Data Interval setting\n # 0b11100000 = Vborder control (0b001 = LUTB voltage)\n # 0b00010000 = Data polarity\n # 0b00001111 = Vcom and data interval (0b0111 = 10, default)\n cdi = (self.border_colour << 5) | 0x17\n self._send_command(UC8159_CDI, [cdi]) # 0b00110111\n\n # Gate/Source non-overlap period\n # 0b11110000 = Source to Gate (0b0010 = 12nS, default)\n # 0b00001111 = Gate to Source\n self._send_command(UC8159_TCON, [0x22]) # 0b00100010\n\n # Disable external flash\n self._send_command(UC8159_DAM, [0x00])\n\n # UC8159_7C\n self._send_command(UC8159_PWS, [0xAA])\n\n # Power off sequence\n # 0b00110000 = power off sequence of VDH and VDL, 0b00 = 1 frame (default)\n # All other bits ignored?\n self._send_command(\n UC8159_PFS, [0x00] # PFS_1_FRAME\n )\n\n def _busy_wait(self, timeout=15.0):\n \"\"\"Wait for busy/wait pin.\"\"\"\n t_start = time.time()\n while not self._gpio.input(self.busy_pin):\n time.sleep(0.01)\n if time.time() - t_start >= timeout:\n raise RuntimeError(\"Timeout waiting for busy signal to clear.\")\n\n def _update(self, buf, busy_wait=True):\n \"\"\"Update display.\n\n Dispatches display update to correct driver.\n\n :param buf_a: Black/White pixels\n :param buf_b: Yellow/Red pixels\n\n \"\"\"\n self.setup()\n\n self._send_command(UC8159_DTM1, buf)\n self._busy_wait()\n\n self._send_command(UC8159_PON)\n self._busy_wait()\n\n self._send_command(UC8159_DRF)\n self._busy_wait()\n\n self._send_command(UC8159_POF)\n self._busy_wait()\n\n def set_pixel(self, x, y, v):\n \"\"\"Set a single pixel.\n\n :param x: x position on display\n :param y: y position on display\n :param v: colour to set\n\n \"\"\"\n self.buf[y][x] = v & 0x07\n\n def show(self, busy_wait=True):\n \"\"\"Show buffer on display.\n\n :param busy_wait: If True, wait for display update to finish before returning.\n\n \"\"\"\n region = self.buf\n\n if self.v_flip:\n region = numpy.fliplr(region)\n\n if self.h_flip:\n region = numpy.flipud(region)\n\n if self.rotation:\n region = numpy.rot90(region, self.rotation // 90)\n\n buf = region.flatten()\n\n buf = ((buf[::2] << 4) & 0xF0) | (buf[1::2] & 0x0F)\n\n self._update(buf.astype('uint8').tolist(), busy_wait=busy_wait)\n\n def set_border(self, colour):\n \"\"\"Set the border colour.\"\"\"\n if colour in (BLACK, WHITE, GREEN, BLUE, RED, YELLOW, ORANGE, CLEAN):\n self.border_colour = colour\n\n def set_image(self, image, saturation=0.5):\n \"\"\"Copy an image to the display.\n\n :param image: PIL image to copy, must be 600x448\n :param saturation: Saturation for quantization palette - higher value results in a more saturated image\n\n \"\"\"\n if not image.size == (self.width, self.height):\n raise ValueError(\"Image must be ({}x{}) pixels!\".format(self.width, self.height))\n if not image.mode == \"P\":\n if Image is None:\n raise RuntimeError(\"PIL is required for converting images: sudo apt install python-pil python3-pil\")\n palette = self._palette_blend(saturation)\n # Image size doesn't matter since it's just 
the palette we're using\n palette_image = Image.new(\"P\", (1, 1))\n # Set our 7 colour palette (+ clear) and zero out the other 247 colours\n palette_image.putpalette(palette + [0, 0, 0] * 248)\n # Force source image data to be loaded for `.im` to work\n image.load()\n image = image.im.convert(\"P\", True, palette_image.im)\n self.buf = numpy.array(image, dtype=numpy.uint8).reshape((self.cols, self.rows))\n\n def _spi_write(self, dc, values):\n \"\"\"Write values over SPI.\n\n :param dc: whether to write as data or command\n :param values: list of values to write\n\n \"\"\"\n self._gpio.output(self.cs_pin, 0)\n self._gpio.output(self.dc_pin, dc)\n\n if type(values) is str:\n values = [ord(c) for c in values]\n\n try:\n self._spi_bus.xfer3(values)\n except AttributeError:\n for x in range(((len(values) - 1) // _SPI_CHUNK_SIZE) + 1):\n offset = x * _SPI_CHUNK_SIZE\n self._spi_bus.xfer(values[offset:offset + _SPI_CHUNK_SIZE])\n self._gpio.output(self.cs_pin, 1)\n\n def _send_command(self, command, data=None):\n \"\"\"Send command over SPI.\n\n :param command: command byte\n :param data: optional list of values\n\n \"\"\"\n self._spi_write(_SPI_COMMAND, [command])\n if data is not None:\n self._send_data(data)\n\n def _send_data(self, data):\n \"\"\"Send data over SPI.\n\n :param data: list of values\n\n \"\"\"\n if isinstance(data, int):\n data = [data]\n self._spi_write(_SPI_DATA, data)\n" ]
[ [ "numpy.rot90", "numpy.fliplr", "numpy.flipud", "numpy.array", "numpy.zeros" ] ]
BeaAnn/pyfoamsetup
[ "5908aea41bfbde3fad7d6957feab58a50d58e262" ]
[ "pyfoamsetup/PropellerSimulation/PropellerSimulation.py" ]
[ "import numpy as np\nimport os\nimport subprocess\nimport shutil\nimport multiprocessing\nimport sys\nimport copy\nfrom collections import OrderedDict\n\nfrom pyfoamsetup.coreLibrary import *\nimport pyfoamsetup.coreLibrary.CaseSetup as CaseSetup\n\nclass PropellerSimulation(CaseSetup.CaseSetup):\n\tdef __init__(self, runName, c, D, Re, J, fluid='air', rotationAxis='right', pushPull='push'):\n\t\t# Default environment settings\n\t\tif fluid == 'air':\n\t\t\trho = 1.226\n\t\t\tnu = 1.45e-05\n\t\telif fluid == 'water':\n\t\t\trho = 1000\n\t\t\tnu = 1.19e-6\n\n\t\tself.D = D\n\t\tself.r = D/2\n\t\tself.J = J\n\t\tself.n = Re*nu/(np.sqrt((J*D)**2 + (0.7*self.r*2*np.pi)**2)*c)\n\t\tself.omega = 2*np.pi*self.n\n\t\tself.U_r = 0.7*(D/2)*self.omega\n\t\t\n\t\tU = self.J*self.n*self.D\n\t\tA = np.pi*(D/2)**2\n\n\t\tself.rotationAxis = rotationAxis\n\n\t\tpatchList = ['propeller']\n\n\t\t# Call init from base class\n\t\tself.homePath = os.path.dirname(os.path.realpath(__file__))\n\t\tsuper().__init__(runName, patchList, c, U, A, nu, rho, 'PropellerSimulation')\n\n\t\t# Reset reynolds number from input\n\t\tself.Re = Re\n\n\t\t# Time step settings\n\t\tself.maxDegreesPrTimeStep = 2\n\t\tself.numberOfRevolutions = 4\n\n\t\tself.baseSize = 1\n\t\tself.domainWake = 6\n\t\tself.domainFront = 4\n\t\tself.domainWidth = 4\n\n\t\tself.rotatingCylinderRadius = 0.75\n\t\tself.rotatingCylinderLength = 1\n\n\t\tself.setMeshSettings()\n\t\tself.nrLayers = 0\n\n\t\tself.setSolver('pimpleDyMFoam')\n\t\tself.adjustTimeStep = False\n\n\tdef setDefaultCellLengths(self):\n\t\tsuper().setDefaultCellLengths()\n\t\t\n\t\tself.maxBaseSize = 0.1*self.D # Ensure that the geometry is captured!\n\t\tself.maxSmallestSize = 0.01*self.L\n\t\tself.viscousLength = 0.02*self.D\n\t\t\n\tdef writeBlockMesh(self):\n\t\tblockMesh = BlockMesh.Dict()\n\n\t\t# Calculate minimum values for domain size\n\t\txBack = self.domainWake*self.D\n\t\txFront = -self.domainFront*self.D\n\n\t\tyRight = self.domainWidth*self.D\n\t\tyLeft = -self.domainWidth*self.D\n\n\t\tzHeight = self.domainWidth*self.D\n\t\tzDepth = -self.domainWidth*self.D\n\n\t\t# Calculate number of cells in each direction\n\t\tx_nrCells = np.ceil((xBack - xFront)/self.baseSize)\n\t\ty_nrCells = np.ceil((yRight - yLeft)/self.baseSize)\n\t\tz_nrCells = np.ceil((zHeight - zDepth)/self.baseSize)\n\n\t\t# Readjust domain size to fit nr cells\n\t\txLength = self.baseSize*x_nrCells\n\t\tyLength = self.baseSize*y_nrCells\n\t\tzLength = self.baseSize*z_nrCells\n\n\t\twakeFraction = (self.domainWake/(self.domainWake + self.domainFront))\n\t\tfrontFraction = (self.domainFront/(self.domainWake + self.domainFront))\n\t\txFront = -xLength*frontFraction\n\t\txBack = xLength*wakeFraction\n\t\n\t\tyRight = yLength/2\n\t\tyLeft = -yLength/2\n\n\t\t# Add data to blockmesh and write\n\t\tblockMesh.addVertex([xFront, yLeft, zDepth])\n\t\tblockMesh.addVertex([xBack, yLeft, zDepth])\n\t\tblockMesh.addVertex([xBack, yRight, zDepth])\n\t\tblockMesh.addVertex([xFront, yRight, zDepth])\n\n\t\tblockMesh.addVertex([xFront, yLeft, zHeight])\n\t\tblockMesh.addVertex([xBack, yLeft, zHeight])\n\t\tblockMesh.addVertex([xBack, yRight, zHeight])\n\t\tblockMesh.addVertex([xFront, yRight, zHeight])\n\n\t\tblockMesh.addBlock([x_nrCells, y_nrCells, z_nrCells])\n\t\n\t\tblockMesh.addBoundary('inlet', 'patch', [[0, 4, 7, 3],[3, 2, 6, 7], [4, 5, 6, 7], [0, 1, 5, 4], [0, 3, 2, 1]])\n\n\t\tblockMesh.addBoundary('outlet', 'patch', [[2, 6, 5, 1]])\n\n\t\tblockMesh.write(self.systemFolder)\n\n\tdef 
writeMesh(self):\n\t\tself.calculateBaseSize()\n\t\tself.writeBlockMesh()\n\n\t\t# Add geometry\n\t\tself.snappyDict.addGeometry('propeller.obj', 'triSurfaceMesh', {'name':'propeller'})\n\t\tself.snappyDict.addRefinementSurface('propeller', self.maxRefinementLevel-1, self.maxRefinementLevel, self.nrLayers)\n\t\tself.snappyDict.addFeature('propeller.eMesh', self.maxRefinementLevel)\n\n\t\tself.snappyDict.addGeometry('propellerStem.obj', 'triSurfaceMesh', {'name':'propellerStem'})\n\t\tself.snappyDict.addRefinementSurface('propellerStem', self.maxRefinementLevel-3, self.maxRefinementLevel-3, 0)\n\n\t\t# Add cylinders\n\t\tname = 'rotatingCylinder'\n\t\tlength = self.rotatingCylinderLength*self.D\n\t\tradius = self.rotatingCylinderRadius*self.D\n\n\t\tx0 = 0\n\t\tlevel = self.maxRefinementLevel-2\n\n\t\tpoint1String = '({:.6f} {:.6f} {:.6f})'.format(x0, 0, 0)\n\t\tpoint2String = '({:.6f} {:.6f} {:.6f})'.format(x0+length, 0, 0)\n\t\tradiusString = '{:.6f}'.format(radius)\n\n\t\textraArgumentDict = OrderedDict()\n\t\textraArgumentDict['faceType'] = 'boundary'\n\t\textraArgumentDict['cellZone'] = name\n\t\textraArgumentDict['faceZone'] = name\n\t\textraArgumentDict['cellZoneInside'] = 'inside'\n\n\t\tself.snappyDict.addGeometry(name, 'searchableCylinder', {'point1':point1String, 'point2':point2String, 'radius':radiusString})\n\t\tself.snappyDict.addRefinementSurface(name, level, level, 0, extraArgumentDict=extraArgumentDict)\n\t\tself.snappyDict.addRefinementRegion(name, 'inside', np.array([1, level]))\n\n\t\t# Set up layer settings\n\t\tself.snappyDict.addLayersControls['relativeSizes'] = 'false'\n\t\tself.snappyDict.addLayersControls['finalLayerThickness'] = self.t_final\n\t\tself.snappyDict.addLayersControls['minThickness'] = 0.5*self.t_final\n\t\tself.snappyDict.addLayersControls['expansionRatio'] = self.layerExpansion\n\n\t\tself.snappyDict.castellatedMeshControls['locationInMesh'] = '({:.3f} {:.3f} {:.3f})'.format(-1.03*self.D, 1.04*self.D, 1.3*self.D)\n\n\t\tself.snappyDict.castellatedMeshControls['nCellsBetweenLevels'] = int(self.nCellsBetweenLevels)\n\n\t\tself.snappyDict.write(self.systemFolder)\n\t\tself.snappyDict.writeSurfaceFeatureExtractDict(self.systemFolder, 'propeller.obj')\n\n\tdef writeCaseFiles(self):\n\t\t# Recalculate time stepping\n\t\tself.deltaT = np.round(self.maxDegreesPrTimeStep/(self.n*360), decimals=8)\n\t\tself.maxDeltaT = np.round(self.maxDegreesPrTimeStep/(self.n*360), decimals=8)\n\t\tself.endTime = np.round(self.numberOfRevolutions/self.n, decimals=8)\n\t\tself.writeInterval = np.round(self.endTime/10, decimals = 8)\n\n\t\tsuper().writeCaseFiles()\n\n\t\tFileHandling.changeLine(self.constantFolder+'dynamicMeshDict', 'omega', '\\t\\tomega {:.6f};'.format(self.omega))\n\n\t\tif self.rotationAxis == 'left':\n\t\t\tFileHandling.changeLine(self.constantFolder+'dynamicMeshDict', 'axis', '\\t\\taxis (-1 0 0);')\n\n\t\tself.writePropInfo()\n\n\t\tcreatePatchDict = createPatch.Dict()\n\t\tcreatePatchDict.addPatch('AMI1', 'rotatingCylinder', 'AMI2')\n\t\tcreatePatchDict.addPatch('AMI2', 'rotatingCylinder_slave', 'AMI1')\n\t\tcreatePatchDict.write(self.systemFolder)\n\n\tdef writeScripts(self):\n\t\t# ------ Mesh --------------------\n\t\tf = open(self.runFolder+'/mesh.sh', 'w')\n\n\t\tf.write('#!/bin/bash\\n\\n')\n\n\t\tif self.snappyDict.snapControls['explicitFeatureSnap'] == 'true':\n\t\t\tf.write('surfaceFeatureExtract\\n')\n\n\t\tf.write('blockMesh\\n')\n\n\t\tf.write('mv system/decomposeParDict system/decomposeParDict.sim\\n')\n\t\tf.write('mv 
system/decomposeParDict.mesh system/decomposeParDict\\n')\n\t\tf.write('decomposePar\\n')\n\n\t\tf.write('mpirun -np {:.0f} snappyHexMesh -overwrite -parallel\\n'.format(self.nCPUs_mesh))\n\n\t\tf.write('reconstructParMesh -constant\\n')\n\t\tf.write('rm -fr processor*\\n')\n\n\t\tf.write('createPatch -overwrite\\n')\n\n\t\tf.write('mv system/decomposeParDict system/decomposeParDict.mesh\\n')\n\t\tf.write('mv system/decomposeParDict.sim system/decomposeParDict\\n')\n\n\t\tf.write('renumberMesh -overwrite\\n')\n\n\t\tif len(self.topoSetList) > 0:\n\t\t\tf.write('topoSet\\n')\n\n\t\tf.close()\n\n\t\t# ------- Simulation ---------------------\n\t\tf = open(self.runFolder + '/runSim.sh', 'w')\n\n\t\tf.write('#!/bin/bash\\n\\n')\n\t\t\n\t\tf.write('decomposePar\\n')\n\n\t\tif self.vilje:\n\t\t\tf.write('mpiexec ' + self.solver + ' -parallel\\n')\n\t\telse:\n\t\t\tf.write('mpirun -np {:.0f} '.format(self.nCPUs) + self.solver + ' -parallel\\n')\n\n\t\tf.write('reconstructPar\\n')\n\n\t\tf.write('rm -fr processor*\\n')\n\n\t\tf.close()\n\n\tdef addViscousWake(self, x0, y0, z0, lengthFactor = 4, radiusFactor = 1.0, expansion = 2):\n\t\t# Ensure that mesh size is calculated\n\t\tself.turbulence = Turbulence.Properties(self.U, self.L, self.nu, self.turbulenceModel, self.turbulenceType)\n\t\tself.turbulence.calculateInitialValues()\n\t\tself.calculateBaseSize()\n\n\t\tmaxLevel = int(np.floor(np.log(self.baseSize/self.viscousLength)/np.log(2)))\n\t\tprint(maxLevel)\n\n\t\tradius0 = radiusFactor*self.D\n\t\tlength0 = lengthFactor*self.D \n\n\t\tlevel = maxLevel\n\t\tfor i in range(maxLevel):\n\t\t\tcellLength = self.baseSize/(2**level+1)\n\n\t\t\tname = 'viscWake{:.0f}'.format(i+1)\n\t\t\tlength = length0*expansion**(i)\n\t\t\tradius = radius0*expansion**(i)\n\n\t\t\tpoint1String = '({:.6f} {:.6f} {:.6f})'.format(x0, y0, z0)\n\t\t\tpoint2String = '({:.6f} {:.6f} {:.6f})'.format(x0+length, y0, z0)\n\t\t\tradiusString = '{:.6f}'.format(radius)\n\n\t\t\tself.snappyDict.addGeometry(name, 'searchableCylinder', {'point1':point1String, 'point2':point2String, 'radius':radiusString})\n\t\t\tself.snappyDict.addRefinementRegion(name, 'inside', np.array([1, level]))\n\n\t\t\tlevel -= 1\n\n\tdef writePropInfo(self):\n\t\tf = open(self.runFolder + 'propInfo.txt', 'w')\n\n\t\tf.write('D {:.6f}\\n'.format(self.D))\n\t\tf.write('c {:.6f}\\n'.format(self.L))\n\t\tf.write('Re {:.6f}\\n'.format(self.Re))\n\t\tf.write('J {:.6f}\\n'.format(self.J))\n\t\tf.write('n {:.6f}\\n'.format(self.n))\n\t\tf.write('omega {:.6f}\\n'.format(self.omega))\n\t\tf.write('rho {:.6f}\\n'.format(self.rho))\n\t\tf.write('U {:.6f}\\n'.format(self.U))\n\t\tf.write('U_R {:.6f}\\n'.format(self.U_r))\n\n\t\tf.close()\n\nclass ActuatorDisk(CaseSetup):\n\tdef __init__(self, runName, U, D, CT, CQ=0.0, rh_factor=0.1, alpha=0, fluid='air', meshSetting='medium', vilje=False):\n\t\t# Default environment settings\n\t\tif fluid == 'air':\n\t\t\trho = 1.226\n\t\t\tnu = 1.45e-05\n\t\telif fluid == 'water':\n\t\t\trho = 1000\n\t\t\tnu = 1.19e-6\n\n\t\tself.D = D\n\t\tself.r = D/2\n\t\tself.r_h = rh_factor*self.r\n\t\tself.CT = CT\n\t\tself.CQ = CQ\n\t\tself.alpha = alpha\n\t\t\n\t\tA = np.pi*self.r**2\n\n\t\tpatchList = []\n\n\t\t# Call init from base class\n\t\tsuper().__init__(runName, patchList, 0.5*self.r, U, A, nu, rho, vilje)\n\n\t\tself.Thrust = 0.5*self.A*self.CT*self.U**2\n\t\tself.Torque = 0.5*self.A*self.CQ*self.U**2*self.D\n\n\t\t# Essential folder paths\n\t\tself.foamPath = os.environ['FOAM_RUN']\n\t\tself.mainRunFolder = self.foamPath + 
'/PropellerSimulation'\n\t\tself.homePath = os.path.dirname(os.path.realpath(__file__))\n\t\tself.setFolderPaths()\n\n\t\tself.maxBaseSize = 0.5*self.D\n\t\tself.maxSmallestSize = 0.01*self.D \n\t\tself.actuatorDiskLength = 0.05*self.D\n\t\tself.viscousLength = 0.02*self.D\n\n\t\t# Default mesh settings\n\t\tif meshSetting == 'fine':\n\t\t\tself.maxBaseSize /= np.sqrt(2)\n\t\t\tself.maxSmallestSize /= np.sqrt(2)\n\t\t\tself.viscousLength /= np.sqrt(2)\n\t\telif meshSetting == 'veryFine':\n\t\t\tself.maxBaseSize /= 2\n\t\t\tself.maxSmallestSize /= 2\n\t\t\tself.viscousLength /= 2\n\t\telif meshSetting == 'coarse':\n\t\t\tself.maxBaseSize *= np.sqrt(2)\n\t\t\tself.maxSmallestSize *= np.sqrt(2)\n\t\t\tself.viscousLength *= np.sqrt(2)\n\t\telif meshSetting == 'veryCoarse':\n\t\t\tself.maxBaseSize *= 2\n\t\t\tself.maxSmallestSize *= 2\n\t\t\tself.viscousLength *= 2\n\n\t\tself.baseSize = 1\n\t\tself.domainWake = 10\n\t\tself.domainFront = 5\n\t\tself.domainWidth = 5\n\n\t\tself.setMeshSettings()\n\t\tself.setSolver('simpleFoam')\n\n\tdef calculateBaseSize(self):\n\t\tself.smallestSize = self.maxSmallestSize\n\n\t\tself.baseSize = self.smallestSize*2**(self.maxRefinementLevel)\n\t\twhile self.baseSize > self.maxBaseSize and self.maxRefinementLevel > self.minRefinementLevel:\n\t\t\tself.maxRefinementLevel -= 1\n\t\t\tself.baseSize = self.smallestSize*2**(self.maxRefinementLevel)\n\n\tdef writeBlockMesh(self):\n\t\tblockMesh = BlockMesh.Dict()\n\n\t\t# Calculate minimum values for domain size\n\t\txBack = self.domainWake*self.D\n\t\txFront = -self.domainFront*self.D\n\n\t\tyRight = self.domainWidth*self.D\n\t\tyLeft = -self.domainWidth*self.D\n\n\t\tzHeight = self.domainWidth*self.D\n\t\tzDepth = -self.domainWidth*self.D\n\n\t\t# Calculate number of cells in each direction\n\t\tx_nrCells = np.ceil((xBack - xFront)/self.baseSize)\n\t\ty_nrCells = np.ceil((yRight - yLeft)/self.baseSize)\n\t\tz_nrCells = np.ceil((zHeight - zDepth)/self.baseSize)\n\n\t\t# Readjust domain size to fit nr cells\n\t\txLength = self.baseSize*x_nrCells\n\t\tyLength = self.baseSize*y_nrCells\n\t\tzLength = self.baseSize*z_nrCells\n\n\t\twakeFraction = (self.domainWake/(self.domainWake + self.domainFront))\n\t\tfrontFraction = (self.domainFront/(self.domainWake + self.domainFront))\n\t\txFront = -xLength*frontFraction\n\t\txBack = xLength*wakeFraction\n\t\n\t\tyRight = yLength/2\n\t\tyLeft = -yLength/2\n\n\t\t# Add data to blockmesh and write\n\t\tblockMesh.addVertex([xFront, yLeft, zDepth])\n\t\tblockMesh.addVertex([xBack, yLeft, zDepth])\n\t\tblockMesh.addVertex([xBack, yRight, zDepth])\n\t\tblockMesh.addVertex([xFront, yRight, zDepth])\n\n\t\tblockMesh.addVertex([xFront, yLeft, zHeight])\n\t\tblockMesh.addVertex([xBack, yLeft, zHeight])\n\t\tblockMesh.addVertex([xBack, yRight, zHeight])\n\t\tblockMesh.addVertex([xFront, yRight, zHeight])\n\n\t\tblockMesh.addBlock([x_nrCells, y_nrCells, z_nrCells])\n\t\n\t\tblockMesh.addBoundary('inlet', 'patch', [[0, 4, 7, 3],[3, 2, 6, 7], [4, 5, 6, 7], [0, 1, 5, 4], [0, 3, 2, 1]])\n\n\t\tblockMesh.addBoundary('outlet', 'patch', [[2, 6, 5, 1]])\n\n\t\tblockMesh.write(self.systemFolder)\n\n\tdef writeMesh(self):\n\t\tself.calculateBaseSize()\n\t\tself.writeBlockMesh()\n\n\t\tname = 'actuatorDisk'\n\t\tp1 = np.array([0, 0, 0])\n\t\tr = self.r\n\t\tdiskDir = np.array([np.cos(self.alpha), -np.sin(self.alpha), 0])\n\t\tdiskThickness = self.actuatorDiskLength\n\n\t\tself.addPropellerActuatorDisk(name, p1, r, diskDir, diskThickness, self.Thrust, self.Torque, 
self.smallestSize)\n\n\t\tself.snappyDict.write(self.systemFolder)\n\n\tdef writeCaseFiles(self):\n\t\tsuper().writeCaseFiles()\n\n\tdef addViscousWake(self, x0, y0, z0, lengthFactor = 4, radiusFactor = 1.0, expansion = 2):\n\t\t# Ensure that mesh size is calculated\n\t\tself.calculateBaseSize()\n\n\t\tmaxLevel = int(np.floor(np.log(self.baseSize/self.viscousLength)/np.log(2)))\n\n\t\tradius0 = radiusFactor*self.D\n\t\tlength0 = lengthFactor*self.D \n\n\t\tlevel = maxLevel\n\t\tfor i in range(maxLevel):\n\t\t\tcellLength = self.baseSize/(2**level+1)\n\n\t\t\tname = 'viscWake{:.0f}'.format(i+1)\n\t\t\tlength = length0*expansion**(i)\n\t\t\tradius = radius0*expansion**(i)\n\n\t\t\tpoint1String = '({:.6f} {:.6f} {:.6f})'.format(x0, y0, z0)\n\t\t\tpoint2String = '({:.6f} {:.6f} {:.6f})'.format(x0+length, y0, z0)\n\t\t\tradiusString = '{:.6f}'.format(radius)\n\n\t\t\tself.snappyDict.addGeometry(name, 'searchableCylinder', {'point1':point1String, 'point2':point2String, 'radius':radiusString})\n\t\t\tself.snappyDict.addRefinementRegion(name, 'inside', np.array([1, level]))\n\n\t\t\tlevel -= 1\n\n\tdef writeScripts(self):\n\t\t# ------ Mesh --------------------\n\t\tf = open(self.runFolder+'/mesh.sh', 'w')\n\n\t\tf.write('#!/bin/bash\\n\\n')\n\n\t\tf.write('blockMesh\\n')\n\n\t\tf.write('mv system/decomposeParDict system/decomposeParDict.sim\\n')\n\t\tf.write('mv system/decomposeParDict.mesh system/decomposeParDict\\n')\n\t\tf.write('decomposePar\\n')\n\n\t\tf.write('mpirun -np {:.0f} snappyHexMesh -overwrite -parallel\\n'.format(self.nCPUs_mesh))\n\n\t\tf.write('reconstructParMesh -constant\\n')\n\t\tf.write('rm -fr processor*\\n')\n\n\t\tf.write('mv system/decomposeParDict system/decomposeParDict.mesh\\n')\n\t\tf.write('mv system/decomposeParDict.sim system/decomposeParDict\\n')\n\n\t\tf.write('createPatch -overwrite\\n')\n\n\t\tif len(self.topoSetList) > 0:\n\t\t\tf.write('topoSet\\n')\n\n\t\tf.close()\n\n\t\t# ------- Simulation ---------------------\n\t\tf = open(self.runFolder + '/runSim.sh', 'w')\n\n\t\tf.write('#!/bin/bash\\n\\n')\n\t\t\n\t\tf.write('decomposePar\\n')\n\n\t\tif self.vilje:\n\t\t\tf.write('mpiexec ' + self.solver + ' -parallel\\n')\n\t\telse:\n\t\t\tf.write('mpirun -np {:.0f} '.format(self.nCPUs) + self.solver + ' -parallel\\n')\n\n\t\tf.write('reconstructPar\\n')\n\n\t\tf.write('rm -fr processor*\\n')\n\n\t\tf.close()\n\ndef writeRunScripts(caseNameList, propSim, folderName=''):\n\t# Write run script\n\tf = open('run.sh', 'w')\n\tf.write('#!/bin/bash\\n\\n')\n\tf.write('cd $FOAM_RUN/PropellerSimulation\\n\\n')\n\n\tfor i in range(len(caseNameList)):\n\t\tf.write('cd {0}\\n'.format(caseNameList[i]))\n\t\tf.write('bash mesh.sh\\n')\n\t\tf.write('bash runSim.sh\\n')\n\t\tf.write('cd $FOAM_RUN/TowingTank\\n\\n')\n\n\tf.close()\n" ]
[ [ "numpy.log", "numpy.sqrt", "numpy.cos", "numpy.sin", "numpy.round", "numpy.ceil", "numpy.array" ] ]
adehecq/xdem
[ "674c94785df07097da1aeedbdb60ae87392a6636" ]
[ "tests/test_dem.py" ]
[ "\"\"\" Functions to test the DEM tools.\"\"\"\nimport inspect\nimport os\nimport warnings\n\nimport geoutils.georaster as gr\nimport geoutils.satimg as si\nimport numpy as np\nimport pyproj\nimport pytest\n\nimport xdem\nfrom xdem.dem import DEM\n\nDO_PLOT = False\n\n\nclass TestDEM:\n\n def test_init(self):\n \"\"\"Test that inputs work properly in DEM class init.\"\"\"\n fn_img = xdem.examples.get_path(\"longyearbyen_ref_dem\")\n\n # From filename\n dem = DEM(fn_img)\n assert isinstance(dem, DEM)\n\n # From DEM\n dem2 = DEM(dem)\n assert isinstance(dem2, DEM)\n\n # From Raster\n r = gr.Raster(fn_img)\n dem3 = DEM(r)\n assert isinstance(dem3, DEM)\n\n # From SatelliteImage\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"Parse metadata from file not implemented\")\n img = si.SatelliteImage(fn_img)\n dem4 = DEM(img)\n assert isinstance(dem4, DEM)\n\n list_dem = [dem, dem2, dem3, dem4]\n\n # Check all attributes\n attrs = [at for at in r._get_rio_attrs() if at not in ['name', 'dataset_mask', 'driver']]\n all_attrs = attrs + si.satimg_attrs + xdem.dem.dem_attrs\n for attr in all_attrs:\n attrs_per_dem = [idem.__getattribute__(attr) for idem in list_dem]\n assert all(at == attrs_per_dem[0] for at in attrs_per_dem)\n\n assert np.logical_and.reduce((np.array_equal(dem.data, dem2.data, equal_nan=True),\n np.array_equal(dem2.data, dem3.data, equal_nan=True),\n np.array_equal(dem3.data, dem4.data, equal_nan=True)))\n\n assert np.logical_and.reduce((np.all(dem.data.mask == dem2.data.mask),\n np.all(dem2.data.mask == dem3.data.mask),\n np.all(dem3.data.mask == dem4.data.mask)))\n\n def test_copy(self):\n \"\"\"\n Test that the copy method works as expected for DEM. In particular\n when copying r to r2:\n - if r.data is modified and r copied, the updated data is copied\n - if r is copied, r.data changed, r2.data should be unchanged\n \"\"\"\n # Open dataset, update data and make a copy\n r = xdem.dem.DEM(xdem.examples.get_path(\"longyearbyen_ref_dem\"))\n r.data += 5\n r2 = r.copy()\n\n # Objects should be different (not pointing to the same memory)\n assert r is not r2\n\n # Check the object is a DEM\n assert isinstance(r2, xdem.dem.DEM)\n\n # Check all immutable attributes are equal\n # georaster_attrs = ['bounds', 'count', 'crs', 'dtypes', 'height', 'indexes', 'nodata',\n # 'res', 'shape', 'transform', 'width']\n # satimg_attrs = ['satellite', 'sensor', 'product', 'version', 'tile_name', 'datetime']\n # dem_attrs = ['vref', 'vref_grid', 'ccrs']\n\n # using list directly available in Class\n attrs = [at for at in r._get_rio_attrs() if at not in ['name', 'dataset_mask', 'driver']]\n all_attrs = attrs + si.satimg_attrs + xdem.dem.dem_attrs\n for attr in all_attrs:\n assert r.__getattribute__(attr) == r2.__getattribute__(attr)\n\n # Check data array\n assert np.array_equal(r.data, r2.data, equal_nan=True)\n\n # Check dataset_mask array\n assert np.all(r.data.mask == r2.data.mask)\n\n # Check that if r.data is modified, it does not affect r2.data\n r.data += 5\n assert not np.array_equal(r.data, r2.data, equal_nan=True)\n\n # Check that the new_array argument indeed modifies the raster\n r3 = r.copy(new_array=r2.data)\n\n assert np.array_equal(r3.data, r2.data)\n\n def test_set_vref(self):\n \"\"\"Tests to set the vertical reference\"\"\"\n\n fn_img = xdem.examples.get_path(\"longyearbyen_ref_dem\")\n img = DEM(fn_img)\n\n # Check setting WGS84\n img.set_vref(vref_name='WGS84')\n assert img.vref == 'WGS84'\n assert img.vref_grid is None\n\n # Check setting EGM96\n 
img.set_vref(vref_name='EGM96')\n assert img.vref == 'EGM96'\n assert img.vref_grid == 'us_nga_egm96_15.tif'\n # The grid argument should have priority over name and parse the right vref name\n img.set_vref(vref_name='WGS84', vref_grid='us_nga_egm96_15.tif')\n assert img.vref == 'EGM96'\n\n # Check setting EGM08\n img.set_vref(vref_name='EGM08')\n assert img.vref == 'EGM08'\n assert img.vref_grid == 'us_nga_egm08_25.tif'\n # The grid argument should have priority over name and parse the right vref name\n img.set_vref(vref_name='best ref in the entire world, or any string', vref_grid='us_nga_egm08_25.tif')\n assert img.vref == 'EGM08'\n\n # Check that other existing grids are well detected in the pyproj.datadir\n img.set_vref(vref_grid='is_lmi_Icegeoid_ISN93.tif')\n\n # Check that non-existing grids raise errors\n with pytest.raises(ValueError):\n img.set_vref(vref_grid='the best grid in the entire world, or any non-existing string')\n\n def test_to_vref(self):\n \"\"\"Tests to convert vertical references\"\"\"\n\n # First, we use test points to test the vertical transform\n # Let's start with Chile\n lat = 43.70012234\n lng = -79.41629234\n z = 100\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", module=\"pyproj\")\n # init is deprecated by\n ellipsoid = pyproj.Proj(init=\"EPSG:4326\") # WGS84 datum ellipsoid height\n # EGM96 geoid in Chile, we expect ~30 m difference\n geoid = pyproj.Proj(init=\"EPSG:4326\", geoidgrids='us_nga_egm96_15.tif')\n transformer = pyproj.Transformer.from_proj(ellipsoid, geoid)\n z_out = transformer.transform(lng, lat, z)[2]\n\n # Check that the final elevation is finite, and higher than ellipsoid by less than 40 m (typical geoid in Chile)\n assert np.logical_and.reduce((np.isfinite(z_out), np.greater(z_out, z), np.less(np.abs(z_out-z), 40)))\n\n # With the EGM2008 (catch warnings as this use of init is depecrated)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", module=\"pyproj\")\n ellipsoid = pyproj.Proj(init=\"EPSG:4326\") # WGS84 datum ellipsoid height\n geoid = pyproj.Proj(init=\"EPSG:4326\", geoidgrids='us_nga_egm08_25.tif')\n transformer = pyproj.Transformer.from_proj(ellipsoid, geoid)\n z_out = transformer.transform(lng, lat, z)[2]\n\n # Check final elevation is finite, higher than ellipsoid with less than 40 m difference (typical geoid in Chile)\n assert np.logical_and.reduce((np.isfinite(z_out), np.greater(z_out, z), np.less(np.abs(z_out-z), 40)))\n\n # With GEOID2006 for Alaska\n lat = 65\n lng = -140\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", module=\"pyproj\")\n # init is deprecated by\n ellipsoid = pyproj.Proj(init=\"EPSG:4326\") # WGS84 datum ellipsoid height\n geoid = pyproj.Proj(init=\"EPSG:4326\", geoidgrids='us_noaa_geoid06_ak.tif')\n transformer = pyproj.Transformer.from_proj(ellipsoid, geoid)\n z_out = transformer.transform(lng, lat, z)[2]\n\n # Check that the final elevation is finite, lower than ellipsoid by less than 20 m (typical geoid in Alaska)\n assert np.logical_and.reduce((np.isfinite(z_out), np.less(z_out, z), np.less(np.abs(z_out-z), 20)))\n\n # With ISN1993 for Iceland\n lat = 65\n lng = -18\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", module=\"pyproj\")\n # init is deprecated by\n ellipsoid = pyproj.Proj(init=\"EPSG:4326\") # WGS84 datum ellipsoid height\n # Iceland, we expect a ~70m difference\n geoid = pyproj.Proj(init=\"EPSG:4326\", geoidgrids='is_lmi_Icegeoid_ISN93.tif')\n transformer = 
pyproj.Transformer.from_proj(ellipsoid, geoid)\n z_out = transformer.transform(lng, lat, z)[2]\n\n # Check that the final elevation is finite, lower than ellipsoid by less than 100 m (typical geoid in Iceland)\n assert np.logical_and.reduce((np.isfinite(z_out), np.less(z_out, z), np.less(np.abs(z_out-z), 100)))\n\n # Check that the function does not run without a reference set\n fn_img = xdem.examples.get_path(\"longyearbyen_ref_dem\")\n img = DEM(fn_img)\n with pytest.raises(ValueError):\n img.to_vref(vref_name='EGM96')\n\n # Check that the function properly runs with a reference set\n img.set_vref(vref_name='WGS84')\n mean_ellips = np.nanmean(img.data)\n img.to_vref(vref_name='EGM96')\n mean_geoid_96 = np.nanmean(img.data)\n assert img.vref == 'EGM96'\n assert img.vref_grid == 'us_nga_egm96_15.tif'\n # Check that the geoid is lower than ellipsoid, less than 35 m difference (Svalbard)\n assert np.greater(mean_ellips, mean_geoid_96)\n assert np.less(np.abs(mean_ellips - mean_geoid_96), 35.)\n\n # Check in the other direction\n img = DEM(fn_img)\n img.set_vref(vref_name='EGM96')\n mean_geoid_96 = np.nanmean(img.data)\n img.to_vref(vref_name='WGS84')\n mean_ellips = np.nanmean(img.data)\n assert img.vref == 'WGS84'\n assert img.vref_grid is None\n # Check that the geoid is lower than ellipsoid, less than 35 m difference (Svalbard)\n assert np.greater(mean_ellips, mean_geoid_96)\n assert np.less(np.abs(mean_ellips - mean_geoid_96), 35.)\n\n" ]
[ [ "numpy.abs", "numpy.greater", "numpy.array_equal", "numpy.isfinite", "numpy.less", "numpy.all", "numpy.nanmean" ] ]
QianWanghhu/IES-FF
[ "c57d25806a034b1d0478a94715ade4ad5c96accd" ]
[ "src/plot_gp.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\nimport seaborn as sns\n\nfrom scipy import stats\n# from scipy.optimize import root\nfrom pyapprox import generate_independent_random_samples\nimport matplotlib as mpl\nfrom scipy import stats\nfrom scipy.stats import spearmanr\n\nmpl.rcParams['font.size'] = 16\nmpl.rcParams['lines.linewidth'] = 3\nmpl.rcParams['text.usetex'] = False # use latex for all text handling\nmpl.rcParams['savefig.bbox'] = 'tight'\nmpl.rcParams['savefig.format'] = 'png' # gives best resolution plots\nmpl.rcParams['axes.labelsize'] = 20\nmpl.rcParams['axes.titlesize'] = 20\nmpl.rcParams['xtick.labelsize'] = 20\nmpl.rcParams['ytick.labelsize'] = 20\nmpl.rcParams['legend.fontsize'] = 16\n# mpl.rc('xtick', labelsize=20)\n# mpl.rc('ytick', labelsize=20)\n# print mpl.rcParams.keys()\nmpl.rcParams['text.latex.preamble'] = \\\n r'\\usepackage{siunitx}\\usepackage{amsmath}\\usepackage{amssymb}'\n\nfrom funcs.read_data import file_settings, variables_prep\nfrom adaptive_gp_model import *\n\n# Calculate the ratio of samples in the subregion\ndef ratio_subreg(gp):\n \"\"\"\n Function to calculate the ratio of samples in the subregion in the adaptive procedure.\n Parameters:\n ===========\n gp: Gaussian Process object\n\n Return:\n =======\n ration_df: pd.DataFrame, dataframe of the ratios at each iteration.\n \"\"\"\n y_training = gp.y_train_\n # num_new_samples = np.asarray([0]+[20]+[8]*10+[16]*20+[24]*16+[40]*14)\n num_new_samples = np.asarray([20]+[8]*10+[16]*20+[24]*20+[40]*18)\n num_samples = np.cumsum(num_new_samples)\n ratio_samples = np.zeros(shape=(num_new_samples.shape[0]-2, 2))\n ratio_sum = 0\n for ii in range(num_new_samples.shape[0] - 2):\n num_subreg = np.where(y_training[num_samples[ii]: num_samples[ii+1]]>0)[0].shape[0]\n ratio_sum = ratio_sum + num_subreg\n ratio_samples[ii, 0] = num_subreg / num_new_samples[ii+1]\n ratio_samples[ii, 1] = ratio_sum / num_samples[ii+1]\n\n ratio_df = pd.DataFrame(data=ratio_samples, \n index=np.arange(ratio_samples.shape[0]), columns=['Subregion', 'FullSpace'])\n ratio_df['num_samples'] = num_samples[1:-1]\n return ratio_df\n# END ratio_subreg()\n\nfrom funcs.utils import define_constants\ndef choose_fixed_point(plot_range, dot_samples, samples_opt, dot_vals):\n \"\"\"\n Function used to set the nomial point for fixing parameters at.\n Parameters:\n ===========\n plot_range: str, decide which type of nomial values to use.\n dot_samples: np.ndarray, of shape D*N where D is the number of parameters, \n the initial parameter samples for calculation objective functions\n samples_opt: np.ndarray, of shape D*M where D is the number of parameters,\n parameter samples resulting in objective functions above the threshold\n dot_vals: np.ndarray, objective function values from dot_samples\n\n Return:\n ===========\n x_default: list, the nominal values for all D parameters\n fig_path: str, the dir defined by the type of nominal values for results to save\n \"\"\"\n if plot_range == 'full_mean':\n x_default = define_constants(dot_samples, 13, stats = np.mean)\n fig_path = 'fix_mean_full'\n elif plot_range == 'sub_median':\n samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]\n x_default = define_constants(samples_opt, 13, stats = np.median)\n fig_path = 'fix_median_subreg'\n elif plot_range == 'sub_mean':\n samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]\n x_default = define_constants(samples_opt, 13, stats = np.mean)\n fig_path = 'fix_mean_subreg'\n elif 
plot_range == 'sub_rand':\n x_default = dot_samples[:, np.where(dot_vals>0.382)[0]][:, 38] # 8 for analytic, 38 for sample\n fig_path = 'fix_rand_subreg'\n elif plot_range == 'full_rand':\n breakpoint()\n x_default = dot_samples[:, np.where(dot_vals>0.382)[0]][:, 8] # 8 for analytic, 38 for sample\n fig_path = 'fix_rand_subreg'\n elif (plot_range == 'sub_max')|(plot_range == 'full_max'):\n x_default = dot_samples[:, np.where(dot_vals>=dot_vals.max())[0]]\n fig_path = 'fix_max_subreg'\n else:\n AssertionError\n return x_default, fig_path\n\ndef cal_stats(vals_opt, vals_dict, re_eval):\n \"\"\"\n Function used to calculate the statstics of the objective values VS parameter fixing.\n \n Parameters:\n ===========\n vals_dict: dict, containing the objective function values with parameters being fixed\n vals_opt: np.ndarray, objective function values used to calculate the statistics\n re_eval: Bool, re-evaluate the OBJ using the whole samples if True, \n else using the optimal set only for parameter fixing\n\n Return:\n ===========\n df_stats: pd.DataFrame, of statistics\n \"\"\"\n # PDF plot\n df_stats = pd.DataFrame(columns=['mean', 'std', 'qlow','qup'])\n if re_eval:\n df_stats.loc['full_set', ['mean', 'std']] = [vals_opt[vals_opt>0.382].mean(), vals_opt[vals_opt>0.382].std()]\n df_stats.loc['full_set', 'qlow':'qup'] = np.quantile(vals_opt[vals_opt>0.382], [0.025, 0.957])\n else:\n df_stats.loc['full_set', ['mean', 'std']] = [vals_opt.mean(), vals_opt.std()]\n df_stats.loc['full_set', 'qlow':'qup'] = np.quantile(vals_opt, [0.025, 0.957])\n\n for key, value in vals_dict.items():\n if key != 'fix_13':\n if re_eval:\n value = value[value>0.382]\n\n df_stats.loc[key, 'mean'] = value.mean()\n df_stats.loc[key, 'std'] = value.std()\n df_stats.loc[key, 'qlow':'qup'] = np.quantile(value, [0.025, 0.975])\n\n return df_stats\n\ndef cal_prop_optimal(vals_dict, dot_vals, fig_path):\n \"\"\"\n Used to calculate the ratio of optimal values.\n Parameters:\n ===========\n fig_path: str, dir to save the result formed into a pd.DataFrame\n \"\"\"\n pct_optimal = {}\n for key, value in vals_dict.items():\n pct_optimal[key] = value[value>0.382].shape[0] / dot_vals.shape[0]\n pct_optimal = pd.DataFrame.from_dict(pct_optimal, orient='index', columns=['Proportion'])\n pct_optimal.to_csv(f'{fig_path}/Proportion_optimal.csv')\n# END cal_prop_optimal()\n\ndef plot_pdf(vals_opt, vals_dict, re_eval, fig_path):\n \"\"\"\n Used to generate the plot of probability distribution function.\n \"\"\"\n fig, axes = plt.subplots(1, 3, figsize=(20, 6), sharex=True)\n sns.distplot(vals_opt.flatten(), hist=False, ax=axes[0])\n\n k = 0\n for key, value in vals_dict.items(): \n if key != 'fix_13':\n if re_eval:\n value = value[value>0.382]\n sns.distplot(value.flatten(), hist=False, ax=axes[k//4]);\n k += 1\n\n axes[0].legend(['full_set', *list(vals_dict.keys())[0:4]])\n axes[1].set_xlabel('F')\n axes[1].set_ylabel('')\n axes[1].legend(list(vals_dict.keys())[4:8])\n axes[2].legend(list(vals_dict.keys())[8:])\n axes[2].set_ylabel('')\n for ii in range(3):\n axes[ii].axvline(0.382, color='grey', linestyle='--', alpha=0.7)\n plt.savefig(f'{fig_path}/objective_dist.png', dpi=300)\n\ndef box_plot(vals_dict, vals_opt, num_fix, fig_path,fig_name, y_label='1/(2-F)', y_norm=True):\n \"\"\"\n Used to generate the boxplot of objective values.\n \"\"\"\n fig2 = plt.figure(figsize=(8, 6))\n df = pd.DataFrame.from_dict(vals_dict)\n df['fix_0'] = vals_opt.flatten()\n df.columns = [*num_fix, 0]\n df = df[[0, *num_fix]]\n if y_norm:\n df_filter = df\n 
else:\n df_filter = df.where(df>0.382)\n\n ax = sns.boxplot(data=df_filter, saturation=0.5, linewidth=1, whis=0.5)\n if y_norm == True:\n ax.axhline(1/(2 - 0.382), color='orange', linestyle='--', alpha=1 , linewidth=1)\n ax.set_ylim(0, 0.8)\n else:\n ax.axhline(0.382, color='orange', linestyle='--', alpha=1 , linewidth=1)\n ax.set_ylim(0.3, 0.8)\n ax.set_xlabel('Number of fixed parameters')\n ax.set_ylabel(y_label)\n plt.savefig(f'{fig_path}/{fig_name}.png', dpi=300)\n\ndef spr_coef(dot_samples, dot_vals, fsave):\n \"\"\"\n Calculate the spearman-rank correlation.\n \"\"\"\n samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]\n coef_dict = pd.DataFrame(index=np.arange(0, 13), columns=np.arange(0, 13))\n p_dict = pd.DataFrame(index=np.arange(0, 13), columns=np.arange(0, 13))\n for ii in range(13):\n for jj in range(ii+1, 13):\n coef_dict.loc[ii, jj], p_dict.loc[ii, jj] = spearmanr(samples_opt[ii], samples_opt[jj])\n coef_dict.to_csv(fsave+'spearman_coeff.csv')\n p_dict.to_csv(fsave+'spearman_p.csv')\n\ndef corner_pot(samples_dict, vals_dict, x_opt, y_opt, index_fix, y_lab='F'):\n \"\"\"\n Create dotty plots for the model inputs and outputs. \n Only part of the results will be plotted and shown in the paper due to the space available in a page.\n Parameteres:\n ============\n samples_dict: dict, collection of parameter samples with and without FF;\n vals_dict: dict\n x_opt: np.ndarray, parameter data points resulting in the selected optima\n y_opt: np.ndarray, output values of the selected optima corresponding to x_opt\n index_fix: list, the index of parameters ranked according to sensitivities.\n y_lab: str, the label of y-axis\n \n Returns:\n ========\n fig\n \"\"\"\n fig, axes = plt.subplots(9, 9, figsize = (6*9, 5*9), sharey=True)\n num_param_start = 5\n for key, x_value in samples_dict.items():\n num_fix = int(key.split('_')[1])\n if num_fix > (num_param_start-1):\n x_value_opt = x_value[:, np.where(vals_dict[key]>0.382)[0]]\n y_value_opt = vals_dict[key][vals_dict[key]>0.382]\n k = num_fix - num_param_start\n for ii in index_fix[num_fix-1:]:\n sns.scatterplot(x=x_opt[ii, :], y=y_opt.flatten(), ax=axes[k, num_fix-num_param_start], color='royalblue', s=20, alpha=0.8)\n sns.scatterplot(x=x_value_opt[ii, :], y=y_value_opt.flatten(), ax=axes[k, num_fix-num_param_start], color='orange', s=20, alpha=0.5)\n axes[k, num_fix-num_param_start].xaxis.set_tick_params(labelsize=40)\n axes[k, num_fix-num_param_start].yaxis.set_tick_params(labelsize=40)\n k += 1\n axes[num_fix-num_param_start, 0].set_ylabel(y_lab, fontsize=40)\n fig.set_tight_layout(True)\n\n return fig\n\n# define the order to fix parameters\ndef fix_plot(gp, fsave, param_names, ind_vars, sa_cal_type, variables_full, \n variable_temp, plot_range='full', param_range='full', re_eval=False, norm_y=False):\n \"\"\"\n Used to fix parameter sequentially and obtaining unconditional outputs,\n as well as boxplot and scatterplots.\n Parameters:\n ===========\n gp: Gaussian Process object\n variables: variable\n fsave: the outer dir for saving results of, for example, spearman correlation\n param_names: list, parameter names\n ind_vars: individual parameter variable\n sa_cal_type: str, the type of SA to conduct. Should be from ['analytic', 'sampling']\n plot_range: str, defining the set of validation samples to use.\n Use global samples if \"full\", else local. 
Default is \"full\".\n re_eval: Bool\n norm_y: Bool, whether to normalize objective functions when sensitive analysis\n \n Return:\n ========\n dot_vals: np.ndarray, objective function values from dot_samples\n vals_dict: dict, containing the objective function values with parameters being fixed\n index_fix: list, the ordered index of fixed parameters\n \"\"\"\n from funcs.utils import fix_sample_set, dotty_plot\n if re_eval:\n eval_bool = 'reeval'\n else:\n eval_bool = 'no_reeval'\n\n dot_fn = f'{file_settings()[0]}gp_run_1117/dotty_samples_{param_range}.txt'\n if not os.path.exists(dot_fn):\n dot_samples = generate_independent_random_samples(variable_temp, 150000)\n np.savetxt(dot_fn, dot_samples)\n else:\n dot_samples = np.loadtxt(dot_fn)\n\n dot_vals = np.zeros(shape=(dot_samples.shape[1], 1))\n for ii in range(15):\n dot_vals[10000*ii:(ii+1)*10000] = gp.predict(dot_samples[:, 10000*ii:(ii+1)*10000].T)\n \n # Whether to re-evaluate the optimal values.\n if re_eval:\n samples_opt = dot_samples \n vals_opt = dot_vals \n else:\n samples_opt = dot_samples[:, np.where(dot_vals>0.382)[0]]\n vals_opt = dot_vals[dot_vals>0.382]\n\n # Choose the fixed values\n print(f'Number of values beyond the threshold: {samples_opt.shape[1]}')\n x_default, fig_path = choose_fixed_point(plot_range, dot_samples, samples_opt, dot_vals)\n fig_path = fsave + fig_path\n y_default = gp.predict(x_default.reshape(x_default.shape[0], 1).T)[0]\n print(f'F of the point with default values: {y_default}')\n x_default = np.append(x_default, y_default)\n if not os.path.exists(fig_path):\n os.makedirs(fig_path)\n\n # calculate / import parameter rankings\n from sensitivity_settings import sa_gp\n if sa_cal_type == 'analytic':\n vars = variables_full\n else:\n vars = variable_temp\n _, ST = sa_gp(fsave, gp, ind_vars, vars, param_names, \n cal_type=sa_cal_type, save_values=True, norm_y=norm_y)\n par_rank = np.argsort(ST['ST'].values)\n index_sort = {ii:par_rank[12-ii] for ii in range(13)}\n\n num_fix = []\n vals_dict = {}\n samples_dict = {}\n index_fix = np.array([], dtype=int)\n for ii in range(max(index_sort.keys()), -1, -1):\n index_fix = np.append(index_fix, index_sort[ii])\n num_fix.append(index_fix.shape[0])\n print(f'Fix {index_fix.shape[0]} parameters')\n print(f'index: {index_fix}')\n samples_fix = fix_sample_set(index_fix, samples_opt, x_default)\n vals_fix = np.zeros_like(vals_opt)\n \n # calculate with surrogate \n if re_eval == True:\n for ii in range(15):\n vals_fix[10000*ii:(ii+1)*10000] = gp.predict(samples_fix[:, 10000*ii:(ii+1)*10000].T)\n else:\n vals_fix = gp.predict(samples_fix.T)\n\n # if num_fix[-1] == 2:\n # np.savetxt(f'{fig_path}/samples_fix_{num_fix[-1]}_{param_range}.txt', samples_fix) \n # np.savetxt(f'{fig_path}/values_fix_{num_fix[-1]}_{param_range}.txt', vals_fix)\n\n # select points statisfying the optima\n if not re_eval:\n samples_opt_fix = samples_fix\n vals_opt_fix = vals_fix\n vals_dict[f'fix_{len(index_fix)}'] = vals_fix.flatten() \n samples_dict[f'fix_{len(index_fix)}'] = samples_fix \n # plot \n samples_opt_no_fix = samples_opt\n vals_opt_no_fix = vals_opt\n else:\n index_opt_fix = np.where(vals_fix.flatten() >= 0.382)[0]\n samples_opt_fix = samples_fix[:, index_opt_fix]\n vals_opt_fix = vals_fix[index_opt_fix]\n vals_dict[f'fix_{len(index_fix)}'] = vals_fix.flatten() \n samples_dict[f'fix_{len(index_fix)}'] = samples_fix \n # plot \n index_opt = np.where(vals_opt.flatten() >= 0.382)[0]\n samples_opt_no_fix = samples_opt[:, index_opt]\n vals_opt_no_fix = vals_opt[index_opt]\n\n fig = 
dotty_plot(samples_opt_no_fix, vals_opt_no_fix.flatten(), samples_opt_fix, vals_opt_fix.flatten(), \n param_names, 'F'); #, orig_x_opt=samples_fix, orig_y_opt=vals_fix\n \n # plt.savefig(f'{fig_path}/{len(index_fix)}_{param_range}_{eval_bool}.png', dpi=300)\n\n # Calculate the stats of objectives vs. Parameter Fixing\n # cal_prop_optimal(vals_dict, dot_vals, fig_path)\n # df_stats = cal_stats(vals_opt, vals_dict, re_eval)\n # df_stats.to_csv(f'{fig_path}/F_stats_{param_range}.csv')\n # np.savetxt(f'{fig_path}/fixed_values_{plot_range}.txt', x_default)\n \n # Calculate the Spearman correlation between parameters\n # spr_coef(dot_samples, dot_vals, fsave)\n\n # corner plot\n fig = corner_pot(samples_dict, vals_dict, samples_opt_no_fix, vals_opt_no_fix.flatten(), index_fix, y_lab='F')\n plt.savefig(f'{fig_path}/corner_plot_sub_{param_range}_{eval_bool}.png', dpi=300)\n\n # Box plot\n # normalize the vals in vals_dict so as to well distinguish the feasible F.\n vals_dict_norm = {}\n for key, v in vals_dict.items():\n vals_dict_norm[key] = 1 / (2 - v)\n vals_opt_norm = 1 / (2 - vals_opt)\n # box_plot(vals_dict_norm, vals_opt_norm, num_fix, fig_path, f'boxplot_{param_range}_norm_{eval_bool}', y_label='1/(2-F)', y_norm=True)\n # box_plot(vals_dict_feasible_norm, vals_feasible_norm, num_fix, fig_path, 'boxplot_feasible_norm', y_label='1/(2-F)', y_norm=True)\n # box_plot(vals_dict, vals_opt, num_fix, fig_path, f'boxplot_feasible_{param_range}_{eval_bool}', y_label='F', y_norm=False)\n return dot_vals, vals_dict, index_fix\n # END fix_plot() #_no_reeval\n\n\n# import GP\ndef run_fix(fpath):\n\n # Get the feasible region\n def define_variable(x_samples, y_vals, y_threshold, num_pars):\n \"\"\"\n The function is used to identify the parameter ranges constrained by a given threshold.\n Parameters:\n ===========\n x_samples: np.ndarray, of the shape (N, D), \n where N is the sample size and D is the number of parameters.\n y_vals: np.ndarray, of the shape (N, 1). 
\n The output corresponds to x_samples.\n y_threshold: float, the value used to constrain parameter ranges.\n\n Return:\n =======\n variable_feasible: pyapprox.IndependentMultivariateRandomVariable\n \"\"\"\n if x_samples.shape[0] == num_pars:\n x_samples = x_samples.T\n x_temp_select = x_samples[np.where(y_vals > y_threshold)[0], :]\n x_temp_range = x_temp_select.max(axis=0)\n univariable_feasible = [stats.uniform(0, x_temp_range[ii]) for ii in range(0, x_temp_range.shape[0])]\n variable_feasible = pyapprox.IndependentMultivariateRandomVariable(univariable_feasible)\n return variable_feasible\n\n gp = pickle.load(open(f'{fpath}gp_0.pkl', \"rb\"))\n x_training = gp.X_train_\n y_training = gp.y_train_\n\n # visualization of the effects of factor fixing\n # define the variables for PCE\n param_file = file_settings()[-1]\n ind_vars, variables_full = variables_prep(param_file, product_uniform='uniform', dummy=False)\n var_trans = AffineRandomVariableTransformation(variables_full, enforce_bounds=True)\n param_names = pd.read_csv(param_file, usecols=[2]).values.flatten()\n \n # Resample in the ranges where the objective values are above -10\n variable_temp = define_variable(x_training, y_training, -5, num_pars=13)\n\n # Identify the parameter ranges with output value satisfying a given criteria\n dot_fn = f'{file_settings()[0]}gp_run_1117/dotty_parameter_range.txt'\n if not os.path.exists(dot_fn):\n variable_temp_range = define_variable(x_training, y_training, 0, num_pars=13)\n dot_samples = generate_independent_random_samples(variable_temp_range, 40000)\n np.savetxt(dot_fn, dot_samples)\n else:\n dot_samples = np.loadtxt(dot_fn)\n\n dot_vals = gp.predict(dot_samples.T)\n variable_feasible= define_variable(dot_samples, dot_vals, 0.382, num_pars=13)\n\n # Calculate the ratio of calibrating samples in the sub-region\n if not os.path.exists(f'{fpath}ratio_cali_subreg.csv'):\n df = ratio_subreg(gp)\n df.to_csv(f'{fpath}ratio_cali_subreg.csv')\n\n # Calculate results with and create plots VS fixing parameters\n fsave = fpath + 'analytic-sa/' # if sampling, use variable_feasible; else, use variable_temp\n norm_y = False\n param_range = 'full'\n vals_fix_dict = {}\n dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'analytic', \n variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=False, norm_y = norm_y)\n _, vals_fix_dict['full_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic', \n variables_full, variable_feasible, plot_range='full_rand', param_range=param_range, re_eval=False, norm_y = norm_y)\n _, vals_fix_dict['full_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic', \n variables_full, variable_feasible, plot_range='full_max', param_range=param_range, re_eval=False, norm_y = norm_y)\n \n dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'analytic', \n variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=True, norm_y = norm_y)\n _, vals_fix_dict['full_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic', \n variables_full, variable_feasible, plot_range='full_rand', param_range=param_range, re_eval=True, norm_y = norm_y)\n _, vals_fix_dict['full_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'analytic', \n variables_full, variable_feasible, plot_range='full_max', param_range=param_range, re_eval=True, norm_y = norm_y)\n \n fsave = fpath + 'sampling-sa/'\n norm_y = False\n param_range = 'sub'\n 
vals_fix_dict = {}\n dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'sampling', \n variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=False, norm_y = norm_y)\n _, vals_fix_dict['sub_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling', \n variables_full, variable_feasible, plot_range='sub_rand', param_range=param_range, re_eval=False, norm_y = norm_y)\n _, vals_fix_dict['sub_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling', \n variables_full, variable_feasible, plot_range='sub_max', param_range=param_range, re_eval=False, norm_y = norm_y)\n\n dot_vals, vals_fix_dict['sub_mean'], index_fix = fix_plot(gp, fsave, param_names,ind_vars, 'sampling', \n variables_full, variable_feasible, plot_range='sub_mean', param_range=param_range, re_eval=True, norm_y = norm_y)\n _, vals_fix_dict['sub_rand'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling', \n variables_full, variable_feasible, plot_range='sub_rand', param_range=param_range, re_eval=True, norm_y = norm_y)\n _, vals_fix_dict['sub_max'], _ = fix_plot(gp, fsave, param_names, ind_vars, 'sampling', \n variables_full, variable_feasible, plot_range='sub_max', param_range=param_range, re_eval=True, norm_y = norm_y)\n # END run_fix()\n\n\ndef plot_validation(fpath, xlabel, ylabel, plot_range='full', save_fig=False, comp=False):\n from sklearn.metrics import mean_squared_error\n from sklearn.metrics import r2_score\n from math import sqrt\n \n def plot(gp, vali_samples, fpath, xlabel, ylabel, plot_range='full', save_fig=False):\n \"\"\"\n Function used to plot the figures of GP validation.\n Parameters:\n ===========\n gp: Gaussian Process object\n fpath: str, path to save figures\n plot_range: str, defining the set of validation samples to use.\n Use global samples if \"full\", else local. Default is \"full\".\n save_vali: Bool, save figures if true. 
Default is False.\n\n \"\"\"\n if plot_range == 'full':\n y_hat = gp.predict(vali_samples[0:13, 100:].T)\n y_eval = vali_samples[13, 100:]\n else:\n y_hat = gp.predict(vali_samples[0:13, 0:100].T)\n y_eval = vali_samples[13, 0:100] \n\n # l2 = np.linalg.norm(y_hat.flatten() - y_eval.flatten()) / np.linalg.norm(y_eval.flatten())\n r2 = r2_score(y_eval.flatten(), y_hat.flatten())\n rmse = sqrt(mean_squared_error(y_eval.flatten(), y_hat.flatten()))\n fig, ax = plt.subplots(1, 1, figsize=(8, 6))\n ax.plot(y_eval.flatten(), y_hat.flatten(), linestyle='', marker='o', ms=8)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n y_eval_opt = y_eval[y_eval>0.382]\n y_hat_opt = y_hat[y_eval>0.382]\n ax.plot(y_eval_opt.flatten(), y_hat_opt.flatten(), linestyle='', \n marker='o', color='darkorange', alpha=0.7, ms=8)\n ax.plot(np.linspace(y_eval.min(), 0.8, 100), np.linspace(y_eval.min(), 0.8, 100), \n linestyle='--', color='slategrey', alpha=0.5)\n # ax.text(-950, -100, r'$R^2 = %.3f$'%r2)\n # ax.text(-950, -200, r'$RMSE = %.3f$'%rmse)\n ax.text(0.05, 0.75, r'$R^2 = %.3f$'%r2, transform=ax.transAxes)\n ax.text(0.05, 0.65, r'$RMSE = %.3f$'%rmse, transform=ax.transAxes)\n # plt.show()\n if save_fig:\n plt.savefig(f'{fpath}figs/gpr_validation_{plot_range}_range_text.png', dpi=300)\n # END plot() \n\n def vali_samples_subreg(gp, variable, variable_const, num_candidate_samples=40000):\n \"\"\"\n Function used to generate validation samples.\n \"\"\"\n import random\n random.seed(666)\n candidates_samples = generate_independent_random_samples(variable=variable, \n num_samples = num_candidate_samples)\n\n candidates_samples_const = generate_independent_random_samples(variable=variable_const, \n num_samples = num_candidate_samples)\n y_pred_full = gp.predict(candidates_samples.T)\n y_pred_const = gp.predict(candidates_samples_const.T)\n\n samples_vali_subreg1 = candidates_samples_const[:, np.where(y_pred_const>0.382)[0][0:20]]\n samples_vali_subreg2 = candidates_samples_const[:, np.where(y_pred_const>0)[0]]\n y_sub1 = gp.predict(samples_vali_subreg2.T)\n samples_vali_subreg2 = samples_vali_subreg2[:, np.where(y_sub1<=0.382)[0][0:80]]\n samples_vali_full1 = candidates_samples[:, np.where(y_pred_full>-200)[0][0:180]]\n samples_vali_full2 = candidates_samples[:, np.where((y_pred_full>-1000)&(y_pred_full<-200))[0][0:20]]\n vali_samples = np.zeros(shape=(14, 300))\n vali_samples[0:13, 0:20] = samples_vali_subreg1\n vali_samples[0:13, 20:100] = samples_vali_subreg2\n vali_samples[0:13, 100:280] = samples_vali_full1\n vali_samples[0:13, 280:300] = samples_vali_full2\n vali_samples[13, :] = gp.predict(vali_samples[0:13, :].T).flatten()\n return vali_samples\n # END vali_samples_subreg()\n\n # Obtain validation samples\n def vali_samples_save(gp):\n # Resample in the ranges where the objective values are above 0\n x_select = x_training[np.where(y_training>0)[0], :]\n x_range = x_select.max(axis=0)\n univariable_temp = [stats.uniform(0, x_range[ii]) for ii in range(0, x_range.shape[0])]\n variable_temp = pyapprox.IndependentMultivariateRandomVariable(univariable_temp)\n\n x_select2 = x_training[np.where(y_training>-200)[0], :]\n x_range2 = x_select2.max(axis=0)\n univariable_temp2 = [stats.uniform(0, x_range2[ii]) for ii in range(0, x_range2.shape[0])]\n variable_temp2 = pyapprox.IndependentMultivariateRandomVariable(univariable_temp2)\n\n # validation plot\n vali_samples = vali_samples_subreg(gp, variable_temp2, variable_temp, 20000)\n np.savetxt(f'{fpath}vali_samples.txt', vali_samples)\n\n # import GP\n gp = 
pickle.load(open(f'{fpath}gp_0.pkl', \"rb\"))\n x_training = gp.X_train_\n y_training = gp.y_train_\n num_new_samples = np.asarray([20]+[8]*10+[16]*20+[24]*20+[40]*18)\n num_sample_cum = np.cumsum(num_new_samples)\n x_training = gp.X_train_\n y_training = gp.y_train_\n\n # Plot the validation plots using two independent sample set\n if not os.path.exists(fpath+'vali_samples.txt'):\n print(\"There is no validation samples and will generate.\")\n vali_samples_save(gp)\n else:\n vali_samples = np.loadtxt(fpath+'vali_samples.txt')\n y_gp = gp.predict(vali_samples[0:13, :].T).flatten()\n # plt.scatter(vali_samples[-1, :], y_gp)\n # plt.show()\n plot(gp, vali_samples, fpath, xlabel, ylabel, plot_range=plot_range, save_fig=save_fig)\n \n # Calculate the errors due vs increasing samples\n if os.path.exists('error_df.csv'):\n error_df = pd.DataFrame(index=num_sample_cum, columns=['r2_full', 'r2_sub', 'rmse_full', 'rmse_sub'])\n for ntrain in num_sample_cum: \n print(f'-------------{ntrain} training samples------------')\n gp_temp = gp.fit(x_training[0:ntrain, :].T, y_training[0:ntrain])\n y_hat = gp_temp.predict(vali_samples[0:13, :].T).flatten()\n error_df.loc[ntrain, 'r2_sub'] = r2_score(vali_samples[-1, 0:100], y_hat[0:100])\n error_df.loc[ntrain, 'r2_full'] = r2_score(vali_samples[-1, 100:], y_hat[100:])\n error_df.loc[ntrain, 'rmse_full'] = sqrt(mean_squared_error(vali_samples[-1, 100:], y_hat[100:]))\n error_df.loc[ntrain, 'rmse_sub'] = sqrt(mean_squared_error(vali_samples[-1, 0:100], y_hat[0:100]))\n \n error_df.to_csv(f'{fpath}error_df.csv')\n\n if comp:\n # Compare the accuracy of adaptive and non-adaptive GP:\n fpaths = ['../output/gp_run_1117/', '../output/gp_run_20220107/']\n error_adaptive = pd.read_csv(f'{fpaths[0]}error_df.csv', index_col='Unnamed: 0')\n error_nonadaptive = pd.read_csv(f'{fpaths[1]}error_df.csv', index_col='Unnamed: 0')\n sns.set_style('whitegrid')\n fig, axes = plt.subplots(1, 2, figsize=(6*2, 5), sharey=True, sharex=False)\n error_adaptive.loc[:, ['rmse_full']].plot(logy=True, logx=True, ax=axes[0])\n error_nonadaptive.loc[:, ['rmse_full']].plot(logy=True, logx=True, ax=axes[0])\n axes[0].legend(['Adaptive GP', 'Non-adaptive GP'])\n axes[0].set_title('(a)')\n error_adaptive.loc[:, ['rmse_sub']].plot(logy=True, logx=True, ax=axes[1])\n error_nonadaptive.loc[:, ['rmse_sub']].plot(logy=True, logx=True, ax=axes[1])\n axes[1].set_title('(b)')\n axes[1].legend(['Adaptive GP', 'Non-adaptive GP'])\n plt.savefig(f'{fpaths[0]}figs/GP_compare.png', dpi=300, format='png') \n # END plot_validation()\n\n\n# plot_validation(fpath='../output/gp_run_20220107/', xlabel='Model outputs', \n# ylabel='GP simulation', plot_range='sub', save_fig=True, comp=True)\n\n# run_fix(fpath = '../output/gp_run_1117/')\n\n\n\n\n\n\n" ]
[ [ "sklearn.metrics.r2_score", "numpy.asarray", "numpy.cumsum", "pandas.DataFrame", "sklearn.metrics.mean_squared_error", "numpy.zeros_like", "scipy.stats.spearmanr", "numpy.where", "pandas.read_csv", "numpy.arange", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.quantile", "matplotlib.pyplot.savefig", "numpy.append", "scipy.stats.uniform", "pandas.DataFrame.from_dict", "numpy.savetxt", "numpy.argsort", "numpy.array", "matplotlib.pyplot.subplots", "numpy.loadtxt" ] ]
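A minimal sketch of the row-fixing step used throughout the fix_plot routine in the entry above: the rows of a (D, N) sample array that correspond to fixed parameters are overwritten with their default values before re-predicting with the GP surrogate. The helper below (fix_rows) is a hypothetical, simplified stand-in for the fix_sample_set function the entry imports from funcs.utils but does not define.

import numpy as np

def fix_rows(index_fix, samples, defaults):
    # samples: (D, N) array, one row per parameter, one column per sample
    # defaults: length-D vector of default parameter values
    fixed = np.array(samples, dtype=float, copy=True)
    for idx in index_fix:
        # overwrite the whole row of each fixed parameter with its default
        fixed[idx, :] = defaults[idx]
    return fixed

# e.g. vals_fix = gp.predict(fix_rows(index_fix, samples_opt, x_default).T)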
Shigangli/mindspore
[ "351ce03fb2721335695afd77e8535d15670571f4" ]
[ "tests/ut/python/dataset/test_random_auto_contrast.py" ]
[ "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"\nTesting RandomAutoContrast op in DE\n\"\"\"\nimport numpy as np\n\nimport mindspore.dataset as ds\nimport mindspore.dataset.vision.c_transforms as c_vision\nfrom mindspore import log as logger\nfrom util import visualize_list, visualize_image, diff_mse\n\nimage_file = \"../data/dataset/testImageNetData/train/class1/1_1.jpg\"\ndata_dir = \"../data/dataset/testImageNetData/train/\"\n\n\ndef test_random_auto_contrast_pipeline(plot=False):\n \"\"\"\n Test RandomAutoContrast pipeline\n \"\"\"\n logger.info(\"Test RandomAutoContrast pipeline\")\n\n # Original Images\n data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n transforms_original = [c_vision.Decode(), c_vision.Resize(size=[224, 224])]\n ds_original = data_set.map(operations=transforms_original, input_columns=\"image\")\n ds_original = ds_original.batch(512)\n\n for idx, (image, _) in enumerate(ds_original):\n if idx == 0:\n images_original = image.asnumpy()\n else:\n images_original = np.append(images_original,\n image.asnumpy(),\n axis=0)\n\n # Randomly Automatically Contrasted Images\n data_set1 = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n transform_random_auto_contrast = [c_vision.Decode(),\n c_vision.Resize(size=[224, 224]),\n c_vision.RandomAutoContrast(prob=0.6)]\n ds_random_auto_contrast = data_set1.map(operations=transform_random_auto_contrast, input_columns=\"image\")\n ds_random_auto_contrast = ds_random_auto_contrast.batch(512)\n for idx, (image, _) in enumerate(ds_random_auto_contrast):\n if idx == 0:\n images_random_auto_contrast = image.asnumpy()\n else:\n images_random_auto_contrast = np.append(images_random_auto_contrast,\n image.asnumpy(),\n axis=0)\n if plot:\n visualize_list(images_original, images_random_auto_contrast)\n\n num_samples = images_original.shape[0]\n mse = np.zeros(num_samples)\n for i in range(num_samples):\n mse[i] = diff_mse(images_random_auto_contrast[i], images_original[i])\n logger.info(\"MSE= {}\".format(str(np.mean(mse))))\n\n\ndef test_random_auto_contrast_eager():\n \"\"\"\n Test RandomAutoContrast eager.\n \"\"\"\n img = np.fromfile(image_file, dtype=np.uint8)\n logger.info(\"Image.type: {}, Image.shape: {}\".format(type(img), img.shape))\n\n img = c_vision.Decode()(img)\n img_auto_contrast = c_vision.AutoContrast(1.0, None)(img)\n img_random_auto_contrast = c_vision.RandomAutoContrast(1.0, None, 1.0)(img)\n logger.info(\"Image.type: {}, Image.shape: {}\".format(type(img_auto_contrast), img_random_auto_contrast.shape))\n\n assert img_auto_contrast.all() == img_random_auto_contrast.all()\n\n\ndef test_random_auto_contrast_comp(plot=False):\n \"\"\"\n Test RandomAutoContrast op compared with AutoContrast op.\n \"\"\"\n random_auto_contrast_op = c_vision.RandomAutoContrast(prob=1.0)\n auto_contrast_op = c_vision.AutoContrast()\n\n dataset1 = ds.ImageFolderDataset(data_dir, 
1, shuffle=False, decode=True)\n for item in dataset1.create_dict_iterator(num_epochs=1, output_numpy=True):\n image = item['image']\n dataset1.map(operations=random_auto_contrast_op, input_columns=['image'])\n dataset2 = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)\n dataset2.map(operations=auto_contrast_op, input_columns=['image'])\n for item1, item2 in zip(dataset1.create_dict_iterator(num_epochs=1, output_numpy=True),\n dataset2.create_dict_iterator(num_epochs=1, output_numpy=True)):\n image_random_auto_contrast = item1['image']\n image_auto_contrast = item2['image']\n\n mse = diff_mse(image_auto_contrast, image_random_auto_contrast)\n assert mse == 0\n logger.info(\"mse: {}\".format(mse))\n if plot:\n visualize_image(image, image_random_auto_contrast, mse, image_auto_contrast)\n\n\ndef test_random_auto_contrast_invalid_prob():\n \"\"\"\n Test RandomAutoContrast Op with invalid prob parameter.\n \"\"\"\n logger.info(\"test_random_auto_contrast_invalid_prob\")\n dataset = ds.ImageFolderDataset(data_dir, 1, shuffle=False, decode=True)\n try:\n random_auto_contrast_op = c_vision.RandomAutoContrast(prob=1.5)\n dataset = dataset.map(operations=random_auto_contrast_op, input_columns=['image'])\n except ValueError as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"Input prob is not within the required interval of [0.0, 1.0].\" in str(e)\n\n\ndef test_random_auto_contrast_invalid_ignore():\n \"\"\"\n Test RandomAutoContrast Op with invalid ignore parameter.\n \"\"\"\n logger.info(\"test_random_auto_contrast_invalid_ignore\")\n try:\n data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data_set = data_set.map(operations=[c_vision.Decode(),\n c_vision.Resize((224, 224)),\n lambda img: np.array(img[:, :, 0])], input_columns=[\"image\"])\n # invalid ignore\n data_set = data_set.map(operations=c_vision.RandomAutoContrast(ignore=255.5), input_columns=\"image\")\n except TypeError as error:\n logger.info(\"Got an exception in DE: {}\".format(str(error)))\n assert \"Argument ignore with value 255.5 is not of type\" in str(error)\n try:\n data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data_set = data_set.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224)),\n lambda img: np.array(img[:, :, 0])], input_columns=[\"image\"])\n # invalid ignore\n data_set = data_set.map(operations=c_vision.RandomAutoContrast(ignore=(10, 100)), input_columns=\"image\")\n except TypeError as error:\n logger.info(\"Got an exception in DE: {}\".format(str(error)))\n assert \"Argument ignore with value (10,100) is not of type\" in str(error)\n\n\ndef test_random_auto_contrast_invalid_cutoff():\n \"\"\"\n Test RandomAutoContrast Op with invalid cutoff parameter.\n \"\"\"\n logger.info(\"test_random_auto_contrast_invalid_cutoff\")\n try:\n data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data_set = data_set.map(operations=[c_vision.Decode(),\n c_vision.Resize((224, 224)),\n lambda img: np.array(img[:, :, 0])], input_columns=[\"image\"])\n # invalid cutoff\n data_set = data_set.map(operations=c_vision.RandomAutoContrast(cutoff=-10.0), input_columns=\"image\")\n except ValueError as error:\n logger.info(\"Got an exception in DE: {}\".format(str(error)))\n assert \"Input cutoff is not within the required interval of [0, 50).\" in str(error)\n try:\n data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data_set = data_set.map(operations=[c_vision.Decode(),\n c_vision.Resize((224, 224)),\n 
lambda img: np.array(img[:, :, 0])], input_columns=[\"image\"])\n # invalid cutoff\n data_set = data_set.map(operations=c_vision.RandomAutoContrast(cutoff=120.0), input_columns=\"image\")\n except ValueError as error:\n logger.info(\"Got an exception in DE: {}\".format(str(error)))\n assert \"Input cutoff is not within the required interval of [0, 50).\" in str(error)\n\n\ndef test_random_auto_contrast_one_channel():\n \"\"\"\n Feature: RandomAutoContrast\n Description: test with one channel images\n Expectation: raise errors as expected\n \"\"\"\n logger.info(\"test_random_auto_contrast_one_channel\")\n\n c_op = c_vision.RandomAutoContrast()\n\n try:\n data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data_set = data_set.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224)),\n lambda img: np.array(img[:, :, 0])], input_columns=[\"image\"])\n\n data_set = data_set.map(operations=c_op, input_columns=\"image\")\n\n except RuntimeError as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"image shape is incorrect, expected num of channels is 3.\" in str(e)\n\n\ndef test_random_auto_contrast_four_dim():\n \"\"\"\n Feature: RandomAutoContrast\n Description: test with four dimension images\n Expectation: raise errors as expected\n \"\"\"\n logger.info(\"test_random_auto_contrast_four_dim\")\n\n c_op = c_vision.RandomAutoContrast()\n\n try:\n data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data_set = data_set.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224)),\n lambda img: np.array(img[2, 200, 10, 32])], input_columns=[\"image\"])\n\n data_set = data_set.map(operations=c_op, input_columns=\"image\")\n\n except ValueError as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"image shape is not <H,W,C>\" in str(e)\n\n\ndef test_random_auto_contrast_invalid_input():\n \"\"\"\n Feature: RandomAutoContrast\n Description: test with images in uint32 type\n Expectation: raise errors as expected\n \"\"\"\n logger.info(\"test_random_invert_invalid_input\")\n\n c_op = c_vision.RandomAutoContrast()\n\n try:\n data_set = ds.ImageFolderDataset(dataset_dir=data_dir, shuffle=False)\n data_set = data_set.map(operations=[c_vision.Decode(), c_vision.Resize((224, 224)),\n lambda img: np.array(img[2, 32, 3], dtype=uint32)], input_columns=[\"image\"])\n data_set = data_set.map(operations=c_op, input_columns=\"image\")\n\n except TypeError as e:\n logger.info(\"Got an exception in DE: {}\".format(str(e)))\n assert \"Cannot convert from OpenCV type, unknown CV type\" in str(e)\n\n\nif __name__ == \"__main__\":\n test_random_auto_contrast_pipeline(plot=True)\n test_random_auto_contrast_eager()\n test_random_auto_contrast_comp(plot=True)\n test_random_auto_contrast_invalid_prob()\n test_random_auto_contrast_invalid_ignore()\n test_random_auto_contrast_invalid_cutoff()\n test_random_auto_contrast_one_channel()\n test_random_auto_contrast_four_dim()\n test_random_auto_contrast_invalid_input()\n" ]
[ [ "numpy.array", "numpy.fromfile", "numpy.zeros", "numpy.mean" ] ]
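A minimal eager-mode sketch of the RandomAutoContrast op exercised by the tests above, using a synthetic image so no dataset files are needed; the keyword names (cutoff, ignore, prob) follow the calls shown in the entry.

import numpy as np
import mindspore.dataset.vision.c_transforms as c_vision

# synthetic 3-channel HWC uint8 image
img = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
# prob=1.0 applies auto-contrast on every call; the image shape is preserved
out = c_vision.RandomAutoContrast(cutoff=0.0, ignore=None, prob=1.0)(img)
assert out.shape == img.shape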
bernhard-42/jupyrter-cadquery
[ "ff773855f480624f30ae7bf874e728ec9ebddd68" ]
[ "jupyter_cadquery/tessellator.py" ]
[ "\"\"\"Tessellator class\"\"\"\n\nimport os\nimport sys\n\nfrom cachetools import LRUCache, cached\n\nimport numpy as np\n\n# pylint: disable=no-name-in-module,import-error\nfrom OCP.gp import gp_Vec, gp_Pnt\nfrom OCP.BRep import BRep_Tool\nfrom OCP.BRepTools import BRepTools\nfrom OCP.BRepGProp import BRepGProp_Face\nfrom OCP.BRepMesh import BRepMesh_IncrementalMesh\nfrom OCP.TopLoc import TopLoc_Location\nfrom OCP.TopAbs import TopAbs_Orientation\nfrom OCP.TopTools import TopTools_IndexedDataMapOfShapeListOfShape, TopTools_IndexedMapOfShape\nfrom OCP.TopExp import TopExp, TopExp_Explorer\nfrom OCP.TopAbs import TopAbs_EDGE, TopAbs_FACE, TopAbs_SOLID\nfrom OCP.TopoDS import TopoDS\nfrom OCP.BRepAdaptor import BRepAdaptor_Curve\nfrom OCP.GCPnts import GCPnts_QuasiUniformDeflection\n\n\nfrom cadquery.occ_impl.shapes import Compound\nfrom jupyter_cadquery.utils import Timer, round_sig\nfrom jupyter_cadquery.ocp_utils import get_faces, np_bbox, loc_to_tq\n\nMAX_HASH_KEY = 2147483647\n\n\n#\n# Caching helpers\n#\n\n\ndef make_key(\n shape, loc, deviation, quality, angular_tolerance, compute_edges=True, compute_faces=True, debug=False\n): # pylint: disable=unused-argument\n # quality is a measure of bounding box and deviation, hence can be ignored (and should due to accuracy issues\n # of non optimal bounding boxes. debug and progress are also irrelevant for tessellation results)\n if not isinstance(shape, (tuple, list)):\n shape = [shape]\n\n key = (\n tuple((s.HashCode(MAX_HASH_KEY) for s in shape)),\n loc,\n deviation,\n angular_tolerance,\n compute_edges,\n compute_faces,\n )\n return key\n\n\ndef get_size(obj):\n size = sys.getsizeof(obj)\n if isinstance(obj, dict):\n size += sum([get_size(v) + len(k) for k, v in obj.items()])\n elif isinstance(obj, np.ndarray):\n size += obj.size * obj.dtype.itemsize\n elif isinstance(obj, (tuple, list)):\n size += sum([get_size(i) for i in obj])\n return size\n\n\ncache_size = os.environ.get(\"JCQ_CACHE_SIZE_MB\")\nif cache_size is None:\n cache_size = 1024 * 1024 * 1024\nelse:\n cache_size = int(cache_size) * 1024 * 1024\ncache = LRUCache(maxsize=cache_size, getsizeof=get_size)\n\n\nclass Tessellator:\n def __init__(self):\n self.vertices = np.empty((0, 3), dtype=\"float32\")\n self.triangles = np.empty((0,), dtype=\"uint32\")\n self.normals = np.empty((0, 3), dtype=\"float32\")\n self.normals = np.empty((0, 2, 3), dtype=\"float32\")\n self.shape = None\n self.edges = []\n\n def number_solids(self, shape):\n count = 0\n e = TopExp_Explorer(shape, TopAbs_SOLID)\n while e.More():\n count += 1\n e.Next()\n return count\n\n def compute(\n self,\n shape,\n quality,\n angular_tolerance,\n compute_faces=True,\n compute_edges=True,\n debug=False,\n ):\n\n self.shape = shape\n\n count = self.number_solids(shape)\n with Timer(debug, \"\", f\"mesh incrementally {'(parallel)' if count > 1 else ''}\", 3):\n # Remove previous mesh data\n BRepTools.Clean_s(shape)\n BRepMesh_IncrementalMesh(shape, quality, False, angular_tolerance, count > 1)\n\n if compute_faces:\n with Timer(debug, \"\", \"get nodes, triangles and normals\", 3):\n self.tessellate()\n\n if compute_edges:\n with Timer(debug, \"\", \"get edges\", 3):\n self.compute_edges()\n\n # Remove mesh data again\n # BRepTools.Clean_s(shape)\n\n def tessellate(self):\n self.vertices = []\n self.triangles = []\n self.normals = []\n\n # global buffers\n p_buf = gp_Pnt()\n n_buf = gp_Vec()\n loc_buf = TopLoc_Location()\n\n offset = -1\n\n # every line below is selected for performance. 
Do not introduce functions to \"beautify\" the code\n\n for face in get_faces(self.shape):\n if face.Orientation() == TopAbs_Orientation.TopAbs_REVERSED:\n i1, i2 = 2, 1\n else:\n i1, i2 = 1, 2\n\n internal = face.Orientation() == TopAbs_Orientation.TopAbs_INTERNAL\n\n poly = BRep_Tool.Triangulation_s(face, loc_buf)\n if poly is not None:\n Trsf = loc_buf.Transformation()\n\n # add vertices\n flat = []\n for i in range(1, poly.NbNodes() + 1):\n flat.extend(poly.Node(i).Transformed(Trsf).Coord())\n self.vertices.extend(flat)\n\n # add triangles\n flat = []\n for i in range(1, poly.NbTriangles() + 1):\n coord = poly.Triangle(i).Get()\n flat.extend((coord[0] + offset, coord[i1] + offset, coord[i2] + offset))\n self.triangles.extend(flat)\n\n # add normals\n if poly.HasUVNodes():\n prop = BRepGProp_Face(face)\n flat = []\n for i in range(1, poly.NbNodes() + 1):\n u, v = poly.UVNode(i).Coord()\n prop.Normal(u, v, p_buf, n_buf)\n if n_buf.SquareMagnitude() > 0:\n n_buf.Normalize()\n flat.extend(n_buf.Reverse().Coord() if internal else n_buf.Coord())\n self.normals.extend(flat)\n\n offset += poly.NbNodes()\n\n def compute_edges(self):\n edge_map = TopTools_IndexedMapOfShape()\n face_map = TopTools_IndexedDataMapOfShapeListOfShape()\n\n TopExp.MapShapes_s(self.shape, TopAbs_EDGE, edge_map)\n TopExp.MapShapesAndAncestors_s(self.shape, TopAbs_EDGE, TopAbs_FACE, face_map)\n\n for i in range(1, edge_map.Extent() + 1):\n edge = TopoDS.Edge_s(edge_map.FindKey(i))\n\n face_list = face_map.FindFromKey(edge)\n if face_list.Extent() == 0:\n # print(\"no faces\")\n continue\n\n loc = TopLoc_Location()\n\n face = TopoDS.Face_s(face_list.First())\n triangle = BRep_Tool.Triangulation_s(face, loc)\n poly = BRep_Tool.PolygonOnTriangulation_s(edge, triangle, loc)\n\n if poly is None:\n continue\n\n if hasattr(poly, \"Node\"): # OCCT > 7.5\n nrange = range(1, poly.NbNodes() + 1)\n index = poly.Node\n else: # OCCT == 7.5\n indices = poly.Nodes()\n nrange = range(indices.Lower(), indices.Upper() + 1)\n index = indices.Value\n\n transf = loc.Transformation()\n v1 = None\n for j in nrange:\n v2 = triangle.Node(index(j)).Transformed(transf).Coord()\n if v1 is not None:\n self.edges.append((v1, v2))\n v1 = v2\n\n def get_vertices(self):\n return np.asarray(self.vertices, dtype=np.float32)\n\n def get_triangles(self):\n return np.asarray(self.triangles, dtype=np.int32)\n\n def get_normals(self):\n return np.asarray(self.normals, dtype=np.float32)\n\n def get_edges(self):\n return np.asarray(self.edges, dtype=np.float32)\n\n\ndef compute_quality(bb, deviation=0.1):\n # Since tessellation caching depends on quality, try to come up with stable a quality value\n quality = round_sig(\n (round_sig(bb.xsize, 3) + round_sig(bb.ysize, 3) + round_sig(bb.zsize, 3)) / 300 * deviation, 3\n )\n return quality\n\n\n# cache key: (shape.hash, deviaton, angular_tolerance, compute_edges, compute_faces)\n@cached(cache, key=make_key)\ndef tessellate(\n shapes,\n loc,\n # only provided for managing cache:\n deviation: float, # pylint: disable=unused-argument\n quality: float,\n angular_tolerance: float,\n compute_faces=True,\n compute_edges=True,\n debug=False,\n):\n compound = Compound._makeCompound(shapes) if len(shapes) > 1 else shapes[0] # pylint: disable=protected-access\n tess = Tessellator()\n tess.compute(compound, quality, angular_tolerance, compute_faces, compute_edges, debug)\n vertices = tess.get_vertices()\n return (\n {\n \"vertices\": vertices,\n \"triangles\": tess.get_triangles(),\n \"normals\": tess.get_normals(),\n 
\"edges\": tess.get_edges(),\n },\n np_bbox(vertices, *loc),\n )\n\n\ndef discretize_edge(edge, deflection=0.1):\n curve_adaptator = BRepAdaptor_Curve(edge)\n\n discretizer = GCPnts_QuasiUniformDeflection()\n discretizer.Initialize(\n curve_adaptator, deflection, curve_adaptator.FirstParameter(), curve_adaptator.LastParameter()\n )\n\n if not discretizer.IsDone():\n raise AssertionError(\"Discretizer not done.\")\n\n points = [curve_adaptator.Value(discretizer.Parameter(i)).Coord() for i in range(1, discretizer.NbPoints() + 1)]\n\n # return tuples representing the single lines of the egde\n edges = []\n for i in range(len(points) - 1):\n edges.append((points[i], points[i + 1]))\n\n return np.asarray(edges, dtype=np.float32)\n\n\ndef bbox_edges(bb):\n return np.asarray(\n [\n bb[\"xmax\"],\n bb[\"ymax\"],\n bb[\"zmin\"],\n bb[\"xmax\"],\n bb[\"ymax\"],\n bb[\"zmax\"],\n bb[\"xmax\"],\n bb[\"ymin\"],\n bb[\"zmax\"],\n bb[\"xmax\"],\n bb[\"ymax\"],\n bb[\"zmax\"],\n bb[\"xmax\"],\n bb[\"ymin\"],\n bb[\"zmin\"],\n bb[\"xmax\"],\n bb[\"ymax\"],\n bb[\"zmin\"],\n bb[\"xmax\"],\n bb[\"ymin\"],\n bb[\"zmin\"],\n bb[\"xmax\"],\n bb[\"ymin\"],\n bb[\"zmax\"],\n bb[\"xmin\"],\n bb[\"ymax\"],\n bb[\"zmax\"],\n bb[\"xmax\"],\n bb[\"ymax\"],\n bb[\"zmax\"],\n bb[\"xmin\"],\n bb[\"ymax\"],\n bb[\"zmin\"],\n bb[\"xmax\"],\n bb[\"ymax\"],\n bb[\"zmin\"],\n bb[\"xmin\"],\n bb[\"ymax\"],\n bb[\"zmin\"],\n bb[\"xmin\"],\n bb[\"ymax\"],\n bb[\"zmax\"],\n bb[\"xmin\"],\n bb[\"ymin\"],\n bb[\"zmax\"],\n bb[\"xmax\"],\n bb[\"ymin\"],\n bb[\"zmax\"],\n bb[\"xmin\"],\n bb[\"ymin\"],\n bb[\"zmax\"],\n bb[\"xmin\"],\n bb[\"ymax\"],\n bb[\"zmax\"],\n bb[\"xmin\"],\n bb[\"ymin\"],\n bb[\"zmin\"],\n bb[\"xmax\"],\n bb[\"ymin\"],\n bb[\"zmin\"],\n bb[\"xmin\"],\n bb[\"ymin\"],\n bb[\"zmin\"],\n bb[\"xmin\"],\n bb[\"ymax\"],\n bb[\"zmin\"],\n bb[\"xmin\"],\n bb[\"ymin\"],\n bb[\"zmin\"],\n bb[\"xmin\"],\n bb[\"ymin\"],\n bb[\"zmax\"],\n ],\n dtype=\"float32\",\n )\n" ]
[ [ "numpy.asarray", "numpy.empty" ] ]
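A small usage sketch for the Tessellator class in the entry above, assuming cadquery and a jupyter_cadquery package laid out as in the entry's file path are importable.

import cadquery as cq
from jupyter_cadquery.tessellator import Tessellator

# tessellate a simple solid; .wrapped exposes the underlying OCP TopoDS_Shape
shape = cq.Workplane("XY").box(1, 2, 3).val().wrapped
tess = Tessellator()
tess.compute(shape, quality=0.1, angular_tolerance=0.3,
             compute_faces=True, compute_edges=True)
vertices = tess.get_vertices()    # flat float32 array, 3 coordinates per vertex
triangles = tess.get_triangles()  # flat int32 array, 3 vertex indices per triangle
edges = tess.get_edges()          # float32 array of edge line segments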
CharlesThut/sportsipy
[ "eaed3442ca7bf8836cacedba00c051d58a5ca539" ]
[ "sportsipy/ncaab/boxscore.py" ]
[ "import pandas as pd\nimport re\nfrom datetime import timedelta\nfrom pyquery import PyQuery as pq\nfrom urllib.error import HTTPError\nfrom .. import utils\nfrom ..constants import AWAY, HOME\nfrom ..decorators import float_property_decorator, int_property_decorator\nfrom .constants import (BOXSCORE_ELEMENT_INDEX,\n BOXSCORE_SCHEME,\n BOXSCORE_URL,\n BOXSCORES_URL)\nfrom .player import AbstractPlayer, _int_property_decorator\n\n\nclass BoxscorePlayer(AbstractPlayer):\n \"\"\"\n Get player stats for an individual game.\n\n Given a player ID, such as 'carsen-edwards-1' for Carsen Edwards, their\n full name, and all associated stats from the Boxscore page in HTML format,\n parse the HTML and extract only the relevant stats for the specified player\n and assign them to readable properties.\n\n This class inherits the ``AbstractPlayer`` class. As a result, all\n properties associated with ``AbstractPlayer`` can also be read directly\n from this class.\n\n As this class is instantiated from within the Boxscore class, it should not\n be called directly and should instead be queried using the appropriate\n players properties from the Boxscore class.\n\n Parameters\n ----------\n player_id : string\n A player's ID accorsing to sports-reference.com, such as\n 'carsen-edwards-1' for Carsen Edwards. The player ID can be found by\n navigating to the player's stats page and getting the string between\n the final slash and the '.html' in the URL. In general, the ID is in\n the format 'first-last-N' where 'first' is the player's first name in\n lowercase, 'last' is the player's last name in lowercase, and 'N' is a\n number starting at '1' for the first time that player ID has been used\n and increments by 1 for every successive player.\n player_name : string\n A string representing the player's first and last name, such as 'Carsen\n Edwards'.\n player_data : string\n A string representation of the player's HTML data from the Boxscore\n page. 
If the player appears in multiple tables, all of their\n information will appear in one single string concatenated together.\n \"\"\"\n def __init__(self, player_id, player_name, player_data):\n self._index = 0\n self._player_id = player_id\n self._defensive_rating = None\n self._offensive_rating = None\n AbstractPlayer.__init__(self, player_id, player_name, player_data)\n\n @property\n def dataframe(self):\n \"\"\"\n Returns a ``pandas DataFrame`` containing all other relevant class\n properties and values for the specified game.\n \"\"\"\n fields_to_include = {\n 'assist_percentage': self.assist_percentage,\n 'assists': self.assists,\n 'block_percentage': self.block_percentage,\n 'blocks': self.blocks,\n 'defensive_rating': self.defensive_rating,\n 'defensive_rebound_percentage': self.defensive_rebound_percentage,\n 'defensive_rebounds': self.defensive_rebounds,\n 'effective_field_goal_percentage':\n self.effective_field_goal_percentage,\n 'field_goal_attempts': self.field_goal_attempts,\n 'field_goal_percentage': self.field_goal_percentage,\n 'field_goals': self.field_goals,\n 'free_throw_attempt_rate': self.free_throw_attempt_rate,\n 'free_throw_attempts': self.free_throw_attempts,\n 'free_throw_percentage': self.free_throw_percentage,\n 'free_throws': self.free_throws,\n 'minutes_played': self.minutes_played,\n 'offensive_rating': self.offensive_rating,\n 'offensive_rebound_percentage': self.offensive_rebound_percentage,\n 'offensive_rebounds': self.offensive_rebounds,\n 'personal_fouls': self.personal_fouls,\n 'points': self.points,\n 'steal_percentage': self.steal_percentage,\n 'steals': self.steals,\n 'three_point_attempt_rate': self.three_point_attempt_rate,\n 'three_point_attempts': self.three_point_attempts,\n 'three_point_percentage': self.three_point_percentage,\n 'three_pointers': self.three_pointers,\n 'total_rebound_percentage': self.total_rebound_percentage,\n 'total_rebounds': self.total_rebounds,\n 'true_shooting_percentage': self.true_shooting_percentage,\n 'turnover_percentage': self.turnover_percentage,\n 'turnovers': self.turnovers,\n 'two_point_attempts': self.two_point_attempts,\n 'two_point_percentage': self.two_point_percentage,\n 'two_pointers': self.two_pointers,\n 'usage_percentage': self.usage_percentage\n }\n return pd.DataFrame([fields_to_include], index=[self._player_id])\n\n @_int_property_decorator\n def offensive_rating(self):\n \"\"\"\n Returns an ``int`` of the player's offensive rating as measured by the\n points produced per 100 possessions.\n \"\"\"\n return self._offensive_rating\n\n @_int_property_decorator\n def defensive_rating(self):\n \"\"\"\n Returns an ``int`` of the player's defensive rating as measured by the\n points allowed per 100 possesions.\n \"\"\"\n return self._defensive_rating\n\n\nclass Boxscore:\n \"\"\"\n Detailed information about the final statistics for a game.\n\n Stores all relevant metrics for a game such as the date, time, location,\n result, and more advanced metrics such as the effective field goal rate,\n the true shooting percentage, the game's pace, and much more.\n\n Parameters\n ----------\n uri : string\n The relative link to the boxscore HTML page, such as\n '2017-11-10-21-kansas'.\n \"\"\"\n def __init__(self, uri):\n self._uri = uri\n self._date = None\n self._location = None\n self._home_name = None\n self._away_name = None\n self._winner = None\n self._winning_name = None\n self._winning_abbr = None\n self._losing_name = None\n self._losing_abbr = None\n self._pace = None\n self._summary = None\n 
self._away_ranking = None\n self._away_record = None\n self._away_minutes_played = None\n self._away_field_goals = None\n self._away_field_goal_attempts = None\n self._away_field_goal_percentage = None\n self._away_two_point_field_goals = None\n self._away_two_point_field_goal_attempts = None\n self._away_two_point_field_goal_percentage = None\n self._away_three_point_field_goals = None\n self._away_three_point_field_goal_attempts = None\n self._away_three_point_field_goal_percentage = None\n self._away_free_throws = None\n self._away_free_throw_attempts = None\n self._away_free_throw_percentage = None\n self._away_offensive_rebounds = None\n self._away_defensive_rebounds = None\n self._away_total_rebounds = None\n self._away_assists = None\n self._away_steals = None\n self._away_blocks = None\n self._away_turnovers = None\n self._away_personal_fouls = None\n self._away_points = None\n self._away_true_shooting_percentage = None\n self._away_effective_field_goal_percentage = None\n self._away_three_point_attempt_rate = None\n self._away_free_throw_attempt_rate = None\n self._away_offensive_rebound_percentage = None\n self._away_defensive_rebound_percentage = None\n self._away_total_rebound_percentage = None\n self._away_assist_percentage = None\n self._away_steal_percentage = None\n self._away_block_percentage = None\n self._away_turnover_percentage = None\n self._away_offensive_rating = None\n self._away_defensive_rating = None\n self._home_ranking = None\n self._home_record = None\n self._home_minutes_played = None\n self._home_field_goals = None\n self._home_field_goal_attempts = None\n self._home_field_goal_percentage = None\n self._home_two_point_field_goals = None\n self._home_two_point_field_goal_attempts = None\n self._home_two_point_field_goal_percentage = None\n self._home_three_point_field_goals = None\n self._home_three_point_field_goal_attempts = None\n self._home_three_point_field_goal_percentage = None\n self._home_free_throws = None\n self._home_free_throw_attempts = None\n self._home_free_throw_percentage = None\n self._home_offensive_rebounds = None\n self._home_defensive_rebounds = None\n self._home_total_rebounds = None\n self._home_assists = None\n self._home_steals = None\n self._home_blocks = None\n self._home_turnovers = None\n self._home_personal_fouls = None\n self._home_points = None\n self._home_true_shooting_percentage = None\n self._home_effective_field_goal_percentage = None\n self._home_three_point_attempt_rate = None\n self._home_free_throw_attempt_rate = None\n self._home_offensive_rebound_percentage = None\n self._home_defensive_rebound_percentage = None\n self._home_total_rebound_percentage = None\n self._home_assist_percentage = None\n self._home_steal_percentage = None\n self._home_block_percentage = None\n self._home_turnover_percentage = None\n self._home_offensive_rating = None\n self._home_defensive_rating = None\n\n self._parse_game_data(uri)\n\n def __str__(self):\n \"\"\"\n Return the string representation of the class.\n \"\"\"\n return (f'Boxscore for {self._away_name.text()} at '\n f'{self._home_name.text()} ({self.date})')\n\n def __repr__(self):\n \"\"\"\n Return the string representation of the class.\n \"\"\"\n return self.__str__()\n\n def _retrieve_html_page(self, uri):\n \"\"\"\n Download the requested HTML page.\n\n Given a relative link, download the requested page and strip it of all\n comment tags before returning a pyquery object which will be used to\n parse the data.\n\n Parameters\n ----------\n uri : string\n The relative link 
to the boxscore HTML page, such as\n '2017-11-10-21-kansas'.\n\n Returns\n -------\n PyQuery object\n The requested page is returned as a queriable PyQuery object with\n the comment tags removed.\n \"\"\"\n url = BOXSCORE_URL % uri\n try:\n url_data = pq(url)\n except HTTPError:\n return None\n return pq(utils._remove_html_comment_tags(url_data))\n\n def _parse_game_date_and_location(self, field, boxscore):\n \"\"\"\n Retrieve the game's date and location.\n\n The date and location of the game follow a more complicated parsing\n scheme and should be handled differently from other tags. Both fields\n are separated by a newline character ('\\n') with the first line being\n the date and the second being the location.\n\n Parameters\n ----------\n field : string\n The name of the attribute to parse\n boxscore : PyQuery object\n A PyQuery object containing all of the HTML data from the boxscore.\n\n Returns\n -------\n string\n Depending on the requested field, returns a text representation of\n either the date or location of the game.\n \"\"\"\n scheme = BOXSCORE_SCHEME[field]\n items = [i.text() for i in boxscore(scheme).items()]\n game_info = items[0].split('\\n')\n if len(game_info) < 3 and field == 'location':\n return None\n return game_info[BOXSCORE_ELEMENT_INDEX[field]]\n\n def _parse_name(self, field, boxscore):\n \"\"\"\n Retrieve the team's complete name tag.\n\n Both the team's full name (embedded in the tag's text) and the team's\n abbreviation are stored in the name tag which can be used to parse\n the winning and losing team's information.\n\n Parameters\n ----------\n field : string\n The name of the attribute to parse\n boxscore : PyQuery object\n A PyQuery object containing all of the HTML data from the boxscore.\n\n Returns\n -------\n PyQuery object\n The complete text for the requested tag.\n \"\"\"\n scheme = BOXSCORE_SCHEME[field]\n name = boxscore(scheme)\n if 'cbb/schools' not in str(name):\n name = re.sub(r'.*name\">', '', str(name))\n name = re.sub(r'<.*', '', str(name))\n return name\n\n def _parse_ranking(self, field, boxscore):\n \"\"\"\n Parse each team's rank if applicable.\n\n Retrieve the team's rank according to the rankings published each week.\n The ranking for the week is only located in the scores section at\n the top of the page and not in the actual boxscore information. The\n rank is after the team name inside a parenthesis with a special\n 'pollrank' attribute. If this is not in the team's boxscore\n information, the team is assumed to not have a rank and will return a\n value of None.\n\n Parameters\n ----------\n field : string\n The name of the attribute to parse.\n boxscore : PyQuery object\n A PyQuery obejct containing all of the HTML data from the boxscore.\n\n Returns\n -------\n int\n An int representing the team's ranking or None if the team is not\n ranked.\n \"\"\"\n ranking = None\n index = BOXSCORE_ELEMENT_INDEX[field]\n teams_boxscore = boxscore(BOXSCORE_SCHEME[field])\n # Occasionally, the list of boxscores for the day won't be saved on the\n # page. 
If that's the case, return the default ranking.\n if str(teams_boxscore) == '':\n return ranking\n team = pq(teams_boxscore[index])\n if 'pollrank' in str(team):\n rank_str = re.findall(r'\\(\\d+\\)', str(team))\n if len(rank_str) == 1:\n ranking = int(rank_str[0].replace('(', '').replace(')', ''))\n return ranking\n\n def _parse_record(self, field, boxscore, index):\n \"\"\"\n Parse each team's record.\n\n Find the record for both the home and away teams which are listed above\n the basic boxscore stats tables. Depending on whether or not the\n advanced stats table is included on the page (generally only for more\n recent matchups), a blank header is added to the list which should be\n removed. With all blank headers removed, the home and away team records\n can be easily parsed by specifying which team is desired.\n\n Parameters\n ----------\n field : string\n The name of the attribute to parse.\n boxscore : PyQuery object\n A PyQuery object containing all of the HTML data from the boxscore.\n index : int\n An int of the index to pull the record from, as specified in the\n BOXSCORE_ELEMENT_INDEX dictionary.\n\n Returns\n -------\n string\n A string of the team's record in the format 'Team Name (W-L)'.\n \"\"\"\n records = boxscore(BOXSCORE_SCHEME[field]).items()\n records = [x.text() for x in records if x.text() != '']\n return records[index]\n\n def _find_boxscore_tables(self, boxscore):\n \"\"\"\n Find all tables with boxscore information on the page.\n\n Iterate through all tables on the page and see if any of them are\n boxscore pages by checking if the ID is prefixed with 'box-score-'. If\n so, add it to a list and return the final list at the end.\n\n Parameters\n ----------\n boxscore : PyQuery object\n A PyQuery object containing all of the HTML data from the boxscore.\n\n Returns\n -------\n list\n Returns a ``list`` of the PyQuery objects where each object\n represents a boxscore table.\n \"\"\"\n tables = []\n\n for table in boxscore('table').items():\n try:\n if 'box-score-' in table.attr['id']:\n tables.append(table)\n except (KeyError, TypeError):\n continue\n return tables\n\n def _find_player_id(self, row):\n \"\"\"\n Find the player's ID.\n\n Find the player's ID as embedded in the 'data-append-csv' attibute,\n such as 'carsen-edwards-1' for Carsen Edwards.\n\n Parameters\n ----------\n row : PyQuery object\n A PyQuery object representing a single row in a boxscore table for\n a single player.\n\n Returns\n -------\n str\n Returns a ``string`` of the player's ID, such as 'carsen-edwards-1'\n for Carsen Edwards.\n \"\"\"\n return row('th').attr('data-append-csv')\n\n def _find_player_name(self, row):\n \"\"\"\n Find the player's full name.\n\n Find the player's full name, such as 'Carsen Edwards'. 
The name is the\n text displayed for a link to the player's individual stats page.\n\n Parameters\n ----------\n row : PyQuery object\n A PyQuery object representing a single row in a boxscore table for\n a single player.\n\n Returns\n -------\n str\n Returns a ``string`` of the player's full name, such as 'Carsen\n Edwards'.\n \"\"\"\n return row('a').text()\n\n def _extract_player_stats(self, table, player_dict, home_or_away):\n \"\"\"\n Combine all player stats into a single object.\n\n Since each player generally has a couple of rows worth of stats (one\n for basic stats and another for advanced stats) on the boxscore page,\n both rows should be combined into a single string object to easily\n query all fields from a single object instead of determining which row\n to pull metrics from.\n\n Parameters\n ----------\n table : PyQuery object\n A PyQuery object of a single boxscore table, such as the home\n team's advanced stats or the away team's basic stats.\n player_dict : dictionary\n A dictionary where each key is a string of the player's ID and each\n value is a dictionary where the values contain the player's name,\n HTML data, and a string constant indicating which team the player\n is a member of.\n home_or_away : string constant\n A string constant indicating whether the player plays for the home\n or away team.\n\n Returns\n -------\n dictionary\n Returns a ``dictionary`` where each key is a string of the player's\n ID and each value is a dictionary where the values contain the\n player's name, HTML data, and a string constant indicating which\n team the player is a member of.\n \"\"\"\n for row in table('tbody tr').items():\n player_id = self._find_player_id(row)\n # Occurs when a header row is identified instead of a player.\n if not player_id:\n continue\n name = self._find_player_name(row)\n try:\n player_dict[player_id]['data'] += str(row).strip()\n except KeyError:\n player_dict[player_id] = {\n 'name': name,\n 'data': str(row).strip(),\n 'team': home_or_away\n }\n return player_dict\n\n def _instantiate_players(self, player_dict):\n \"\"\"\n Create a list of player instances for both the home and away teams.\n\n For every player listed on the boxscores page, create an instance of\n the BoxscorePlayer class for that player and add them to a list of\n players for their respective team.\n\n Parameters\n ----------\n player_dict : dictionary\n A dictionary containing information for every player on the\n boxscores page. Each key is a string containing the player's ID\n and each value is a dictionary with the player's full name, a\n string representation of their HTML stats, and a string constant\n denoting which team they play for as the values.\n\n Returns\n -------\n tuple\n Returns a ``tuple`` in the format (away_players, home_players)\n where each element is a list of player instances for the away and\n home teams, respectively.\n \"\"\"\n home_players = []\n away_players = []\n for player_id, details in player_dict.items():\n player = BoxscorePlayer(player_id,\n details['name'],\n details['data'])\n if details['team'] == HOME:\n home_players.append(player)\n else:\n away_players.append(player)\n return away_players, home_players\n\n def _find_players(self, boxscore):\n \"\"\"\n Find all players for each team.\n\n Iterate through every player for both teams as found in the boxscore\n tables and create a list of instances of the BoxscorePlayer class for\n each player. 
Return lists of player instances comprising the away and\n home team players, respectively.\n\n Parameters\n ----------\n boxscore : PyQuery object\n A PyQuery object containing all of the HTML data from the boxscore.\n\n Returns\n -------\n tuple\n Returns a ``tuple`` in the format (away_players, home_players)\n where each element is a list of player instances for the away and\n home teams, respectively.\n \"\"\"\n player_dict = {}\n table_count = 0\n\n tables = self._find_boxscore_tables(boxscore)\n for table in tables:\n home_or_away = HOME\n # There are two tables per team with the first two tables belonging\n # to the home team.\n if table_count < 2:\n home_or_away = AWAY\n player_dict = self._extract_player_stats(table,\n player_dict,\n home_or_away)\n table_count += 1\n away_players, home_players = self._instantiate_players(player_dict)\n return away_players, home_players\n\n def _parse_summary(self, boxscore):\n \"\"\"\n Find the game summary including score in each quarter.\n\n The game summary provides further information on the points scored\n during each half, including the final score and any overtimes if\n applicable. The final output will be in a dictionary with two keys,\n 'away' and 'home'. The value of each key will be a list for each\n respective team's score by order of the half, with the first element\n belonging to the first half, similar to the following:\n\n {\n 'away': [22, 31],\n 'home': [40, 41]\n }\n\n Parameters\n ----------\n boxscore : PyQuery object\n A PyQuery object containing all of the HTML from the boxscore.\n\n Returns\n -------\n dict\n Returns a ``dictionary`` representing the score for each team in\n each quarter of the game.\n \"\"\"\n team = ['away', 'home']\n summary = {'away': [], 'home': []}\n game_summary = boxscore(BOXSCORE_SCHEME['summary'])\n for ind, team_info in enumerate(game_summary('tr').items()):\n # Only pull the first N-1 items as the last element is the final\n # score for each team which is already stored in an attribute, and\n # shouldn't be duplicated.\n for half in list(team_info('td[class=\"right\"]').items())[:-1]:\n ind = ind % 2\n try:\n summary[team[ind]].append(int(half.text()))\n except ValueError:\n summary[team[ind]].append(None)\n return summary\n\n def _parse_game_data(self, uri):\n \"\"\"\n Parses a value for every attribute.\n\n This function looks through every attribute and retrieves the value\n according to the parsing scheme and index of the attribute from the\n passed HTML data. Once the value is retrieved, the attribute's value is\n updated with the returned result.\n\n Note that this method is called directly once Boxscore is invoked and\n does not need to be called manually.\n\n Parameters\n ----------\n uri : string\n The relative link to the boxscore HTML page, such as\n '2017-11-10-21-kansas'.\n \"\"\"\n boxscore = self._retrieve_html_page(uri)\n # If the boxscore is None, the game likely hasn't been played yet and\n # no information can be gathered. 
As there is nothing to grab, the\n # class instance should just be empty.\n if not boxscore:\n return\n\n for field in self.__dict__:\n # Remove the '_' from the name\n short_field = str(field)[1:]\n if short_field == 'winner' or \\\n short_field == 'uri':\n continue\n if short_field == 'location' or \\\n short_field == 'date':\n value = self._parse_game_date_and_location(short_field,\n boxscore)\n setattr(self, field, value)\n continue\n if short_field == 'away_name' or \\\n short_field == 'home_name':\n value = self._parse_name(short_field, boxscore)\n setattr(self, field, value)\n continue\n if short_field == 'away_ranking' or \\\n short_field == 'home_ranking':\n value = self._parse_ranking(short_field, boxscore)\n setattr(self, field, value)\n continue\n if short_field == 'summary':\n value = self._parse_summary(boxscore)\n setattr(self, field, value)\n continue\n index = 0\n if short_field in BOXSCORE_ELEMENT_INDEX.keys():\n index = BOXSCORE_ELEMENT_INDEX[short_field]\n if short_field == 'away_record' or \\\n short_field == 'home_record':\n value = self._parse_record(short_field, boxscore, index)\n setattr(self, field, value)\n continue\n value = utils._parse_field(BOXSCORE_SCHEME,\n boxscore,\n short_field,\n index)\n setattr(self, field, value)\n self._away_players, self._home_players = self._find_players(boxscore)\n\n @property\n def dataframe(self):\n \"\"\"\n Returns a pandas DataFrame containing all other class properties and\n values. The index for the DataFrame is the string URI that is used to\n instantiate the class, such as '2017-11-10-21-kansas'.\n \"\"\"\n if self._away_points is None and self._home_points is None:\n return None\n fields_to_include = {\n 'away_assist_percentage': self.away_assist_percentage,\n 'away_assists': self.away_assists,\n 'away_block_percentage': self.away_block_percentage,\n 'away_blocks': self.away_blocks,\n 'away_defensive_rating': self.away_defensive_rating,\n 'away_defensive_rebound_percentage':\n self.away_defensive_rebound_percentage,\n 'away_defensive_rebounds': self.away_defensive_rebounds,\n 'away_effective_field_goal_percentage':\n self.away_effective_field_goal_percentage,\n 'away_field_goal_attempts': self.away_field_goal_attempts,\n 'away_field_goal_percentage': self.away_field_goal_percentage,\n 'away_field_goals': self.away_field_goals,\n 'away_free_throw_attempt_rate': self.away_free_throw_attempt_rate,\n 'away_free_throw_attempts': self.away_free_throw_attempts,\n 'away_free_throw_percentage': self.away_free_throw_percentage,\n 'away_free_throws': self.away_free_throws,\n 'away_losses': self.away_losses,\n 'away_minutes_played': self.away_minutes_played,\n 'away_offensive_rating': self.away_offensive_rating,\n 'away_offensive_rebound_percentage':\n self.away_offensive_rebound_percentage,\n 'away_offensive_rebounds': self.away_offensive_rebounds,\n 'away_personal_fouls': self.away_personal_fouls,\n 'away_points': self.away_points,\n 'away_ranking': self.away_ranking,\n 'away_steal_percentage': self.away_steal_percentage,\n 'away_steals': self.away_steals,\n 'away_three_point_attempt_rate':\n self.away_three_point_attempt_rate,\n 'away_three_point_field_goal_attempts':\n self.away_three_point_field_goal_attempts,\n 'away_three_point_field_goal_percentage':\n self.away_three_point_field_goal_percentage,\n 'away_three_point_field_goals': self.away_three_point_field_goals,\n 'away_total_rebound_percentage':\n self.away_total_rebound_percentage,\n 'away_total_rebounds': self.away_total_rebounds,\n 'away_true_shooting_percentage':\n 
self.away_true_shooting_percentage,\n 'away_turnover_percentage': self.away_turnover_percentage,\n 'away_turnovers': self.away_turnovers,\n 'away_two_point_field_goal_attempts':\n self.away_two_point_field_goal_attempts,\n 'away_two_point_field_goal_percentage':\n self.away_two_point_field_goal_percentage,\n 'away_two_point_field_goals': self.away_two_point_field_goals,\n 'away_win_percentage': self.away_win_percentage,\n 'away_wins': self.away_wins,\n 'date': self.date,\n 'home_assist_percentage': self.home_assist_percentage,\n 'home_assists': self.home_assists,\n 'home_block_percentage': self.home_block_percentage,\n 'home_blocks': self.home_blocks,\n 'home_defensive_rating': self.home_defensive_rating,\n 'home_defensive_rebound_percentage':\n self.home_defensive_rebound_percentage,\n 'home_defensive_rebounds': self.home_defensive_rebounds,\n 'home_effective_field_goal_percentage':\n self.home_effective_field_goal_percentage,\n 'home_field_goal_attempts': self.home_field_goal_attempts,\n 'home_field_goal_percentage': self.home_field_goal_percentage,\n 'home_field_goals': self.home_field_goals,\n 'home_free_throw_attempt_rate': self.home_free_throw_attempt_rate,\n 'home_free_throw_attempts': self.home_free_throw_attempts,\n 'home_free_throw_percentage': self.home_free_throw_percentage,\n 'home_free_throws': self.home_free_throws,\n 'home_losses': self.home_losses,\n 'home_minutes_played': self.home_minutes_played,\n 'home_offensive_rating': self.home_offensive_rating,\n 'home_offensive_rebound_percentage':\n self.home_offensive_rebound_percentage,\n 'home_offensive_rebounds': self.home_offensive_rebounds,\n 'home_personal_fouls': self.home_personal_fouls,\n 'home_points': self.home_points,\n 'home_ranking': self.home_ranking,\n 'home_steal_percentage': self.home_steal_percentage,\n 'home_steals': self.home_steals,\n 'home_three_point_attempt_rate':\n self.home_three_point_attempt_rate,\n 'home_three_point_field_goal_attempts':\n self.home_three_point_field_goal_attempts,\n 'home_three_point_field_goal_percentage':\n self.home_three_point_field_goal_percentage,\n 'home_three_point_field_goals': self.home_three_point_field_goals,\n 'home_total_rebound_percentage':\n self.home_total_rebound_percentage,\n 'home_total_rebounds': self.home_total_rebounds,\n 'home_true_shooting_percentage':\n self.home_true_shooting_percentage,\n 'home_turnover_percentage': self.home_turnover_percentage,\n 'home_turnovers': self.home_turnovers,\n 'home_two_point_field_goal_attempts':\n self.home_two_point_field_goal_attempts,\n 'home_two_point_field_goal_percentage':\n self.home_two_point_field_goal_percentage,\n 'home_two_point_field_goals': self.home_two_point_field_goals,\n 'home_win_percentage': self.home_win_percentage,\n 'home_wins': self.home_wins,\n 'location': self.location,\n 'losing_abbr': self.losing_abbr,\n 'losing_name': self.losing_name,\n 'pace': self.pace,\n 'winner': self.winner,\n 'winning_abbr': self.winning_abbr,\n 'winning_name': self.winning_name\n }\n return pd.DataFrame([fields_to_include], index=[self._uri])\n\n @property\n def date(self):\n \"\"\"\n Returns a ``string`` of the date the game took place.\n \"\"\"\n return self._date\n\n @property\n def away_players(self):\n \"\"\"\n Returns a ``list`` of ``BoxscorePlayer`` class instances for each\n player on the away team.\n \"\"\"\n return self._away_players\n\n @property\n def home_players(self):\n \"\"\"\n Returns a ``list`` of ``BoxscorePlayer`` class instances for each\n player on the home team.\n \"\"\"\n return 
self._home_players\n\n @property\n def location(self):\n \"\"\"\n Returns a ``string`` of the name of the venue where the game was\n played.\n \"\"\"\n return self._location\n\n @property\n def summary(self):\n \"\"\"\n Returns a ``dictionary`` with two keys, 'away' and 'home'. The value of\n each key will be a list for each respective team's score by order of\n the half, with the first element belonging to the first half, similar\n to the following:\n\n {\n 'away': [22, 31],\n 'home': [40, 41]\n }\n \"\"\"\n return self._summary\n\n @property\n def winner(self):\n \"\"\"\n Returns a ``string`` constant indicating whether the home or away team\n won.\n \"\"\"\n if self.home_points > self.away_points:\n return HOME\n return AWAY\n\n @property\n def winning_name(self):\n \"\"\"\n Returns a ``string`` of the winning team's name, such as 'Purdue\n Boilermakers'.\n \"\"\"\n if self.winner == HOME:\n if 'cbb/schools' not in str(self._home_name):\n return str(self._home_name)\n return self._home_name.text()\n if 'cbb/schools' not in str(self._away_name):\n return str(self._away_name)\n return self._away_name.text()\n\n @property\n def winning_abbr(self):\n \"\"\"\n Returns a ``string`` of the winning team's abbreviation, such as\n 'PURDUE' for the Purdue Boilermakers.\n \"\"\"\n if self.winner == HOME:\n if 'cbb/schools' not in str(self._home_name):\n return str(self._home_name)\n return utils._parse_abbreviation(self._home_name)\n if 'cbb/schools' not in str(self._away_name):\n return str(self._away_name)\n return utils._parse_abbreviation(self._away_name)\n\n @property\n def losing_name(self):\n \"\"\"\n Returns a ``string`` of the losing team's name, such as 'Indiana'\n Hoosiers'.\n \"\"\"\n if self.winner == HOME:\n if 'cbb/schools' not in str(self._away_name):\n return str(self._away_name)\n return self._away_name.text()\n if 'cbb/schools' not in str(self._home_name):\n return str(self._home_name)\n return self._home_name.text()\n\n @property\n def losing_abbr(self):\n \"\"\"\n Returns a ``string`` of the losing team's abbreviation, such as\n 'INDIANA' for the Indiana Hoosiers.\n \"\"\"\n if self.winner == HOME:\n if 'cbb/schools' not in str(self._away_name):\n return str(self._away_name)\n return utils._parse_abbreviation(self._away_name)\n if 'cbb/schools' not in str(self._home_name):\n return str(self._home_name)\n return utils._parse_abbreviation(self._home_name)\n\n @float_property_decorator\n def pace(self):\n \"\"\"\n Returns a ``float`` of the game's overall pace, measured by the number\n of possessions per 40 minutes.\n \"\"\"\n return self._pace\n\n @int_property_decorator\n def away_ranking(self):\n \"\"\"\n Returns an ``int`` of the away team's ranking during the week, or\n ``None`` if the team wasn't ranked.\n \"\"\"\n return self._away_ranking\n\n @float_property_decorator\n def away_win_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of games the away team has won\n after the conclusion of the game. 
Percentage ranges from 0-1.\n \"\"\"\n try:\n result = float(self.away_wins) / \\\n float(self.away_wins + self.away_losses)\n return round(result, 3)\n except ZeroDivisionError:\n return 0.0\n\n @int_property_decorator\n def away_wins(self):\n \"\"\"\n Returns an ``int`` of the number of games the team has won after the\n conclusion of the game.\n \"\"\"\n try:\n wins, losses = re.findall(r'\\d+', self._away_record)\n return wins\n except (ValueError, TypeError):\n return 0\n\n @int_property_decorator\n def away_losses(self):\n \"\"\"\n Returns an ``int`` of the number of games the team has lost after the\n conclusion of the game.\n \"\"\"\n try:\n wins, losses = re.findall(r'\\d+', self._away_record)\n return losses\n except (ValueError, TypeError):\n return 0\n\n @int_property_decorator\n def away_minutes_played(self):\n \"\"\"\n Returns an ``int`` of the total number of minutes the team played\n during the game.\n \"\"\"\n return self._away_minutes_played\n\n @int_property_decorator\n def away_field_goals(self):\n \"\"\"\n Returns an ``int`` of the total number of field goals made by the away\n team.\n \"\"\"\n return self._away_field_goals\n\n @int_property_decorator\n def away_field_goal_attempts(self):\n \"\"\"\n Returns an ``int`` of the total number of field goal attempts by the\n away team.\n \"\"\"\n return self._away_field_goal_attempts\n\n @float_property_decorator\n def away_field_goal_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of field goals made divided by the\n total number of field goal attempts by the away team. Percentage ranges\n from 0-1.\n \"\"\"\n return self._away_field_goal_percentage\n\n @int_property_decorator\n def away_three_point_field_goals(self):\n \"\"\"\n Returns an ``int`` of the total number of three point field goals made\n by the away team.\n \"\"\"\n return self._away_three_point_field_goals\n\n @int_property_decorator\n def away_three_point_field_goal_attempts(self):\n \"\"\"\n Returns an ``int`` of the total number of three point field goal\n attempts by the away team.\n \"\"\"\n return self._away_three_point_field_goal_attempts\n\n @float_property_decorator\n def away_three_point_field_goal_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of three point field goals made\n divided by the number of three point field goal attempts by the away\n team. 
Percentage ranges from 0-1.\n \"\"\"\n return self._away_three_point_field_goal_percentage\n\n @int_property_decorator\n def away_two_point_field_goals(self):\n \"\"\"\n Returns an ``int`` of the total number of two point field goals made\n by the away team.\n \"\"\"\n return self._away_two_point_field_goals\n\n @int_property_decorator\n def away_two_point_field_goal_attempts(self):\n \"\"\"\n Returns an ``int`` of the total number of two point field goal attempts\n by the away team.\n \"\"\"\n return self._away_two_point_field_goal_attempts\n\n @float_property_decorator\n def away_two_point_field_goal_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of two point field goals made divided\n by the number of two point field goal attempts by the away team.\n Percentage ranges from 0-1.\n \"\"\"\n return self._away_two_point_field_goal_percentage\n\n @int_property_decorator\n def away_free_throws(self):\n \"\"\"\n Returns an ``int`` of the total number of free throws made by the away\n team.\n \"\"\"\n return self._away_free_throws\n\n @int_property_decorator\n def away_free_throw_attempts(self):\n \"\"\"\n Returns an ``int`` of the total number of free throw attempts by the\n away team.\n \"\"\"\n return self._away_free_throw_attempts\n\n @float_property_decorator\n def away_free_throw_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of free throws made divided by the\n number of free throw attempts by the away team.\n \"\"\"\n return self._away_free_throw_percentage\n\n @int_property_decorator\n def away_offensive_rebounds(self):\n \"\"\"\n Returns an ``int`` of the total number of offensive rebounds by the\n away team.\n \"\"\"\n return self._away_offensive_rebounds\n\n @int_property_decorator\n def away_defensive_rebounds(self):\n \"\"\"\n Returns an ``int`` of the total number of defensive rebounds by the\n away team.\n \"\"\"\n return self._away_defensive_rebounds\n\n @int_property_decorator\n def away_total_rebounds(self):\n \"\"\"\n Returns an ``int`` of the total number of rebounds by the away team.\n \"\"\"\n return self._away_total_rebounds\n\n @int_property_decorator\n def away_assists(self):\n \"\"\"\n Returns an ``int`` of the total number of assists by the away team.\n \"\"\"\n return self._away_assists\n\n @int_property_decorator\n def away_steals(self):\n \"\"\"\n Returns an ``int`` of the total number of steals by the away team.\n \"\"\"\n return self._away_steals\n\n @int_property_decorator\n def away_blocks(self):\n \"\"\"\n Returns an ``int`` of the total number of blocks by the away team.\n \"\"\"\n return self._away_blocks\n\n @int_property_decorator\n def away_turnovers(self):\n \"\"\"\n Returns an ``int`` of the total number of turnovers by the away team.\n \"\"\"\n return self._away_turnovers\n\n @int_property_decorator\n def away_personal_fouls(self):\n \"\"\"\n Returns an ``int`` of the total number of personal fouls by the away\n team.\n \"\"\"\n return self._away_personal_fouls\n\n @int_property_decorator\n def away_points(self):\n \"\"\"\n Returns an ``int`` of the number of points the away team scored.\n \"\"\"\n return self._away_points\n\n @float_property_decorator\n def away_true_shooting_percentage(self):\n \"\"\"\n Returns a ``float`` of the away team's true shooting percentage which\n considers free throws, 2-point field goals, and 3-point field goals.\n Percentage ranges from 0-1.\n \"\"\"\n return self._away_true_shooting_percentage\n\n @float_property_decorator\n def away_effective_field_goal_percentage(self):\n \"\"\"\n 
Returns a ``float`` of the away team's field goal percentage while\n giving extra weight to 3-point field goals. Percentage ranges from 0-1.\n \"\"\"\n return self._away_effective_field_goal_percentage\n\n @float_property_decorator\n def away_three_point_attempt_rate(self):\n \"\"\"\n Returns a ``float`` of the percentage of field goal attempts from\n 3-point range by the away team. Percentage ranges from 0-1.\n \"\"\"\n return self._away_three_point_attempt_rate\n\n @float_property_decorator\n def away_free_throw_attempt_rate(self):\n \"\"\"\n Returns a ``float`` of the average number of free throw attempts per\n field goal attempt by the away team.\n \"\"\"\n return self._away_free_throw_attempt_rate\n\n @float_property_decorator\n def away_offensive_rebound_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of available offensive rebounds\n the away team grabbed. Percentage ranges from 0-100.\n \"\"\"\n return self._away_offensive_rebound_percentage\n\n @float_property_decorator\n def away_defensive_rebound_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of available defensive rebounds\n the away team grabbed. Percentage ranges from 0-100.\n \"\"\"\n return self._away_defensive_rebound_percentage\n\n @float_property_decorator\n def away_total_rebound_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of available rebounds the away\n team grabbed. Percentage ranges from 0-100.\n \"\"\"\n return self._away_total_rebound_percentage\n\n @float_property_decorator\n def away_assist_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of the away team's field goals\n that were assisted. Percentage ranges from 0-100.\n \"\"\"\n return self._away_assist_percentage\n\n @float_property_decorator\n def away_steal_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of possessions that ended in a\n steal by the away team. Percentage ranges from 0-100.\n \"\"\"\n return self._away_steal_percentage\n\n @float_property_decorator\n def away_block_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of 2-point field goals that were\n blocked by the away team. Percentage ranges from 0-100.\n \"\"\"\n return self._away_block_percentage\n\n @float_property_decorator\n def away_turnover_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of times the away team turned the\n ball over per 100 possessions.\n \"\"\"\n return self._away_turnover_percentage\n\n @float_property_decorator\n def away_offensive_rating(self):\n \"\"\"\n Returns a ``float`` of the average number of points scored per 100\n possessions by the away team.\n \"\"\"\n return self._away_offensive_rating\n\n @float_property_decorator\n def away_defensive_rating(self):\n \"\"\"\n Returns a ``float`` of the average number of points scored per 100\n possessions by the away team.\n \"\"\"\n return self._away_defensive_rating\n\n @int_property_decorator\n def home_ranking(self):\n \"\"\"\n Returns an ``int`` of the home team's ranking during the week, or\n ``None`` if they were not ranked.\n \"\"\"\n return self._home_ranking\n\n @float_property_decorator\n def home_win_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of games the home team has won\n after the conclusion of the game. 
Percentage ranges from 0-1.\n \"\"\"\n try:\n result = float(self.home_wins) / \\\n float(self.home_wins + self.home_losses)\n return round(result, 3)\n except ZeroDivisionError:\n return 0.0\n\n @int_property_decorator\n def home_wins(self):\n \"\"\"\n Returns an ``int`` of the number of games the home team won after the\n conclusion of the game.\n \"\"\"\n try:\n wins, losses = re.findall(r'\\d+', self._home_record)\n return wins\n except (ValueError, TypeError):\n return 0\n\n @int_property_decorator\n def home_losses(self):\n \"\"\"\n Returns an ``int`` of the number of games the home team lost after the\n conclusion of the game.\n \"\"\"\n try:\n wins, losses = re.findall(r'\\d+', self._home_record)\n return losses\n except (ValueError, TypeError):\n return 0\n\n @int_property_decorator\n def home_minutes_played(self):\n \"\"\"\n Returns an ``int`` of the total number of minutes the team played\n during the game.\n \"\"\"\n return self._home_minutes_played\n\n @int_property_decorator\n def home_field_goals(self):\n \"\"\"\n Returns an ``int`` of the total number of field goals made by the home\n team.\n \"\"\"\n return self._home_field_goals\n\n @int_property_decorator\n def home_field_goal_attempts(self):\n \"\"\"\n Returns an ``int`` of the total number of field goal attempts by the\n home team.\n \"\"\"\n return self._home_field_goal_attempts\n\n @float_property_decorator\n def home_field_goal_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of field goals made divided by the\n total number of field goal attempts by the home team. Percentage ranges\n from 0-1.\n \"\"\"\n return self._home_field_goal_percentage\n\n @int_property_decorator\n def home_three_point_field_goals(self):\n \"\"\"\n Returns an ``int`` of the total number of three point field goals made\n by the home team.\n \"\"\"\n return self._home_three_point_field_goals\n\n @int_property_decorator\n def home_three_point_field_goal_attempts(self):\n \"\"\"\n Returns an ``int`` of the total number of three point field goal\n attempts by the home team.\n \"\"\"\n return self._home_three_point_field_goal_attempts\n\n @float_property_decorator\n def home_three_point_field_goal_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of three point field goals made\n divided by the number of three point field goal attempts by the home\n team. 
Percentage ranges from 0-1.\n \"\"\"\n return self._home_three_point_field_goal_percentage\n\n @int_property_decorator\n def home_two_point_field_goals(self):\n \"\"\"\n Returns an ``int`` of the total number of two point field goals made\n by the home team.\n \"\"\"\n return self._home_two_point_field_goals\n\n @int_property_decorator\n def home_two_point_field_goal_attempts(self):\n \"\"\"\n Returns an ``int`` of the total number of two point field goal attempts\n by the home team.\n \"\"\"\n return self._home_two_point_field_goal_attempts\n\n @float_property_decorator\n def home_two_point_field_goal_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of two point field goals made divided\n by the number of two point field goal attempts by the home team.\n Percentage ranges from 0-1.\n \"\"\"\n return self._home_two_point_field_goal_percentage\n\n @int_property_decorator\n def home_free_throws(self):\n \"\"\"\n Returns an ``int`` of the total number of free throws made by the home\n team.\n \"\"\"\n return self._home_free_throws\n\n @int_property_decorator\n def home_free_throw_attempts(self):\n \"\"\"\n Returns an ``int`` of the total number of free throw attempts by the\n home team.\n \"\"\"\n return self._home_free_throw_attempts\n\n @float_property_decorator\n def home_free_throw_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of free throws made divided by the\n number of free throw attempts by the home team.\n \"\"\"\n return self._home_free_throw_percentage\n\n @int_property_decorator\n def home_offensive_rebounds(self):\n \"\"\"\n Returns an ``int`` of the total number of offensive rebounds by the\n home team.\n \"\"\"\n return self._home_offensive_rebounds\n\n @int_property_decorator\n def home_defensive_rebounds(self):\n \"\"\"\n Returns an ``int`` of the total number of defensive rebounds by the\n home team.\n \"\"\"\n return self._home_defensive_rebounds\n\n @int_property_decorator\n def home_total_rebounds(self):\n \"\"\"\n Returns an ``int`` of the total number of rebounds by the home team.\n \"\"\"\n return self._home_total_rebounds\n\n @int_property_decorator\n def home_assists(self):\n \"\"\"\n Returns an ``int`` of the total number of assists by the home team.\n \"\"\"\n return self._home_assists\n\n @int_property_decorator\n def home_steals(self):\n \"\"\"\n Returns an ``int`` of the total number of steals by the home team.\n \"\"\"\n return self._home_steals\n\n @int_property_decorator\n def home_blocks(self):\n \"\"\"\n Returns an ``int`` of the total number of blocks by the home team.\n \"\"\"\n return self._home_blocks\n\n @int_property_decorator\n def home_turnovers(self):\n \"\"\"\n Returns an ``int`` of the total number of turnovers by the home team.\n \"\"\"\n return self._home_turnovers\n\n @int_property_decorator\n def home_personal_fouls(self):\n \"\"\"\n Returns an ``int`` of the total number of personal fouls by the home\n team.\n \"\"\"\n return self._home_personal_fouls\n\n @int_property_decorator\n def home_points(self):\n \"\"\"\n Returns an ``int`` of the number of points the home team scored.\n \"\"\"\n return self._home_points\n\n @float_property_decorator\n def home_true_shooting_percentage(self):\n \"\"\"\n Returns a ``float`` of the home team's true shooting percentage which\n considers free throws, 2-point field goals, and 3-point field goals.\n Percentage ranges from 0-1.\n \"\"\"\n return self._home_true_shooting_percentage\n\n @float_property_decorator\n def home_effective_field_goal_percentage(self):\n \"\"\"\n 
Returns a ``float`` of the home team's field goal percentage while\n giving extra weight to 3-point field goals. Percentage ranges from 0-1.\n \"\"\"\n return self._home_effective_field_goal_percentage\n\n @float_property_decorator\n def home_three_point_attempt_rate(self):\n \"\"\"\n Returns a ``float`` of the percentage of field goal attempts from\n 3-point range by the home team. Percentage ranges from 0-1.\n \"\"\"\n return self._home_three_point_attempt_rate\n\n @float_property_decorator\n def home_free_throw_attempt_rate(self):\n \"\"\"\n Returns a ``float`` of the average number of free throw attempts per\n field goal attempt by the home team.\n \"\"\"\n return self._home_free_throw_attempt_rate\n\n @float_property_decorator\n def home_offensive_rebound_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of available offensive rebounds\n the home team grabbed. Percentage ranges from 0-100.\n \"\"\"\n return self._home_offensive_rebound_percentage\n\n @float_property_decorator\n def home_defensive_rebound_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of available defensive rebounds\n the home team grabbed. Percentage ranges from 0-100.\n \"\"\"\n return self._home_defensive_rebound_percentage\n\n @float_property_decorator\n def home_total_rebound_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of available rebounds the home\n team grabbed. Percentage ranges from 0-100.\n \"\"\"\n return self._home_total_rebound_percentage\n\n @float_property_decorator\n def home_assist_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of the home team's field goals\n that were assisted. Percentage ranges from 0-100.\n \"\"\"\n return self._home_assist_percentage\n\n @float_property_decorator\n def home_steal_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of possessions that ended in a\n steal by the home team. Percentage ranges from 0-100.\n \"\"\"\n return self._home_steal_percentage\n\n @float_property_decorator\n def home_block_percentage(self):\n \"\"\"\n Returns a ``float`` of the percentage of 2-point field goals that were\n blocked by the home team. Percentage ranges from 0-100.\n \"\"\"\n return self._home_block_percentage\n\n @float_property_decorator\n def home_turnover_percentage(self):\n \"\"\"\n Returns a ``float`` of the number of times the home team turned the\n ball over per 100 possessions.\n \"\"\"\n return self._home_turnover_percentage\n\n @float_property_decorator\n def home_offensive_rating(self):\n \"\"\"\n Returns a ``float`` of the average number of points scored per 100\n possessions by the home team.\n \"\"\"\n return self._home_offensive_rating\n\n @float_property_decorator\n def home_defensive_rating(self):\n \"\"\"\n Returns a ``float`` of the average number of points scored per 100\n possessions by the away team.\n \"\"\"\n return self._home_defensive_rating\n\n\nclass Boxscores:\n \"\"\"\n Search for NCAAB games taking place on a particular day.\n\n Retrieve a dictionary which contains a list of all games being played on a\n particular day. Output includes a link to the boxscore, a boolean value\n which indicates if the game is between two Division-I teams or not, and the\n names and abbreviations for both the home teams. If no games are played on\n a particular day, the list will be empty.\n\n Parameters\n ----------\n date : datetime object\n The date to search for any matches. 
The month, day, and year are\n required for the search, but time is not factored into the search.\n end_date : datetime object\n Optionally specify an end date to iterate until. All boxscores starting\n from the date specified in the 'date' parameter up to and including the\n boxscores specified in the 'end_date' parameter will be pulled. If left\n empty, or if 'end_date' is prior to 'date', only the games from the day\n specified in the 'date' parameter will be saved.\n \"\"\"\n def __init__(self, date, end_date=None):\n self._boxscores = {}\n\n self._find_games(date, end_date)\n\n def __str__(self):\n \"\"\"\n Return the string representation of the class.\n \"\"\"\n return f\"NCAAB games for {', '.join(self._boxscores.keys())}\"\n\n def __repr__(self):\n \"\"\"\n Return the string representation of the class.\n \"\"\"\n return self.__str__()\n\n @property\n def games(self):\n \"\"\"\n Returns a ``dictionary`` object representing all of the games played on\n the requested day. Dictionary is in the following format::\n\n {'date' : [ # 'date' is the string date in format 'MM-DD-YYYY'\n {\n 'home_name': Name of the home team, such as 'Purdue\n Boilermakers' (`str`),\n 'home_abbr': Abbreviation for the home team, such as\n 'PURDUE' (`str`),\n 'away_name': Name of the away team, such as 'Indiana\n Hoosiers' (`str`),\n 'away_abbr': Abbreviation for the away team, such as\n 'INDIANA' (`str`),\n 'boxscore': String representing the boxscore URI, such as\n '2018-01-28-15-indiana' (`str`),\n 'non_di': Boolean value which evaluates to True when at\n least one of the teams does not compete in NCAA\n Division-I basketball (`bool`),\n 'top_25': Boolean value which evaluates to True when at\n least one of the teams is ranked in the AP Top 25\n polls (`bool`),\n 'winning_name': Full name of the winning team, such as\n 'Purdue Boilermakers' (`str`),\n 'winning_abbr': Abbreviation for the winning team, such as\n 'PURDUE' (`str`),\n 'losing_name': Full name of the losing team, such as\n 'Indiana Hoosiers' (`str`),\n 'losing_abbr': Abbreviation for the losing team, such as\n 'INDIANA' (`str`),\n 'home_score': Integer score for the home team (`int`),\n 'home_rank': Integer representing the home team's rank\n (`int`),\n 'away_score': Integer score for the away team (`int`),\n 'away_rank': Integer representing the away team's rank\n (`int`)\n },\n { ... },\n ...\n ]\n }\n\n If no games were played on 'date', the list for ['date'] will be empty.\n \"\"\"\n return self._boxscores\n\n def _create_url(self, date):\n \"\"\"\n Build the URL based on the passed datetime object.\n\n In order to get the proper boxscore page, the URL needs to include the\n requested month, day, and year.\n\n Parameters\n ----------\n date : datetime object\n The date to search for any matches. 
The month, day, and year are\n required for the search, but time is not factored into the search.\n\n Returns\n -------\n string\n Returns a ``string`` of the boxscore URL including the requested\n date.\n \"\"\"\n return BOXSCORES_URL % (date.month, date.day, date.year)\n\n def _get_requested_page(self, url):\n \"\"\"\n Get the requested page.\n\n Download the requested page given the created URL and return a PyQuery\n object.\n\n Parameters\n ----------\n url : string\n The URL containing the boxscores to find.\n\n Returns\n -------\n PyQuery object\n A PyQuery object containing the HTML contents of the requested\n page.\n \"\"\"\n return pq(url)\n\n def _get_boxscore_uri(self, url):\n \"\"\"\n Find the boxscore URI.\n\n Given the boxscore tag for a game, parse the embedded URI for the\n boxscore.\n\n Parameters\n ----------\n url : PyQuery object\n A PyQuery object containing the game's boxscore tag which has the\n boxscore URI embedded within it.\n\n Returns\n -------\n string\n Returns a ``string`` containing the link to the game's boxscore\n page.\n \"\"\"\n uri = re.sub(r'.*cbb/boxscores/', '', str(url))\n uri = re.sub(r'\\.html.*', '', uri).strip()\n return uri\n\n def _parse_abbreviation(self, abbr):\n \"\"\"\n Parse a team's abbreviation.\n\n Given the team's HTML name tag, parse their abbreviation.\n\n Parameters\n ----------\n abbr : string\n A string of a team's HTML name tag.\n\n Returns\n -------\n string\n Returns a ``string`` of the team's abbreviation.\n \"\"\"\n if 'cbb/schools' not in str(abbr):\n return None\n abbr = re.sub(r'.*/schools/', '', str(abbr))\n abbr = re.sub(r'/.*', '', abbr)\n return abbr\n\n def _get_name(self, name):\n \"\"\"\n Find a team's name and abbreviation.\n\n Given the team's HTML name tag, determine their name, abbreviation, and\n whether or not they compete in Division-I.\n\n Parameters\n ----------\n name : PyQuery object\n A PyQuery object of a team's HTML name tag in the boxscore.\n\n Returns\n -------\n tuple\n Returns a tuple containing the name, abbreviation, and whether or\n not the team participates in Division-I. Tuple is in the following\n order: Team Name, Team Abbreviation, boolean which evaluates to\n True if the team does not participate in Division-I.\n \"\"\"\n team_name = name.text()\n abbr = self._parse_abbreviation(name)\n non_di = False\n if not abbr:\n abbr = team_name\n non_di = True\n return team_name, abbr, non_di\n\n def _get_score(self, score_link):\n \"\"\"\n Find a team's final score.\n\n Given an HTML string of a team's boxscore, extract the integer\n representing the final score and return the number.\n\n Parameters\n ----------\n score_link : string\n An HTML string representing a team's final score in the format\n '<td class=\"right\">NN</td>' where 'NN' is the team's score.\n\n Returns\n -------\n int\n Returns an int representing the team's final score in runs.\n \"\"\"\n score = score_link.replace('<td class=\"right\">', '')\n score = score.replace('</td>', '')\n return int(score)\n\n def _get_rank(self, team):\n \"\"\"\n Find the team's rank when applicable.\n\n If a team is ranked, it will showup in a separate <span> tag with the\n actual rank embedded between parentheses. When a team is ranked, the\n integer value representing their ranking should be returned. 
For teams\n that are not ranked, None should be returned.\n\n Parameters\n ----------\n team : PyQuery object\n A PyQuery object of a team's HTML tag in the boxscore.\n\n Returns\n -------\n int\n Returns an integer representing the team's ranking when applicable,\n or None if the team is not ranked.\n \"\"\"\n rank = None\n rank_field = team('span[class=\"pollrank\"]')\n if len(rank_field) > 0:\n rank = re.findall(r'\\(\\d+\\)', str(rank_field))[0]\n rank = int(rank.replace('(', '').replace(')', ''))\n return rank\n\n def _get_team_names(self, game):\n \"\"\"\n Find the names and abbreviations for both teams in a game.\n\n Using the HTML contents in a boxscore, find the name and abbreviation\n for both teams and determine wether or not this is a matchup between\n two Division-I teams.\n\n Parameters\n ----------\n game : PyQuery object\n A PyQuery object of a single boxscore containing information about\n both teams.\n\n Returns\n -------\n tuple\n Returns a tuple containing the names and abbreviations of both\n teams in the following order: Away Name, Away Abbreviation, Away\n Score, Away Ranking, Home Name, Home Abbreviation, Home Score, Home\n Ranking, a boolean which evaluates to True if either team does not\n participate in Division-I athletics, and a boolean which evalutes\n to True if either team is currently ranked.\n \"\"\"\n # Grab the first <td...> tag for each <tr> row in the boxscore,\n # representing the name for each participating team.\n links = [g('td:first') for g in game('tr').items()\n if 'class=\"desc\"' not in str(g('td:first'))]\n # The away team is the first link in the boxscore\n away = links[0]\n # The home team is the last (3rd) link in the boxscore\n home = links[-1]\n non_di = False\n scores = re.findall(r'<td class=\"right\">\\d+</td>', str(game))\n away_score = None\n home_score = None\n # If the game hasn't started or hasn't been updated on sports-reference\n # yet, no score will be shown and therefore can't be parsed.\n if len(scores) == 2:\n away_score = self._get_score(scores[0])\n home_score = self._get_score(scores[1])\n away_name, away_abbr, away_non_di = self._get_name(away('a'))\n home_name, home_abbr, home_non_di = self._get_name(home('a'))\n non_di = away_non_di or home_non_di\n away_rank = self._get_rank(away)\n home_rank = self._get_rank(home)\n top_25 = bool(away_rank or home_rank)\n return (away_name, away_abbr, away_score, away_rank, home_name,\n home_abbr, home_score, home_rank, non_di, top_25)\n\n def _get_team_results(self, away_name, away_abbr, away_score, home_name,\n home_abbr, home_score):\n \"\"\"\n Determine the winner and loser of the game.\n\n If the game has been completed and sports-reference has been updated\n with the score, determine the winner and loser and return their\n respective names and abbreviations.\n\n Parameters\n ----------\n away_name : string\n The name of the away team, such as 'Indiana'.\n away_abbr : string\n The abbreviation of the away team, such as 'indiana'.\n away_score : int\n The number of points the away team scored, or None if the game\n hasn't completed yet.\n home_score : string\n The name of the home team, such as 'Purdue'.\n home_abbr : string\n The abbreviation of the home team, such as 'purdue'.\n home_score : int\n The number of points the home team scored, or None if the game\n hasn't completed yet.\n\n Returns\n -------\n tuple, tuple\n Returns two tuples, each containing the name followed by the\n abbreviation of the winning and losing team, respectively. 
If the\n game doesn't have a score associated with it yet, both tuples will\n be None.\n \"\"\"\n if not away_score or not home_score:\n return None, None\n if away_score > home_score:\n return (away_name, away_abbr), (home_name, home_abbr)\n else:\n return (home_name, home_abbr), (away_name, away_abbr)\n\n def _extract_game_info(self, games):\n \"\"\"\n Parse game information from all boxscores.\n\n Find the major game information for all boxscores listed on a\n particular boxscores webpage and return the results in a list.\n\n Parameters\n ----------\n games : generator\n A generator where each element points to a boxscore on the parsed\n boxscores webpage.\n\n Returns\n -------\n list\n Returns a ``list`` of dictionaries where each dictionary contains\n the name and abbreviations for both the home and away teams, a\n boolean value indicating whether or not both teams compete in\n Division-I, and a link to the boxscore.\n \"\"\"\n all_boxscores = []\n\n for game in games:\n names = self._get_team_names(game)\n away_name, away_abbr, away_score, away_rank, home_name, \\\n home_abbr, home_score, home_rank, non_di, top_25 = names\n boxscore_url = game('td[class=\"right gamelink\"] a')\n boxscore_uri = self._get_boxscore_uri(boxscore_url)\n winning_name = None\n winning_abbr = None\n losing_name = None\n losing_abbr = None\n winner, loser = self._get_team_results(away_name, away_abbr,\n away_score, home_name,\n home_abbr, home_score)\n if winner and loser:\n winning_name, winning_abbr = winner\n losing_name, losing_abbr = loser\n game_info = {\n 'boxscore': boxscore_uri,\n 'away_name': away_name,\n 'away_abbr': away_abbr,\n 'away_score': away_score,\n 'away_rank': away_rank,\n 'home_name': home_name,\n 'home_abbr': home_abbr,\n 'home_score': home_score,\n 'home_rank': home_rank,\n 'non_di': non_di,\n 'top_25': top_25,\n 'winning_name': winning_name,\n 'winning_abbr': winning_abbr,\n 'losing_name': losing_name,\n 'losing_abbr': losing_abbr\n }\n all_boxscores.append(game_info)\n return all_boxscores\n\n def _find_games(self, date, end_date):\n \"\"\"\n Retrieve all major games played on a given day.\n\n Builds a URL based on the requested date and downloads the HTML\n contents before parsing any and all games played during that day. Any\n games that are found are added to the boxscores dictionary with\n high-level game information such as the home and away team names and a\n link to the boxscore page.\n\n Parameters\n ----------\n date : datetime object\n The date to search for any matches. The month, day, and year are\n required for the search, but time is not factored into the search.\n end_date : datetime object\n Optionally specify an end date to iterate until. All boxscores\n starting from the date specified in the 'date' parameter up to and\n including the boxscores specified in the 'end_date' parameter will\n be pulled. If left empty, or if 'end_date' is prior to 'date', only\n the games from the day specified in the 'date' parameter will be\n saved.\n \"\"\"\n # Set the end date to the start date if the end date is before the\n # start date.\n if not end_date or date > end_date:\n end_date = date\n date_step = date\n while date_step <= end_date:\n url = self._create_url(date_step)\n page = self._get_requested_page(url)\n games = page('table[class=\"teams\"]').items()\n boxscores = self._extract_game_info(games)\n timestamp = '%s-%s-%s' % (date_step.month, date_step.day,\n date_step.year)\n self._boxscores[timestamp] = boxscores\n date_step += timedelta(days=1)\n" ]
[ [ "pandas.DataFrame" ] ]
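The record above ends with the Boxscore `dataframe` property, which packs every parsed stat into a single-row pandas DataFrame indexed by the game URI; `pandas.DataFrame` is the only API extracted for this row. As a rough, hedged illustration of that one-row-per-game pattern (the URI matches the docstring example, but the field names and numbers below are placeholders, not real boxscore data):

import pandas as pd

# Sketch of the single-row DataFrame pattern behind the `dataframe` property
# above. The stat values are hypothetical examples, not parsed data.
def boxscore_frame(uri, fields):
    # Wrapping the dict in a one-element list yields a DataFrame with exactly
    # one row; using the boxscore URI as the index lets frames for many games
    # be stacked later with pd.concat().
    return pd.DataFrame([fields], index=[uri])

frame = boxscore_frame('2017-11-10-21-kansas',
                       {'away_points': 65, 'home_points': 92, 'pace': 70.3})
print(frame.loc['2017-11-10-21-kansas', 'home_points'])   # 92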
linzehui/fastNLP
[ "9577a9acc47182c4fd4196e05d0e7290a5213f38" ]
[ "fastNLP/embeddings/embedding.py" ]
[ "\"\"\"\n该模块中的Embedding主要用于随机初始化的embedding(更推荐使用 :class:`fastNLP.embeddings.StaticEmbedding` ),或按照预训练权重初始化Embedding。\n\n\"\"\"\n\n__all__ = [\n \"Embedding\",\n \"TokenEmbedding\"\n]\n\nfrom abc import abstractmethod\n\nimport torch\nimport torch.nn as nn\n\nfrom .utils import get_embeddings\n\n\nclass Embedding(nn.Module):\n \"\"\"\n 词向量嵌入,支持输入多种方式初始化. 可以通过self.num_embeddings获取词表大小; self.embedding_dim获取embedding的维度.\n\n Example::\n\n >>> import numpy as np\n >>> from fastNLP.embeddings import Embedding\n >>> init_embed = (2000, 100)\n >>> embed = Embedding(init_embed) # 随机初始化一个具有2000个词,每个词表示为100维的词向量\n >>> init_embed = np.zeros((2000, 100))\n >>> embed = Embedding(init_embed) # 使用numpy.ndarray的值作为初始化值初始化一个Embedding\n\n \"\"\"\n \n def __init__(self, init_embed, word_dropout=0, dropout=0.0, unk_index=None):\n \"\"\"\n \n :param tuple(int,int),torch.FloatTensor,nn.Embedding,numpy.ndarray init_embed: 支持传入Embedding的大小(传入tuple(int, int),\n 第一个int为vocab_zie, 第二个int为embed_dim); 或传入Tensor, Embedding, numpy.ndarray等则直接使用该值初始化Embedding;\n :param float word_dropout: 按照一定概率随机将word设置为unk_index,这样可以使得unk这个token得到足够的训练, 且会对网络有\n 一定的regularize的作用。设置该值时,必须同时设置unk_index\n :param float dropout: 对Embedding的输出的dropout。\n :param int unk_index: drop word时替换为的index。fastNLP的Vocabulary的unk_index默认为1。\n \"\"\"\n super(Embedding, self).__init__()\n \n self.embed = get_embeddings(init_embed)\n \n self.dropout = nn.Dropout(dropout)\n if not isinstance(self.embed, TokenEmbedding):\n if hasattr(self.embed, 'embed_size'):\n self._embed_size = self.embed.embed_size\n elif hasattr(self.embed, 'embedding_dim'):\n self._embed_size = self.embed.embedding_dim\n else:\n self._embed_size = self.embed.weight.size(1)\n if word_dropout > 0 and not isinstance(unk_index, int):\n raise ValueError(\"When drop word is set, you need to pass in the unk_index.\")\n else:\n self._embed_size = self.embed.embed_size\n unk_index = self.embed.get_word_vocab().unknown_idx\n self.unk_index = unk_index\n self.word_dropout = word_dropout\n \n def forward(self, words):\n \"\"\"\n :param torch.LongTensor words: [batch, seq_len]\n :return: torch.Tensor : [batch, seq_len, embed_dim]\n \"\"\"\n if self.word_dropout > 0 and self.training:\n mask = torch.ones_like(words).float() * self.word_dropout\n mask = torch.bernoulli(mask).eq(1) # dropout_word越大,越多位置为1\n words = words.masked_fill(mask, self.unk_index)\n words = self.embed(words)\n return self.dropout(words)\n \n @property\n def num_embedding(self) -> int:\n if isinstance(self.embed, nn.Embedding):\n return self.embed.weight.size(0)\n else:\n return self.embed.num_embedding\n \n def __len__(self):\n return len(self.embed)\n \n @property\n def embed_size(self) -> int:\n return self._embed_size\n \n @property\n def embedding_dim(self) -> int:\n return self._embed_size\n \n @property\n def requires_grad(self):\n \"\"\"\n Embedding的参数是否允许优化。True: 所有参数运行优化; False: 所有参数不允许优化; None: 部分允许优化、部分不允许\n :return:\n \"\"\"\n if not isinstance(self.embed, TokenEmbedding):\n return self.embed.weight.requires_grad\n else:\n return self.embed.requires_grad\n \n @requires_grad.setter\n def requires_grad(self, value):\n if not isinstance(self.embed, TokenEmbedding):\n self.embed.weight.requires_grad = value\n else:\n self.embed.requires_grad = value\n \n @property\n def size(self):\n if isinstance(self.embed, TokenEmbedding):\n return self.embed.size\n else:\n return self.embed.weight.size()\n\n\nclass TokenEmbedding(nn.Module):\n \"\"\"\n fastNLP中各种Embedding的基类\n\n \"\"\"\n def __init__(self, vocab, word_dropout=0.0, 
dropout=0.0):\n super(TokenEmbedding, self).__init__()\n if vocab.rebuild:\n vocab.build_vocab()\n assert vocab.padding is not None, \"Vocabulary must have a padding entry.\"\n self._word_vocab = vocab\n self._word_pad_index = vocab.padding_idx\n if word_dropout > 0:\n assert vocab.unknown is not None, \"Vocabulary must have unknown entry when you want to drop a word.\"\n self.word_dropout = word_dropout\n self._word_unk_index = vocab.unknown_idx\n self.dropout_layer = nn.Dropout(dropout)\n \n def drop_word(self, words):\n \"\"\"\n 按照设定随机将words设置为unknown_index。\n\n :param torch.LongTensor words: batch_size x max_len\n :return:\n \"\"\"\n if self.word_dropout > 0 and self.training:\n mask = torch.full_like(words, fill_value=self.word_dropout, dtype=torch.float, device=words.device)\n mask = torch.bernoulli(mask).eq(1) # dropout_word越大,越多位置为1\n pad_mask = words.ne(self._word_pad_index)\n mask = mask.__and__(pad_mask)\n words = words.masked_fill(mask, self._word_unk_index)\n return words\n \n def dropout(self, words):\n \"\"\"\n 对embedding后的word表示进行drop。\n\n :param torch.FloatTensor words: batch_size x max_len x embed_size\n :return:\n \"\"\"\n return self.dropout_layer(words)\n \n @property\n def requires_grad(self):\n \"\"\"\n Embedding的参数是否允许优化。True: 所有参数运行优化; False: 所有参数不允许优化; None: 部分允许优化、部分不允许\n :return:\n \"\"\"\n requires_grads = set([param.requires_grad for param in self.parameters()])\n if len(requires_grads) == 1:\n return requires_grads.pop()\n else:\n return None\n \n @requires_grad.setter\n def requires_grad(self, value):\n for param in self.parameters():\n param.requires_grad = value\n \n def __len__(self):\n return len(self._word_vocab)\n \n @property\n def embed_size(self) -> int:\n return self._embed_size\n \n @property\n def embedding_dim(self) -> int:\n return self._embed_size\n \n @property\n def num_embedding(self) -> int:\n \"\"\"\n 这个值可能会大于实际的embedding矩阵的大小。\n :return:\n \"\"\"\n return len(self._word_vocab)\n \n def get_word_vocab(self):\n \"\"\"\n 返回embedding的词典。\n\n :return: Vocabulary\n \"\"\"\n return self._word_vocab\n \n @property\n def size(self):\n return torch.Size(self.num_embedding, self._embed_size)\n \n @abstractmethod\n def forward(self, words):\n raise NotImplementedError\n" ]
[ [ "torch.Size", "torch.nn.Dropout", "torch.bernoulli", "torch.full_like", "torch.ones_like" ] ]
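The TokenEmbedding.drop_word method in the record above randomly replaces non-padding token ids with the unknown index during training using a Bernoulli mask, which is where the row's extracted APIs (torch.full_like, torch.bernoulli, etc.) come from. A minimal standalone sketch of that trick; the dropout rate, pad index, and unk index here are made-up example values, not taken from any particular vocabulary:

import torch

# Standalone sketch of the Bernoulli word-dropout trick used by drop_word().
def drop_words(words, word_dropout=0.1, pad_index=0, unk_index=1):
    # Sample an independent 0/1 decision per position with probability
    # `word_dropout` (the original applies this only while self.training).
    mask = torch.full_like(words, fill_value=word_dropout, dtype=torch.float)
    mask = torch.bernoulli(mask).eq(1)
    # Never touch padding positions, only real tokens.
    mask = mask & words.ne(pad_index)
    return words.masked_fill(mask, unk_index)

words = torch.tensor([[5, 8, 13, 0, 0],
                      [2, 9, 4, 7, 0]])
print(drop_words(words))   # a few non-pad ids get replaced by unk_index (1)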
Mingg8/turtlebot3
[ "14e5c9dd64fc1019b18a8d8e8f75b445d71ab278" ]
[ "turtlebot3_sandbot/scripts/turtlebot3_sandbot_pointop.py" ]
[ "#!/usr/bin/env python\n#################################################################################\n# Copyright 2018 ROBOTIS CO., LTD.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#################################################################################\n\n# Authors: Gilbert #\n\nimport rospy\nfrom geometry_msgs.msg import Twist, Point, Quaternion\nimport tf\nfrom math import radians, copysign, sqrt, pow, pi, atan2\nfrom tf.transformations import euler_from_quaternion\nimport numpy as np\nimport sys\n\nmsg = \"\"\"\ncontrol your Turtlebot3!\n-----------------------\nInsert xyz - coordinate.\nx : position x (m)\ny : position y (m)\nz : orientation z (degree: -180 ~ 180)\nIf you want to close, insert 's'\n-----------------------\n\"\"\"\n\nincrement = 4\n\narr_path_B = []\nwith open(sys.argv[1], \"r\") as file_path_B:\n for idx, line in enumerate(file_path_B):\n str = line.split()\n if not (len(str)==0):\n #print(str)\n arr_path_B.append([(float)(str[1]), 1.0-(float)(str[0])])\n else:\n break\n# now we imported arr_path_B from file to arr_path_B\n\nclass GotoPoint():\n def __init__(self):\n rospy.init_node('turtlebot3_sandbot', anonymous=False, disable_signals=True)\n rospy.on_shutdown(self.shutdown)\n self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=5)\n position = Point()\n move_cmd = Twist() #make empty twist message\n r = rospy.Rate(10)\n self.tf_listener = tf.TransformListener()\n self.odom_frame = '/odom'\n self.isFirst = True\n print(\"isFirst initialized\")\n self.offset_x=0\n self.offset_y=0\n self.offset_rot=0\n try:\n self.tf_listener.waitForTransform(self.odom_frame, '/base_footprint', rospy.Time(), rospy.Duration(1.0))\n self.base_frame = '/base_footprint'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n try:\n self.tf_listener.waitForTransform(self.odom_frame, '/base_link', rospy.Time(), rospy.Duration(1.0))\n self.base_frame = '/base_link'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between /odom and /base_link or /base_footprint\")\n rospy.signal_shutdown(\"tf Exception\")\n \n (position, rotation) = self.get_odom()\n if self.isFirst:\n self.offset_x=position.x\n self.offset_y=position.y\n self.offset_rot=rotation\n self.isFirst=False\n print(\"offset initialized\")\n (position, rotation) = self.get_odom()\n print(\"x, y, rotation\", position.x, position.y, np.rad2deg(rotation))\n \n last_rotation = 0\n last_distance = 10\n linear_speed = 1\n angular_speed = 1\n # (goal_x, goal_y, goal_z) = self.getkey()\n\n # go through path array\n ind = 1\n length = len(arr_path_B)\n distanceIncreasing = False\n init_goal = arr_path_B[0]\n goal_x = arr_path_B[ind][0]-init_goal[0]\n goal_y = arr_path_B[ind][1]-init_goal[1]\n\n while ind < length:\n # if goal_z > 180 or goal_z < -180:\n # print(\"you input worng z range.\")\n # self.shutdown()\n # goal_z = np.deg2rad(goal_z)\n # (position,rotation) = self.get_odom()\n \n distance = sqrt(pow(goal_x - position.x, 2) + 
pow(goal_y - position.y, 2))\n\n\n while distance > 0.1:\n try:\n (position, rotation) = self.get_odom()\n distance = sqrt(pow((goal_x - position.x), 2) + pow((goal_y - position.y), 2))\n path_angle = atan2(goal_y - position.y, goal_x- position.x)\n\n # if last_distance<distance:\n # ind = ind - increment\n # distanceIncreasing = True \n\n \n #Normalization of path_angle\n if path_angle < -pi/4 or path_angle > pi/4:\n if goal_y < 0 and position.y < goal_y:\n path_angle = -2*pi + path_angle\n elif goal_y >= 0 and position.y > goal_y:\n path_angle = 2*pi + path_angle\n\n #Normalization of rotation\n if last_rotation > pi-0.1 and rotation <= 0:\n rotation = 2*pi + rotation\n elif last_rotation < -pi+0.1 and rotation > 0:\n rotation = -2*pi + rotation\n\n\n \n rot_angle = path_angle - rotation\n if rot_angle>pi or (rot_angle<0 and rot_angle>-pi):\n diff_sign = -1.0\n else:\n diff_sign = 1.0\n\n diff_magnitude = pi - abs(abs(path_angle - rotation) - pi)\n diff = diff_sign * diff_magnitude\n\n print(\"CURRENT \", position.x, position.y, np.rad2deg(rotation))\n print(\"GOAL\", goal_x, goal_y, np.rad2deg(path_angle))\n print (\"DISTANCE(m) \", distance)\n\n \n # move_cmd.angular.z = angular_speed * diff\n move_cmd.angular.z = angular_speed * rot_angle\n move_cmd.linear.x = min(linear_speed * distance, 0.1)\n\n if move_cmd.angular.z > 0:\n move_cmd.angular.z = min(move_cmd.angular.z, 0.2)\n else:\n move_cmd.angular.z = max(move_cmd.angular.z, -0.2)\n\n #if heading angle is over limit (while driving)\n if diff_magnitude > pi/4.0:\n move_cmd.linear.x = 0 \n # move_cmd.angular.z = move_cmd.angular.z * (-1.0)\n # move_cmd.angular.z = move_cmd.angular.z\n\n last_rotation = rotation\n last_distance = distance\n \n # if distanceIncreasing == True:\n # print(\"distance is increasing!!!\")\n # self.cmd_vel.publish(Twist())\n # r.sleep()\n # continue\n \n print(\"linear.x angular.z\")\n print(move_cmd.linear.x, move_cmd.angular.z)\n \n self.cmd_vel.publish(move_cmd)\n r.sleep()\n\n except KeyboardInterrupt:\n rospy.signal_shutdown(\"KeboardInterrupt\")\n break\n \n if rospy.is_shutdown():\n break\n\n #self.cmd_vel.publish(Twist())\n \n print(\"Now at Waypoint No.\", ind)\n\n if ind<length-increment: \n ind = ind + increment\n goal_x = arr_path_B[ind][0]-init_goal[0]\n goal_y = arr_path_B[ind][1]-init_goal[1]\n else: \n #arrived at the final destination\n print(\"Robot at the final destination\")\n rospy.signal_shutdown(\"Robot Task Done\")\n break\n\n (position, rotation) = self.get_odom()\n path_angle = atan2(goal_y - position.y, goal_x- position.x)\n rot_angle = path_angle - rotation\n while True:\n try:\n print(\"rotation\", np.rad2deg(rotation))\n print(\"path_angle\", np.rad2deg(path_angle))\n\n move_cmd.linear.x=0\n\n #diff is always positive\n diff = pi - abs(abs(rot_angle) - pi)\n print(\"diff\", np.rad2deg(diff))\n\n\n rot_angle = path_angle - rotation\n if rot_angle>pi or (rot_angle<0 and rot_angle>-pi):\n diff_sign = -1.0\n else:\n diff_sign = 1.0\n\n diff_magnitude = pi - abs(abs(path_angle - rotation) - pi)\n diff = diff_sign * diff_magnitude\n\n if diff_sign < 0:\n if diff_magnitude < np.deg2rad(20):\n move_cmd.angular.z=-0.1 \n else:\n move_cmd.angular.z=-0.2 \n else:\n if diff_magnitude < np.deg2rad(20):\n move_cmd.angular.z=0.1 \n else:\n move_cmd.angular.z=0.2 \n\n (position, rotation) = self.get_odom()\n path_angle = atan2(goal_y - position.y, goal_x- position.x)\n rot_angle = path_angle - rotation\n\n # 8 too small\n if abs(rot_angle) < np.deg2rad(8):\n r.sleep()\n break\n\n 
self.cmd_vel.publish(move_cmd)\n r.sleep()\n except KeyboardInterrupt:\n rospy.signal_shutdown(\"KeboardInterrupt\")\n break\n\n\n #self.cmd_vel.publish(Twist())\n\n if rospy.is_shutdown():\n break\n # if goal_z >= 0:\n # if rotation <= goal_z and rotation >= goal_z - pi:\n # move_cmd.linear.x = 0.00\n # move_cmd.angular.z = 0.2\n # else:\n # move_cmd.linear.x = 0.00\n # move_cmd.angular.z = -0.2\n # else:\n # if rotation <= goal_z + pi and rotation > goal_z:\n # move_cmd.linear.x = 0.00\n # move_cmd.angular.z = -0.2\n # else:\n # move_cmd.linear.x = 0.00\n # move_cmd.angular.z = 0.2\n \n\n rospy.loginfo(\"Stopping the robot...\")\n self.cmd_vel.publish(Twist())\n \n\n #contains offset correction\n def get_odom(self):\n try:\n (trans, rot) = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))\n rotation = euler_from_quaternion(rot)\n\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"TF Exception\")\n return\n pnt=Point(*trans)\n pnt.x=pnt.x-self.offset_x\n pnt.y=pnt.y-self.offset_y\n\n # if rotation[2]-self.offset_rot < -pi:\n # return(pnt, rotation[2]-self.offset_rot+2*pi)\n # return (pnt, rotation[2]-self.offset_rot)\n # return (pnt, rotation[2]-self.offset_rot)\n return (pnt, rotation[2])\n # return (Point(*trans), rotation[2])\n\n def shutdown(self):\n self.cmd_vel.publish(Twist())\n rospy.sleep(1)\n\nif __name__ == '__main__':\n try:\n while not rospy.is_shutdown():\n\n #print(msg)\n GotoPoint()\n\n except:\n rospy.loginfo(\"shutdown program.\")\n" ]
[ [ "numpy.deg2rad", "numpy.rad2deg" ] ]
mit-han-lab/amc-compressed-models
[ "49f83edfca6533a7e5464bbd2d5025835811c690" ]
[ "models/mobilenet_v2.py" ]
[ "import torch\nimport torch.nn as nn\nimport math\n\n\ndef conv_bn(inp, oup, stride):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU6(inplace=True)\n )\n\n\ndef conv_1x1_bn(inp, oup):\n return nn.Sequential(\n nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU6(inplace=True)\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = round(inp * expand_ratio)\n self.use_res_connect = self.stride == 1 and inp == oup\n\n if expand_ratio == 1:\n self.conv = nn.Sequential(\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n else:\n self.conv = nn.Sequential(\n # pw\n nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # dw\n nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\ndef parse_pruned_channels(pruned_channels):\n # [24, 24, 16, 72, 24, 104, 24]\n strides = [1, 2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]\n input_channel = pruned_channels[1]\n last_channel = pruned_channels[-1]\n pruned_channels = pruned_channels[1:-1]\n interverted_residual_setting = [[1, pruned_channels[1], 1, strides[0]]]\n pruned_channels = pruned_channels[1:]\n s_idx = 1\n start = 0\n while start < len(pruned_channels) - 1:\n t = pruned_channels[start + 1] * 1. 
/ pruned_channels[start]\n c = pruned_channels[start + 2]\n n = 1\n s = strides[s_idx]\n interverted_residual_setting.append([t, c, n, s])\n s_idx += 1\n start += 2\n assert len(strides) == len(interverted_residual_setting)\n return input_channel, last_channel, interverted_residual_setting\n\n\nclass MobileNetV2(nn.Module):\n def __init__(self, n_class=1000, input_size=224, width_mult=1., profile='normal'):\n super(MobileNetV2, self).__init__()\n if profile == 'normal':\n block = InvertedResidual\n input_channel = 32\n last_channel = 1280\n interverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n ]\n elif profile == '0.7flops':\n block = InvertedResidual\n pruned_channels = [3, 24, 16, 72, 24, 104, 24, 104, 32, 136, 32, 136, 32, 136, 64, 272, 64, 272, 64, 272,\n 64, 272, 96, 408, 96, 400, 96, 400, 160, 712, 160, 720, 160, 712, 248, 848]\n input_channel, last_channel, interverted_residual_setting = parse_pruned_channels(pruned_channels)\n else:\n raise NotImplementedError\n\n # building first layer\n assert input_size % 32 == 0\n input_channel = int(input_channel * width_mult)\n self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel\n self.features = [conv_bn(3, input_channel, 2)]\n # building inverted residual blocks\n for t, c, n, s in interverted_residual_setting:\n output_channel = int(c * width_mult)\n for i in range(n):\n if i == 0:\n self.features.append(block(input_channel, output_channel, s, expand_ratio=t))\n else:\n self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))\n input_channel = output_channel\n # building last several layers\n self.features.append(conv_1x1_bn(input_channel, self.last_channel))\n # make it nn.Sequential\n self.features = nn.Sequential(*self.features)\n\n # building classifier\n self.classifier = nn.Sequential(\n nn.Dropout(0.2),\n nn.Linear(self.last_channel, n_class),\n )\n\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.mean(3).mean(2)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.ReLU6", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.BatchNorm2d" ] ]
aks-research-team/HPC-research
[ "195fca1f40ebdace76747814378578e97afb9c33" ]
[ "test.py" ]
[ "import torch\nimport numpy as np\n\n\ngamma = torch.Tensor([7/5]).cuda()\nk = torch.Tensor([200]).cuda()\nR = torch.Tensor([8.31]).cuda()\nmu = torch.Tensor([0.029]).cuda()\nc = torch.Tensor([R / ((gamma - 1) * mu)]).cuda()\nv_sound = torch.Tensor([343]).cuda()\n\n\ndef U2params(U):\n ro = U[0]\n vx = U[1] / ro\n vx = U[2] / ro\n e = (U[3] - (U[1] ** 2 + U[2] ** 2) / (2 * ro)) / ro\n\n p = (gamma - 1) * e\n T = e / (ro * c)\n\n return ro, vx, vx, e, p, T\n\n\ndef params2U(ro, vx, vy, T):\n U = torch.zeros((4)).cuda()\n U[0] = ro\n U[1] = vx * ro\n U[2] = vy * ro\n U[3] = (ro) * ((T) * (ro) * c)\n\n return U\n\ndef params2U_parallel(ro, vx, vy, T):\n U = torch.zeros((n, n, 4)).cuda()\n U[:,:,0] = ro\n U[:,:,1] = vx * ro\n U[:,:,2] = vy * ro\n U[:,:,3] = ro * T * ro * c\n\n return U\n\nn = 503\nn += 2\nt = 20000\n\nU = params2U_parallel(1.25, 0, 0, 300)\n\ndx = torch.Tensor([0.001]).cuda()\ndy = torch.Tensor([0.001]).cuda()\n\n\nq = torch.zeros((n, n)).cuda()\nfx = torch.zeros((n, n)).cuda()\nfy = torch.zeros((n, n)).cuda()\n\nU[2 * ((1* n // 4) // 2), 2 * ((5 * n // 8) // 2)] = params2U(1.25, 0, 0, 400)\nU[2 * ((3 * n // 4) // 2), 2 * ((2 * n // 9) // 2)] = params2U(1.25, 0, 0, 470)\nU[2 * ((7 * n // 11) // 2), 2 * ((4 * n // 9) // 2)] = params2U(1.25, 0, 0, 650)\nU[2 * ((11 * n // 13) // 2), 2 * ((5 * n // 7) // 2)] = params2U(1.25, 0, 0, 90)\n\n\ndef updateU(U):\n\n dt = torch.Tensor([0.02 * (0\n +abs(U[:,:,1] / U[:,:,0]).max()/dx\n +abs(U[:,:,2] / U[:,:,0]).max()/dy\n + v_sound * (1/dx**2)**(0.5)\n )**(-1)]).cuda()\n Ucor = U.clone()\n\n Ucor[2:n-1:2, 2:n-1:2, 0] = (U[2:n-1:2, 2:n-1:2, 0] \n - dt * (U[2:n-1:2, 2:n-1:2, 0] + U[4:n+1:2, 2:n-1:2, 0]) * U[3:n:2, 2:n-1:2, 0] / (dx * 2)\n + dt * (U[2:n-1:2, 2:n-1:2, 0] + U[0:n-3:2, 2:n-1:2, 0]) * U[1:n-2:2, 2:n-1:2, 0] / (dx * 2)\n - dt * (U[2:n-1:2, 2:n-1:2, 0] + U[2:n-1:2, 4:n+1:2, 0]) * U[2:n-1:2, 3:n:2, 0] / (dy * 2)\n + dt * (U[2:n-1:2, 2:n-1:2, 0] + U[2:n-1:2, 0:n-3:2, 0]) * U[2:n-1:2, 1:n-2:2, 0] / (dy * 2)\n )\n\n Ucor[2:n-1:2, 2:n-1:2, 3] = (U[2:n-1:2, 2:n-1:2, 3]\n - dt * gamma * (U[4:n+1:2, 2:n-1:2, 3] + U[2:n-1:2, 2:n-1:2, 3]) * U[3:n:2, 2:n-1:2, 1] / (dx * 2)\n + dt * gamma * (U[0:n-3:2, 2:n-1:2, 3] + U[2:n-1:2, 2:n-1:2, 3]) * U[1:n-2:2, 2:n-1:2, 1] / (dx * 2)\n - dt * gamma * (U[2:n-1:2, 4:n+1:2, 3] + U[2:n-1:2, 2:n-1:2, 3]) * U[2:n-1:2, 3:n:2, 2] / (dy * 2)\n + dt * gamma * (U[2:n-1:2, 0:n-3:2, 3] + U[2:n-1:2, 2:n-1:2, 3]) * U[2:n-1:2, 1:n-2:2, 2] / (dy * 2)\n + dt * U[2:n-1:2, 2:n-1:2, 0] * q[2:n-1:2, 2:n-1:2]\n # + dt * U[2:n-1:2, 2:n-1:2, 0] * (fx[2:n-1:2, 2:n-1:2] * (U[3:n:2, 2:n-1:2, 1] + U[1:n-2:2, 2:n-1:2, 1]) / 2 + fy[2:n-1:2, 2:n-1:2] * (U[2:n-1:2, 3:n:2, 1] + U[2:n-1:2, 1:n-2:2, 1]) / 2)\n )\n\n Ucor[2:n-1:2, 2:n-1:2, 3] = (Ucor[2:n-1:2, 2:n-1:2, 3]\n + dt * k * (U[4:n+1:2, 2:n-1:2, 3] / (U[4:n+1:2, 2:n-1:2, 0] * c) - U[2:n-1:2, 2:n-1:2, 3] / (U[2:n-1:2, 2:n-1:2, 0] * c)) / dx**2\n + dt * k * (U[0:n-3:2, 2:n-1:2, 3] / (U[0:n-3:2, 2:n-1:2, 0] * c) - U[2:n-1:2, 2:n-1:2, 3] / (U[2:n-1:2, 2:n-1:2, 0] * c)) / dx**2\n + dt * k * (U[2:n-1:2, 4:n+1:2, 3] / (U[2:n-1:2, 4:n+1:2, 0] * c) - U[2:n-1:2, 2:n-1:2, 3] / (U[2:n-1:2, 2:n-1:2, 0] * c)) / dy**2\n + dt * k * (U[2:n-1:2, 0:n-3:2, 3] / (U[2:n-1:2, 0:n-3:2, 0] * c) - U[2:n-1:2, 2:n-1:2, 3] / (U[2:n-1:2, 2:n-1:2, 0] * c)) / dy**2\n )\n\n\n p3 = U[2:n:2, 2:n-1:2, 3] * (gamma - 1)\n p4 = U[0:n-2:2, 2:n-1:2, 3] * (gamma - 1)\n Ucor[1:n-1:2, 2:n-1:2, 1] = (U[1:n-1:2, 2:n-1:2, 1]\n - 2 * dt * p3 / (dx * (U[2:n:2, 2:n-1:2, 0] + U[0:n-2:2, 2:n-1:2, 0]) / 2)\n + 2 * dt * p4 / (dx * 
(U[2:n:2, 2:n-1:2, 0] + U[0:n-2:2, 2:n-1:2, 0]) / 2)\n + dt * fx[1:n-1:2, 2:n-1:2]\n ) \n Ucor[1,:,1] = 0\n Ucor[n-2,:,1] = 0\n Ucor[:,1,1] = 0\n Ucor[:,n-2:1] = 0\n\n p5 = U[2:n-1:2, 2:n:2, 3] * (gamma - 1)\n p6 = U[2:n-1:2, 0:n-2:2, 3] * (gamma - 1)\n Ucor[2:n-1:2, 1:n-1:2, 2] = (U[2:n-1:2, 1:n-1:2, 2]\n - 2 * dt * p5 / (dy * (U[2:n-1:2, 2:n:2, 0] + U[2:n-1:2, 0:n-2:2, 0]) / 2)\n + 2 * dt * p6 / (dy * (U[2:n-1:2, 2:n:2, 0] + U[2:n-1:2, 0:n-2:2, 0]) / 2)\n + dt * fy[2:n-1:2, 1:n-1:2]\n )\n Ucor[1,:,2] = 0\n Ucor[n-2,:,2] = 0\n Ucor[:,1,2] = 0\n Ucor[:,n-2:2] = 0\n\n return Ucor, dt\n\nsurfs = []\nfor i in range(t):\n\n U, dt = updateU(U)\n print(i)\n\n if i % 50 == 0:\n surfs.append(U[::2, ::2, 3] - 335802)\n\n\nlines = []\nfor idx, surf in enumerate(surfs):\n print(idx / len(surfs))\n l = len(surf)\n surf_str = np.char.add(surf.cpu().numpy().astype(str), \"\\n\")\n surf_str[:,l-1] = np.char.add(surf_str[:,l-1], \"\\n\")\n surf_x = np.char.add(np.array([list(range(l)) for i in range(l)]).astype(str), \" \")\n surf_y = surf_x.T.astype(str)\n\n surf_new = np.char.add(np.char.add(surf_x, surf_y), surf_str).flatten()\n lines.extend(list(surf_new))\n lines.append(\"\\n\\n\")\n\nwith open(\"data.txt\", \"w\") as f:\n f.writelines(lines)\n" ]
[ [ "torch.zeros", "torch.Tensor", "numpy.char.add" ] ]
hysts/pytorch_yolov3
[ "6d4c7a1e42d366894effac8ca52f7116f891b5ab", "6d4c7a1e42d366894effac8ca52f7116f891b5ab" ]
[ "yolov3/transforms/transforms.py", "yolov3/models/yolov3.py" ]
[ "import random\nimport cv2\nimport numpy as np\nimport torch\n\n\nclass Compose:\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, image, targets):\n for transform in self.transforms:\n image, targets = transform(image, targets)\n return image, targets\n\n\nclass FlipColorChannelOrder:\n def __call__(self, image, targets=None):\n image = image[:, :, ::-1]\n return image, targets\n\n\nclass Normalize:\n def __call__(self, image, targets=None):\n image = image.astype(np.float32) / 255\n return image, targets\n\n\nclass NormalizeTargets:\n def __call__(self, image, targets=None):\n size = max(image.shape[:2])\n if targets is not None:\n targets[:, :4] /= size\n return image, targets\n\n\nclass Pad:\n def __init__(self, random_padding):\n self.random_padding = random_padding\n\n def __call__(self, image, targets=None):\n h, w = image.shape[:2]\n offset = self._compute_offset(np.array([w, h]))\n left, top = offset\n out_size = max(w, h)\n new_image = np.ones((out_size, out_size, 3), dtype=np.uint8) * 127\n new_image[top:top + h, left:left + w] = image\n if targets is not None:\n targets[:, :2] += offset\n return new_image, targets\n\n def _compute_offset(self, image_size):\n out_size = max(image_size)\n w, h = image_size\n if self.random_padding:\n dx = np.random.randint(out_size - w + 1)\n dy = np.random.randint(out_size - h + 1)\n else:\n dx = (out_size - w) // 2\n dy = (out_size - h) // 2\n return np.array([dx, dy])\n\n\nclass RandomDistort:\n def __init__(self, hue, saturation, exposure):\n self.hue = hue\n self.saturation = saturation\n self.exposure = exposure\n\n def __call__(self, image, targets=None):\n image = self._randomly_distort(image)\n return image, targets\n\n @staticmethod\n def _compute_random_scale(max_scale):\n scale = np.random.uniform(1, max_scale)\n return scale if random.random() < 0.5 else 1 / scale\n\n def _randomly_distort(self, image):\n diff_hue = np.random.uniform(-self.hue, self.hue)\n sat_scale = self._compute_random_scale(self.saturation)\n exp_scale = self._compute_random_scale(self.exposure)\n\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n hsv = hsv.astype(np.float32)\n hsv[:, :, 0] /= 179\n hsv[:, :, 1:] /= 255\n hue = hsv[:, :, 0] + diff_hue\n hsv[:, :, 1] *= sat_scale\n hsv[:, :, 2] *= exp_scale\n if diff_hue > 0:\n hue[hue > 1] -= 1\n else:\n hue[hue < 0] += 1\n hsv[:, :, 0] = np.round((hue * 179).clip(0, 179))\n hsv[:, :, 1:] = np.round((hsv[:, :, 1:] * 255).clip(0, 255))\n hsv = hsv.astype(np.uint8)\n image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n return image\n\n\nclass RandomHorizontalFlip:\n def __call__(self, image, targets=None):\n if random.random() < 0.5:\n image = image[:, ::-1]\n if targets is not None:\n targets[:, 0] = 1 - targets[:, 0]\n return image, targets\n\n\nclass Resize:\n def __init__(self, out_size, random_aspect_ratio_jitter):\n self.out_size = out_size\n self.jitter = random_aspect_ratio_jitter\n\n def __call__(self, image, targets=None):\n h, w = image.shape[:2]\n org_size = np.array([w, h])\n new_size = self.jitter_and_scale_size(org_size)\n image = cv2.resize(image, tuple(new_size))\n if targets is not None:\n scale = new_size / org_size\n targets[:, :4] *= np.concatenate([scale, scale])\n return image, targets\n\n def jitter_and_scale_size(self, image_size):\n w, h = self._jitter_size(image_size, self.jitter)\n new_size = self._scale_size_based_on_longer_edge(self.out_size, w / h)\n return new_size\n\n @staticmethod\n def _jitter_size(image_size, jitter):\n max_dw, max_dh = image_size 
* jitter\n dw = np.random.uniform(-max_dw, max_dw)\n dh = np.random.uniform(-max_dh, max_dh)\n return image_size + np.array([dw, dh])\n\n @staticmethod\n def _scale_size_based_on_longer_edge(out_size, new_aspect_ratio):\n if new_aspect_ratio < 1:\n new_h = out_size\n new_w = new_h * new_aspect_ratio\n else:\n new_w = out_size\n new_h = new_w / new_aspect_ratio\n return np.array([new_w, new_h]).astype(np.int)\n\n\nclass ToFixedSizeTargets:\n def __init__(self, max_targets):\n self.max_targets = max_targets\n\n def __call__(self, image, targets=None):\n padded_targets = np.zeros((self.max_targets, 5))\n if targets is not None:\n targets = targets[:self.max_targets]\n padded_targets[:len(targets)] = targets\n return image, padded_targets\n\n\nclass ToTensor:\n def __call__(self, image, targets):\n image = torch.from_numpy(image.transpose(2, 0, 1)).float()\n if targets is not None:\n targets = torch.from_numpy(targets).float()\n return image, targets\n\n\nclass ToYOLOTargets:\n def __call__(self, image, targets):\n \"\"\"Convert COCO annotation to target\n\n Args:\n image (np.ndarray): image\n targets (dict): COCO annotation\n bounding box is represented as (x0, y0, w, h) where\n (x0, y0): coordinates of top-left corner of it\n (w, h): size\n\n Returns:\n (tuple): (image, targets)\n image (np.ndarray): image\n targets (np.ndarray or None): (shape: Nx5)\n each row is (bbox, category_index) where\n bbox: (shape: 4) (xc, yc, w, h) where\n (xc, yc): coordinates of center of the\n bounding box\n (w, h): size of the bounding box\n category_index: category index in the list\n of categories considered\n\n \"\"\"\n converted_targets = []\n for annotation in targets:\n bbox = np.asarray(annotation['bbox'])\n bbox[:2] = bbox[:2] + bbox[2:] / 2\n converted_targets.append([*bbox, annotation['category_index']])\n if len(converted_targets) > 0:\n converted_targets = np.vstack(converted_targets)\n else:\n converted_targets = None\n return image, converted_targets\n", "import importlib\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom yolov3.models.common import ConvBN, DarknetBottleneck, initialize\nfrom yolov3.models.yolo_layer import YOLOLayer\n\n\nclass YOLOv3Stage(nn.Sequential):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.add_module(\n 'bottleneck1',\n DarknetBottleneck(in_channels, 2 * out_channels, shortcut=False))\n self.add_module(\n 'bottleneck2',\n DarknetBottleneck(\n 2 * out_channels, 2 * out_channels, shortcut=False))\n self.add_module(\n 'conv',\n ConvBN(\n 2 * out_channels,\n out_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n\n\nclass YOLOv3Upsample(nn.Module):\n def __init__(self, in_channels):\n super().__init__()\n self.conv = ConvBN(\n in_channels, in_channels // 2, kernel_size=1, stride=1, padding=0)\n\n def forward(self, x):\n x = F.interpolate(self.conv(x), scale_factor=2, mode='nearest')\n return x\n\n\nclass YOLOv3Base(nn.Module):\n def __init__(self, backbone_name):\n super().__init__()\n module = importlib.import_module(f'yolov3.models.{backbone_name}')\n self.backbone = getattr(module, 'FeatureExtractor')()\n\n with torch.no_grad():\n outputs = self.backbone(\n torch.zeros((1, 3, 64, 64), dtype=torch.float32))\n skip_channels = [output.size(1) for output in outputs]\n in_channels1 = skip_channels[2]\n out_channels1 = in_channels1 // 2\n in_channels2 = skip_channels[1] + out_channels1 // 2\n out_channels2 = out_channels1 // 2\n in_channels3 = skip_channels[0] + out_channels2 // 2\n out_channels3 = out_channels2 // 
2\n\n self.stage1 = YOLOv3Stage(in_channels1, out_channels1)\n self.upsample1 = YOLOv3Upsample(out_channels1)\n self.stage2 = YOLOv3Stage(in_channels2, out_channels2)\n self.upsample2 = YOLOv3Upsample(out_channels2)\n self.stage3 = YOLOv3Stage(in_channels3, out_channels3)\n\n def forward(self, x):\n feature_stage3, feature_stage4, feature_stage5 = self.backbone(x)\n outputs = []\n x = self.stage1(feature_stage5)\n outputs.append(x)\n x = torch.cat([self.upsample1(x), feature_stage4], dim=1)\n x = self.stage2(x)\n outputs.append(x)\n x = torch.cat([self.upsample2(x), feature_stage3], dim=1)\n x = self.stage3(x)\n outputs.append(x)\n return outputs\n\n\nclass YOLOv3(YOLOv3Base):\n def __init__(self, config):\n super().__init__(config.model.backbone)\n with torch.no_grad():\n outputs = super().forward(\n torch.zeros((1, 3, 64, 64), dtype=torch.float32))\n n_channels = [output.size(1) for output in outputs]\n self.conv1 = ConvBN(\n n_channels[0],\n n_channels[0] * 2,\n kernel_size=3,\n stride=1,\n padding=1)\n self.conv2 = ConvBN(\n n_channels[1],\n n_channels[1] * 2,\n kernel_size=3,\n stride=1,\n padding=1)\n self.conv3 = ConvBN(\n n_channels[2],\n n_channels[2] * 2,\n kernel_size=3,\n stride=1,\n padding=1)\n all_anchors = torch.tensor(config.model.anchors)\n all_anchor_indices = torch.tensor(config.model.anchor_indices)\n\n self.yolo_layer1 = YOLOLayer(\n n_channels[0] * 2,\n all_anchors=all_anchors,\n anchor_indices=all_anchor_indices[2],\n downsample_rate=32,\n n_classes=config.data.n_classes,\n iou_thresh=config.train.iou_thresh)\n self.yolo_layer2 = YOLOLayer(\n n_channels[1] * 2,\n all_anchors=all_anchors,\n anchor_indices=all_anchor_indices[1],\n downsample_rate=16,\n n_classes=config.data.n_classes,\n iou_thresh=config.train.iou_thresh)\n self.yolo_layer3 = YOLOLayer(\n n_channels[2] * 2,\n all_anchors=all_anchors,\n anchor_indices=all_anchor_indices[0],\n downsample_rate=8,\n n_classes=config.data.n_classes,\n iou_thresh=config.train.iou_thresh)\n\n self.apply(initialize)\n\n def forward(self, x, targets=None):\n outputs = super().forward(x)\n out1 = self.yolo_layer1(self.conv1(outputs[0]), targets)\n out2 = self.yolo_layer2(self.conv2(outputs[1]), targets)\n out3 = self.yolo_layer3(self.conv3(outputs[2]), targets)\n if targets is None:\n return torch.cat([out1, out2, out3], dim=1)\n else:\n return torch.stack([*out1, *out2, *out3]).view(3, 5).sum(dim=0)\n" ]
[ [ "numpy.asarray", "torch.from_numpy", "numpy.ones", "numpy.concatenate", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.vstack", "numpy.random.randint" ], [ "torch.zeros", "torch.cat", "torch.tensor", "torch.no_grad", "torch.stack" ] ]
kslam07/RWA2-LLT
[ "392fc78fc82a36a8ff88d1af94dabdd577397796" ]
[ "double_rotor_tip_effect.py" ]
[ "\"\"\"\nPlotting function\n\"\"\"\nfrom create_geometry import BladeGeometry\nfrom lifting_line_solver import LiftingLineSolver\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom read_BEMdata_into_Python import read_matlab_data\nimport numpy as np\n\n\nphase_shifts = np.linspace(0, 180, 4)\ncolors = [\"lawngreen\", \"deepskyblue\", \"orangered\", \"darkviolet\"]\n# start plots\nfig_circ, ax_circ = plt.subplots(1, 2, dpi=150)\nfig_ind, ax_ind = plt.subplots(1, 2, dpi=150)\nfig_aoa, ax_aoa = plt.subplots(1, 2, dpi=150)\nfig_rotor, ax_rotor = plt.subplots(1, 1, dpi=150)\nnspan = 20\nntheta = 200\nnblades = 3\nspacing = 'equal'\nnrotor = 2\n\nCP = []\nCT = []\n\nfor color, phase_shift in zip(colors, phase_shifts):\n\n\n prop_geo = BladeGeometry(radius=50.0, tsr=8, v_inf=10.0, n_blades=3, n_span=nspan, phase_diff=phase_shift,\n double_rotor=True,\n n_theta=ntheta, spacing=spacing, a=0.33, xshift=0, yshift=100, zshift=0)\n blade = prop_geo.bladepanels\n rings = prop_geo.filaments\n solver = LiftingLineSolver(geo=prop_geo, r_rotor=50, weight=0.5, tol=1e-6,\n n_iter=100)\n data_double = solver.run_solver()\n\n # non-dimensionalise data\n\n # forces [3, 4]\n data_double[3] = data_double[3] / (0.5 * solver.u_inf ** 2 * solver.r_rotor)\n data_double[4] = data_double[4] / (0.5 * solver.u_inf ** 2 * solver.r_rotor)\n # circulation [5]\n\n omega = solver.geo.tsr * solver.geo.v_inf / solver.geo.radius\n CPandCT = solver.CP_and_CT(np.resize(data_double[0], data_double[2].shape),\n np.resize(data_double[1], data_double[2].shape),\n data_double[2],\n np.resize(data_double[3], data_double[2].shape),\n np.resize(data_double[4], data_double[2].shape),\n solver.geo.v_inf, omega, solver.geo.radius, nblades)\n\n # =============================================================================\n # Double Rotor Plotting\n # =============================================================================\n\n def plotDoubleRotor():\n prop_geo = BladeGeometry(radius=50.0, tsr=8, v_inf=10.0, n_blades=3, n_span=nspan, n_theta=ntheta,\n spacing='cosine', a=0, xshift=0, yshift=100, zshift=0, phase_diff=120,\n double_rotor=True)\n prop_geo.doubleRotor()\n rings = prop_geo.filaments\n\n fig = plt.figure(dpi=150)\n ax = Axes3D(fig)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n\n # ax.set_xlim3d(0, 50)\n # ax.set_ylim3d(-50, 200)\n # ax.set_zlim3d(-50, 50)\n\n c = ['green', 'blue', 'red', 'green', 'blue', 'red']\n for idx in range(nblades * nrotor):\n ax.plot_wireframe(rings[0, idx * (nspan - 1):(idx + 1) * (nspan - 1), :ntheta + 1],\n rings[1, idx * (nspan - 1):(idx + 1) * (nspan - 1), :ntheta + 1],\n rings[2, idx * (nspan - 1):(idx + 1) * (nspan - 1), :ntheta + 1],\n color=c[idx], cstride=0)\n\n plotDoubleRotor()\n\n\n # =============================================================================\n # Rotor performance plots\n # =============================================================================\n def plottingFunction(solver, prop_geo, data):\n plt.plot(prop_geo.centerPoints, data[-1][:len(prop_geo.centerPoints)])\n plt.show()\n return\n\n\n # plottingFunction(solver,prop_geo,data)\n\n # =============================================================================\n # DOUBLE ROTOR RESULTS\n # =============================================================================\n plt.close('All')\n r_R = data_double[2][:nspan-1]\n\n [BEM_rR, BEM_alpha, BEM_phi, BEM_rho, BEM_Ax, BEM_Az, BEM_Gamma , BEM_CT, BEM_CP, BEM_a,\n BEM_aline, BEM_vinf, BEM_radius] = read_matlab_data()\n 
BEM_Ax = BEM_Ax / (0.5 * 10**2 * 50)\n BEM_Az = BEM_Az / (0.5 * 10**2 * 50)\n\n ax_aoa[0].plot(r_R, np.degrees(data_double[6][:nspan-1]), linestyle='-', c=color,\n label=r\"$\\gamma$: {}$^\\circ$\".format(phase_shift))\n ax_aoa[1].plot(r_R, np.degrees(data_double[6][3*(nspan-1):4*(nspan-1)]), linestyle='--', c=color,\n label=r\"$\\gamma$: {}$^\\circ$\".format(phase_shift))\n\n # Radial distribution circulationd\n\n # made non-dimensional with (np.pi * Uinf**2) / (NBlades*Omega)\n circ_nondim = (np.pi * solver.geo.v_inf ** 2) / (nblades * omega)\n\n\n # ax_circ.plot(data_single[2][:nspan - 1, 0], np.resize(data_single[5], data_single[2].shape)[:nspan - 1,\n # 0] / circ_nondim)\n ax_circ[0].plot(r_R, data_double[5][:nspan - 1] / circ_nondim,\n linestyle='-', c=color, label=r\"$\\gamma$: {}$^\\circ$\".format(phase_shift))\n ax_circ[1].plot(r_R, data_double[5][3*(nspan-1):4*(nspan-1)] / circ_nondim, linestyle='--', c=color)\n\n\n # RADIAL DISTRIBUTION INDUCTION FACTORS\n\n # INDUCTION FACTORS\n ax_ind[0].plot(r_R, data_double[0][:nspan - 1],\n linestyle='-', c=color, label=r\"$\\gamma$: {}$^\\circ$\".format(phase_shift))\n ax_ind[1].plot(r_R, data_double[0][3*(nspan-1):4*(nspan-1)], linestyle='--', c=color)\n # ax_ind[0].plot(data_single[2][-(nspan - 1):], data_single[0][-(nspan - 1):], ':r')\n # ax_ind[1].plot(data_double[2][:nspan - 1], data_double[1][:nspan - 1], linestyle='-')\n # ax_ind[1].plot(data_double[2][-(nspan - 1):], data_double[1][-(nspan - 1):], linestyle='--')\n # ax_ind[1].plot(data_single[2][:nspan - 1], data_single[1][:nspan - 1], ':r', label=\"LLM - single rotor\")\n\n # CP and CT\n CT.append(CPandCT[-1])\n CP.append(CPandCT[-2])\n\nax_circ[0].set_xlabel('r/R (-)', fontsize=14)\nax_circ[0].set_ylabel(r'$\\Gamma$ (-)', fontsize=14)\nax_circ[0].legend(prop={\"size\": 10})\nax_circ[0].grid(True)\nax_circ[1].set_xlabel('r/R (-)', fontsize=14)\nax_circ[1].set_ylabel(r'$\\Gamma$ (-)', fontsize=14)\nax_circ[1].grid(True)\n\nax_ind[0].set_xlabel(\"r/R (-)\", fontsize=14)\nax_ind[0].set_ylabel(\"a (-)\", fontsize=14)\nax_ind[1].set_xlabel(\"r/R (-)\", fontsize=14)\nax_ind[1].set_ylabel(\"a' (-)\", fontsize=14)\nax_ind[0].legend(prop={\"size\": 10})\nax_ind[0].grid()\nax_ind[1].grid()\n\nax_aoa[0].set_xlabel('r/R (-)', fontsize=14)\nax_aoa[0].set_ylabel(r'$\\alpha$ ($^\\circ$)', fontsize=14)\nax_aoa[0].legend(prop={\"size\": 10})\nax_aoa[0].grid(True)\nax_aoa[1].set_xlabel('r/R (-)', fontsize=14)\nax_aoa[1].set_ylabel(r'$\\alpha$ ($^\\circ$)', fontsize=14)\nax_aoa[1].grid(True)\nax_aoa.set_title(\"Blue\")" ]
[ [ "numpy.resize", "numpy.linspace", "numpy.degrees", "matplotlib.pyplot.subplots", "matplotlib.pyplot.close", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
gwpark-git/dynamics_of_networks_and_colloids
[ "0b0a3687533379ec75171ae6b906aeff5bedfbba" ]
[ "post_processing/plot_position_dumbbell_multiprocessing.py" ]
[ "from numpy import *\nimport matplotlib.pyplot as plt\nimport sys\nfrom multiprocessing import Pool\nfrom functools import partial\n\nfn = sys.argv[1]\nout_path = sys.argv[2]\n\ndef plot_t(traj, t):\n dimension = 2\n Np = int((shape(traj)[1] - 1)/(2*dimension)) \n box_dimension = [10.0, 10.0]\n\n marker_style = ['bo-', 'ro-', 'go-', 'ko-', 'co-']\n fig = plt.figure(t)\n ax = fig.add_subplot(111)\n for i in range(0,Np, 2):\n index_px = i*dimension*2 + 1 + 0\n index_px_2 = (i+1)*dimension*2 + 1 + 0\n index_py = index_px + 1\n index_py_2 = index_px_2 + 1\n px = asarray([[traj[t, index_px], traj[t, index_py]],\n [traj[t, index_px_2], traj[t, index_py_2]]])\n ax.plot(px[:,0], px[:,1], marker_style[i%5])\n\n # ax.axis('equal')\n ax.axis([0, box_dimension[0], 0, box_dimension[1]])\n # plt.axis([-0.5*box_dimension[0], 0.5*box_dimension[0], -0.5*box_dimension[1], 0.5*box_dimension[1]])\n ax.grid('on')\n ax.set_xlabel('x dimension')\n ax.set_ylabel('y dimension')\n ax.set_aspect(1)\n plt.savefig('%s/t%08d.png'%(out_path, t), dpi=300, bbox_inches = 'tight')\n plt.close()\n\n\nif __name__ == '__main__':\n traj = loadtxt(fn)\n # Nt = 1000000\n Nt = shape(traj)[0]\n N_skip = 1 #already skipped data\n pool = Pool(processes=None) # processes = None allocate each thread to the each CPUs.\n pool.map(partial(plot_t, traj), range(0, Nt, N_skip))\n\n# ffmpeg -r 60 -i figures/t%08d.png -vcodec copy out.mov\n" ]
[ [ "matplotlib.pyplot.close", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ] ]
opveclib/opveclib
[ "2d70ec1857437426d488d924fe05a8260c6e69bd" ]
[ "opveclib/test/test_limits.py" ]
[ "# Copyright 2016 Hewlett Packard Enterprise Development LP\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for\n# the specific language governing permissions and limitations under the License.\n\nimport unittest\nimport numpy as np\nfrom ..operator import operator, evaluate\nfrom ..expression import position_in, output, min_value, max_value, epsilon\nfrom ..local import cuda_enabled, clear_op_cache\n\n\n@operator()\ndef set_limits(arg):\n pos = position_in(arg.shape)\n limits = output([arg.shape[0], 3], arg.dtype)\n t = arg.dtype\n limits[pos,0] = min_value(t)\n limits[pos,1] = max_value(t)\n limits[pos,2] = epsilon(t)\n\n return limits\n\n\nclass TestLimits(unittest.TestCase):\n clear_op_cache()\n\n def test(self):\n a = np.array([0])\n types = [np.float32, np.float64]\n for t in types:\n op = set_limits(a.astype(t))\n op_c = evaluate(op, target_language='cpp')\n op_np = np.array([[np.finfo(t).tiny, np.finfo(t).max, np.finfo(t).eps]])\n assert np.all(np.equal(op_c, op_np))\n\n if cuda_enabled:\n op_cuda = evaluate(op, target_language='cuda')\n assert np.all(np.equal(op_cuda, op_np))\n" ]
[ [ "numpy.array", "numpy.equal", "numpy.finfo" ] ]
equinor/Marmot
[ "99ab6336f920f460df335d4a1b3e4f36d847ae46" ]
[ "marmot/plottingmodules/storage.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Energy storage plots.\n\nThis module creates energy storage plots.\n\"\"\"\n\nimport logging\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nimport marmot.config.mconfig as mconfig\nimport marmot.plottingmodules.plotutils.plot_library as plotlib\nfrom marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper\nfrom marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)\n\n\nclass MPlot(PlotDataHelper):\n \"\"\"storage MPlot class.\n\n All the plotting modules use this same class name.\n This class contains plotting methods that are grouped based on the\n current module name.\n \n The storage.py module contains methods that are\n related to storage devices. \n \n MPlot inherits from the PlotDataHelper class to assist in creating figures.\n \"\"\"\n\n def __init__(self, argument_dict: dict):\n \"\"\"\n Args:\n argument_dict (dict): Dictionary containing all\n arguments passed from MarmotPlot.\n \"\"\"\n # iterate over items in argument_dict and set as properties of class\n # see key_list in Marmot_plot_main for list of properties\n for prop in argument_dict:\n self.__setattr__(prop, argument_dict[prop])\n \n # Instantiation of MPlotHelperFunctions\n super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen, \n self.PLEXOS_color_dict, self.Scenarios, self.ylabels, \n self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping) \n\n self.logger = logging.getLogger('marmot_plot.'+__name__)\n self.y_axes_decimalpt = mconfig.parser(\"axes_options\",\"y_axes_decimalpt\")\n \n\n def storage_volume(self, timezone: str = \"\", \n start_date_range: str = None, \n end_date_range: str = None, **_):\n \"\"\"Creates time series plot of aggregate storage volume for all storage objects in a given region.\n\n A horizontal line represents full charge of the storage device.\n All scenarios are plotted on a single figure.\n\n Args:\n timezone (str, optional): The timezone to display on the x-axes.\n Defaults to \"\".\n start_date_range (str, optional): Defines a start date at which to represent data from. 
\n Defaults to None.\n end_date_range (str, optional): Defines a end date at which to represent data to.\n Defaults to None.\n\n Returns:\n dict: Dictionary containing the created plot and its data table.\n \"\"\"\n if self.AGG_BY == 'zone':\n agg = 'zone'\n else:\n agg = 'region'\n \n outputs = {}\n \n # List of properties needed by the plot, properties are a set of tuples and contain 3 parts:\n # required True/False, property name and scenarios required, scenarios must be a list.\n properties = [(True, \"storage_Initial_Volume\", self.Scenarios),\n (True, f\"{agg}_Unserved_Energy\", self.Scenarios),\n (True, \"storage_Max_Volume\", [self.Scenarios[0]])]\n \n # Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary \n # with all required properties, returns a 1 if required data is missing\n check_input_data = self.get_formatted_data(properties)\n\n if 1 in check_input_data:\n return MissingInputData()\n \n for zone_input in self.Zones:\n self.logger.info(f\"{self.AGG_BY} = {zone_input}\")\n\n storage_volume_all_scenarios = pd.DataFrame()\n use_all_scenarios = pd.DataFrame()\n\n for scenario in self.Multi_Scenario:\n\n self.logger.info(f\"Scenario = {str(scenario)}\")\n\n storage_volume_read = self[\"storage_Initial_Volume\"].get(scenario)\n try:\n storage_volume = storage_volume_read.xs(zone_input, level = self.AGG_BY)\n except KeyError:\n self.logger.warning(f'No storage resources in {zone_input}')\n outputs[zone_input] = MissingZoneData()\n continue\n\n #Isolate only head storage objects (not tail).\n storage_gen_units = storage_volume.index.get_level_values('storage_resource')\n head_units = [unit for unit in storage_gen_units if 'head' in unit]\n storage_volume = storage_volume.iloc[storage_volume.index.get_level_values('storage_resource').isin(head_units)]\n storage_volume = storage_volume.groupby(\"timestamp\").sum()\n storage_volume.columns = [scenario]\n\n max_volume = storage_volume.max().squeeze()\n try:\n max_volume = self[\"storage_Max_Volume\"].get(scenario)\n max_volume = max_volume.xs(zone_input, level = self.AGG_BY)\n max_volume = max_volume.groupby('timestamp').sum()\n max_volume = max_volume.squeeze()[0]\n except KeyError:\n self.logger.warning(f'No storage resources in {zone_input}')\n\n #Pull unserved energy.\n use_read = self[f\"{agg}_Unserved_Energy\"].get(scenario)\n use = use_read.xs(zone_input, level = self.AGG_BY)\n use = use.groupby(\"timestamp\").sum() / 1000\n use.columns = [scenario]\n\n # if prop == \"Peak Demand\":\n\n # peak_demand_t = Total_Demand.idxmax()\n # end_date = peak_demand_t + dt.timedelta(days=end)\n # start_date = peak_demand_t - dt.timedelta(days=start)\n # Peak_Demand = Total_Demand[peak_demand_t]\n # storage_volume = storage_volume[start_date : end_date]\n # use = use[start_date : end_date]\n\n # elif prop == \"Min Net Load\":\n # min_net_load_t = Net_Load.idxmin()\n # end_date = min_net_load_t + dt.timedelta(days=end)\n # start_date = min_net_load_t - dt.timedelta(days=start)\n # Min_Net_Load = Net_Load[min_net_load_t]\n # storage_volume = storage_volume[start_date : end_date]\n # use = use[start_date : end_date]\n\n if pd.notna(start_date_range):\n self.logger.info(f\"Plotting specific date range: \\\n {str(start_date_range)} to {str(end_date_range)}\")\n\n storage_volume = storage_volume[start_date_range : end_date_range]\n use = use[start_date_range : end_date_range]\n\n storage_volume_all_scenarios = pd.concat([storage_volume_all_scenarios,storage_volume], axis = 1)\n #storage_volume_all_scenarios.columns = 
storage_volume_all_scenarios.columns.str.replace('_',' ')\n\n use_all_scenarios = pd.concat([use_all_scenarios,use], axis = 1)\n #use_all_scenarios.columns = use_all_scenarios.columns.str.replace('_',' ')\n\n # Data table of values to return to main program\n Data_Table_Out = pd.concat([storage_volume_all_scenarios,use_all_scenarios],axis = 1)\n #Make scenario/color dictionary.\n color_dict = dict(zip(storage_volume_all_scenarios.columns,self.color_list))\n\n fig1, axs = plotlib.setup_plot(ydimension = 2,sharey = False)\n plt.subplots_adjust(wspace=0.05, hspace=0.2)\n \n if storage_volume_all_scenarios.empty:\n out = MissingZoneData()\n outputs[zone_input] = out\n continue\n \n for column in storage_volume_all_scenarios:\n plotlib.create_line_plot(axs,storage_volume_all_scenarios,column,color_dict,label = column,n = 0) \n axs[0].set_ylabel('Head Storage Volume (GWh)', color='black', rotation='vertical')\n axs[0].yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))\n axs[0].margins(x=0.01)\n\n axs[0].set_ylim(ymin = 0)\n axs[0].set_title(zone_input)\n #axs[0].xaxis.set_visible(False)\n\n plotlib.create_line_plot(axs,use_all_scenarios,column,color_dict,label = column + ' Unserved Energy', n = 1)\n axs[1].set_ylabel('Unserved Energy (GWh)', color='black', rotation='vertical')\n axs[1].set_xlabel(timezone, color='black', rotation='horizontal')\n axs[1].yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))\n axs[1].margins(x=0.01)\n\n [PlotDataHelper.set_plot_timeseries_format(axs,n) for n in range(0,2)]\n \n axs[0].axhline(y = max_volume, linestyle = ':',label = 'Max Volume')\n axs[0].legend(loc = 'lower left',bbox_to_anchor = (1.15,0),facecolor = 'inherit',frameon = True)\n axs[1].legend(loc = 'lower left',bbox_to_anchor = (1.15,0.2),facecolor = 'inherit',frameon = True)\n if mconfig.parser(\"plot_title_as_region\"):\n fig1.title(zone_input)\n\n outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}\n return outputs\n\n\n" ]
[ [ "pandas.notna", "pandas.concat", "pandas.DataFrame", "matplotlib.pyplot.subplots_adjust" ] ]
ajavadia/qiskit-ignis
[ "c03d1bd22f49f461e7f3112cb8b854e92f6d961f" ]
[ "qiskit/ignis/verification/tomography/fitters/mle_fit.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\n\"\"\"\nMaximum-Likelihood estimation quantum tomography fitter\n\"\"\"\n\nimport numpy as np\nfrom scipy.linalg import lstsq\n\nfrom .utils import make_positive_semidefinite\n\n\ndef state_mle_fit(data, basis_matrix, weights=None):\n \"\"\"\n Reconstruct a density matrix using MLE least-squares fitting.\n\n Args:\n data (vector like): vector of expectation values\n basis_matrix (matrix like): matrix of measurement operators\n weights (vector like, optional): vector of weights to apply to the\n objective function (default: None)\n PSD (bool, optional): Enforced the fitted matrix to be positive\n semidefinite (default: True)\n trace (int, optional): trace constraint for the fitted matrix\n (default: None).\n\n Returns:\n The fitted matrix rho that minimizes\n ||basis_matrix * vec(rho) - data||_2.\n\n Additional Information:\n This function is a wrapper for `mle_fit`. See\n `tomography.fitters.mle_fit` documentation for additional information.\n \"\"\"\n return mle_fit(data, basis_matrix, weights=weights, PSD=True, trace=1)\n\n\ndef process_mle_fit(data, basis_matrix, weights=None):\n \"\"\"\n Reconstruct a process (Choi) matrix using MLE least-squares fitting.\n\n Note: due to limitations of the fitting method the returned Choi-matrix\n will be completely-positive, but not necessarily trace preserving.\n\n Args:\n data (vector like): vector of expectation values\n basis_matrix (matrix like): matrix of measurement operators\n weights (vector like, optional): vector of weights to apply to the\n objective function (default: None)\n PSD (bool, optional): Enforced the fitted matrix to be positive\n semidefinite (default: True)\n trace (int, optional): trace constraint for the fitted matrix\n (default: None).\n\n Returns:\n The fitted Choi-matrix that minimizes\n ||basis_matrix * vec(choi) - data||_2.\n\n Additional Information:\n Due to limitations of the fitting method the returned Choi-matrix will\n be completely-positive, but not necessarily trace preserving.\n\n This function is a wrapper for `mle_fit`. 
See\n `tomography.fitters.mle_fit` documentation for additional information.\n \"\"\"\n # Calculate trace of Choi-matrix from projector length\n _, cols = np.shape(basis_matrix)\n dim = int(np.sqrt(np.sqrt(cols)))\n if dim ** 4 != cols:\n raise ValueError(\"Input data does not correspond to a process matrix.\")\n return mle_fit(data, basis_matrix, weights=weights, PSD=True, trace=dim)\n\n\n###########################################################################\n# Linear Inversion (Least-Squares) Fitter\n###########################################################################\n\ndef mle_fit(data, basis_matrix, weights=None, PSD=True, trace=None):\n \"\"\"\n Reconstruct a density matrix using MLE least-squares fitting.\n\n Args:\n data (vector like): vector of expectation values\n basis_matrix (matrix like): matrix of measurement operators\n weights (vector like, optional): vector of weights to apply to the\n objective function (default: None)\n PSD (bool, optional): Enforced the fitted matrix to be positive\n semidefinite (default: True)\n trace (int, optional): trace constraint for the fitted matrix\n (default: None).\n\n Returns:\n The fitted matrix rho that minimizes\n ||basis_matrix * vec(rho) - data||_2.\n\n Additional Information:\n\n Objective function\n ------------------\n This fitter solves the least-squares minimization:\n\n minimize ||a * x - b ||_2\n\n where:\n a is the matrix of measurement operators a[i] = vec(M_i).H\n b is the vector of expectation value data for each projector\n b[i] ~ Tr[M_i.H * x] = (a * x)[i]\n x is the vectorized density matrix (or Choi-matrix) to be fitted\n\n PSD Constraint\n --------------\n Since this minimization problem is unconstrained the returned fitted\n matrix may not be postive semidefinite (PSD). To enforce the PSD\n constraint the fitted matrix is rescaled using the method proposed in\n Reference [1].\n\n Trace constraint\n ----------------\n In general the trace of the fitted matrix will be determined by the\n input data. If a trace constraint is specified the fitted matrix\n will be rescaled to have this trace by:\n rho = trace * rho / trace(rho)\n\n References:\n [1] J Smolin, JM Gambetta, G Smith, Phys. Rev. Lett. 108, 070502\n (2012). Open access: arXiv:1106.5458 [quant-ph].\n \"\"\"\n\n # We are solving the least squares fit: minimize ||a * x - b ||_2\n # where:\n # a is the matrix of measurement operators\n # b is the vector of expectation value data for each projector\n # x is the vectorized density matrix (or Choi-matrix) to be fitted\n a = basis_matrix\n b = np.array(data)\n\n # Optionally apply a weights vector to the data and projectors\n if weights is not None:\n w = np.array(weights)\n a = w[:, None] * a\n b = w * b\n\n # Perform least squares fit using Scipy.linalg lstsq function\n rho_fit, _, _, _ = lstsq(a, b)\n\n # Reshape fit to a density matrix\n size = len(rho_fit)\n dim = int(np.sqrt(size))\n if dim * dim != size:\n raise ValueError(\"fitted vector is not a square matrix.\")\n # Devectorize in column-major (Fortran order in Numpy)\n rho_fit = rho_fit.reshape(dim, dim, order='F')\n\n # Rescale fitted density matrix be positive-semidefinite\n if PSD is True:\n rho_fit = make_positive_semidefinite(rho_fit)\n\n # Rescale fitted density matrix to satisfy trace constraint\n if trace is not None:\n rho_fit *= trace / np.trace(rho_fit)\n return rho_fit\n" ]
[ [ "numpy.sqrt", "scipy.linalg.lstsq", "numpy.shape", "numpy.array", "numpy.trace" ] ]
colonelzentor/GENN
[ "65376ddbb4084a7b687d3c059a09ed3dae065b1d" ]
[ "src/jenn/tests/test_utils.py" ]
[ "import numpy as np\nfrom jenn.tests.test_problems import rosenbrock\nfrom jenn._utils import grad_check\n\n\ndef test_grad_check():\n \"\"\"\n Test that method to check gradient accuracy works, using the\n banana Rosenbrock function as a test example\n \"\"\"\n x0 = [np.array([1.25, -1.75]).reshape((2, 1))]\n f = lambda x: rosenbrock(x)[0]\n dfdx = lambda x: rosenbrock(x)[1]\n assert grad_check(x0, f, dfdx)" ]
[ [ "numpy.array" ] ]
jeromedockes/nistats
[ "45894a76794d24051221951e346a9a2e882b98d1" ]
[ "nistats/contrasts.py" ]
[ "\"\"\"\nThis module is for contrast computation and operation on contrast to\nobtain fixed effect results.\n\nAuthor: Bertrand Thirion, Martin Perez-Guevara, 2016\n\"\"\"\n\nfrom warnings import warn\n\nimport numpy as np\nimport scipy.stats as sps\nimport pandas as pd\n\nfrom .utils import z_score\n\nDEF_TINY = 1e-50\nDEF_DOFMAX = 1e10\n\n\ndef expression_to_contrast_vector(expression, design_columns):\n if expression in design_columns:\n contrast_vector = np.zeros(len(design_columns))\n contrast_vector[list(design_columns).index(expression)] = 1.\n return contrast_vector\n df = pd.DataFrame(np.eye(len(design_columns)), columns=design_columns)\n contrast_vector = df.eval(expression, engine=\"python\").values\n return contrast_vector\n\n\ndef compute_contrast(labels, regression_result, con_val, contrast_type=None):\n \"\"\" Compute the specified contrast given an estimated glm\n\n Parameters\n ----------\n labels : array of shape (n_voxels,),\n A map of values on voxels used to identify the corresponding model\n\n results : dict,\n With keys corresponding to the different labels\n values are RegressionResults instances corresponding to the voxels.\n\n con_val : numpy.ndarray of shape (p) or (q, p)\n Where q = number of contrast vectors and p = number of regressors.\n\n contrast_type : {None, 't', 'F'}, optional\n Type of the contrast. If None, then defaults to 't' for 1D\n `con_val` and 'F' for 2D `con_val`\n\n Returns\n -------\n con : Contrast instance,\n Yields the statistics of the contrast (effects, variance, p-values)\n \"\"\"\n con_val = np.asarray(con_val)\n dim = 1\n if con_val.ndim > 1:\n dim = con_val.shape[0]\n\n if contrast_type is None:\n contrast_type = 't' if dim == 1 else 'F'\n\n acceptable_contrast_types = ['t', 'F']\n if contrast_type not in acceptable_contrast_types:\n raise ValueError(\n '\"{0}\" is not a known contrast type. 
Allowed types are {1}'.\n format(contrast_type, acceptable_contrast_types))\n\n if contrast_type == 't':\n effect_ = np.zeros((1, labels.size))\n var_ = np.zeros(labels.size)\n for label_ in regression_result:\n label_mask = labels == label_\n resl = regression_result[label_].Tcontrast(con_val)\n effect_[:, label_mask] = resl.effect.T\n var_[label_mask] = (resl.sd ** 2).T\n elif contrast_type == 'F':\n from scipy.linalg import sqrtm\n effect_ = np.zeros((dim, labels.size))\n var_ = np.zeros(labels.size)\n for label_ in regression_result:\n label_mask = labels == label_\n reg = regression_result[label_]\n cbeta = np.atleast_2d(np.dot(con_val, reg.theta))\n invcov = np.linalg.inv(np.atleast_2d(\n reg.vcov(matrix=con_val, dispersion=1.0)))\n wcbeta = np.dot(sqrtm(invcov), cbeta)\n rss = reg.dispersion\n effect_[:, label_mask] = wcbeta\n var_[label_mask] = rss\n\n dof_ = regression_result[label_].df_resid\n return Contrast(effect=effect_, variance=var_, dim=dim, dof=dof_,\n contrast_type=contrast_type)\n\n\ndef _fixed_effect_contrast(labels, results, con_vals, contrast_type=None):\n \"\"\"Computes the summary contrast assuming fixed effects.\n\n Adds the same contrast applied to all labels and results lists.\n \"\"\"\n contrast = None\n n_contrasts = 0\n for i, (lab, res, con_val) in enumerate(zip(labels, results, con_vals)):\n if np.all(con_val == 0):\n warn('Contrast for session %d is null' % i)\n continue\n contrast_ = compute_contrast(lab, res, con_val, contrast_type)\n if contrast is None:\n contrast = contrast_\n else:\n contrast = contrast + contrast_\n n_contrasts += 1\n if contrast is None:\n raise ValueError('all contrasts provided were null contrasts')\n return contrast * (1. / n_contrasts)\n\n\nclass Contrast(object):\n \"\"\" The contrast class handles the estimation of statistical contrasts\n on a given model: student (t) or Fisher (F).\n The important feature is that it supports addition,\n thus opening the possibility of fixed-effects models.\n\n The current implementation is meant to be simple,\n and could be enhanced in the future on the computational side\n (high-dimensional F constrasts may lead to memory breakage).\n \"\"\"\n\n def __init__(self, effect, variance, dim=None, dof=DEF_DOFMAX,\n contrast_type='t', tiny=DEF_TINY, dofmax=DEF_DOFMAX):\n \"\"\"\n Parameters\n ----------\n effect : array of shape (contrast_dim, n_voxels)\n the effects related to the contrast\n\n variance : array of shape (n_voxels)\n the associated variance estimate\n\n dim: int or None,\n the dimension of the contrast\n\n dof : scalar\n the degrees of freedom of the residuals\n\n contrast_type: {'t', 'F'}\n specification of the contrast type\n \"\"\"\n if variance.ndim != 1:\n raise ValueError('Variance array should have 1 dimension')\n if effect.ndim != 2:\n raise ValueError('Effect array should have 2 dimensions')\n\n self.effect = effect\n self.variance = variance\n self.dof = float(dof)\n if dim is None:\n self.dim = effect.shape[0]\n else:\n self.dim = dim\n if self.dim > 1 and contrast_type is 't':\n print('Automatically converted multi-dimensional t to F contrast')\n contrast_type = 'F'\n self.contrast_type = contrast_type\n self.stat_ = None\n self.p_value_ = None\n self.baseline = 0\n self.tiny = tiny\n self.dofmax = dofmax\n\n def effect_size(self):\n \"\"\"Make access to summary statistics more straightforward when\n computing contrasts\"\"\"\n return self.effect[0, :]\n\n def effect_variance(self):\n \"\"\"Make access to summary statistics more straightforward when\n computing 
contrasts\"\"\"\n return self.variance\n\n def stat(self, baseline=0.0):\n \"\"\" Return the decision statistic associated with the test of the\n null hypothesis: (H0) 'contrast equals baseline'\n\n Parameters\n ----------\n baseline : float, optional\n Baseline value for the test statistic\n\n Returns\n -------\n stat: 1-d array, shape=(n_voxels,)\n statistical values, one per voxel\n \"\"\"\n self.baseline = baseline\n\n # Case: one-dimensional contrast ==> t or t**2\n if self.contrast_type == 'F':\n stat = np.sum((self.effect - baseline) ** 2, 0) / self.dim /\\\n np.maximum(self.variance, self.tiny)\n elif self.contrast_type == 't':\n # avoids division by zero\n stat = (self.effect - baseline) / np.sqrt(\n np.maximum(self.variance, self.tiny))\n else:\n raise ValueError('Unknown statistic type')\n self.stat_ = stat.ravel()\n return self.stat_\n\n def p_value(self, baseline=0.0):\n \"\"\"Return a parametric estimate of the p-value associated\n with the null hypothesis: (H0) 'contrast equals baseline'\n\n Parameters\n ----------\n baseline : float, optional\n baseline value for the test statistic\n\n Returns\n -------\n p_values : 1-d array, shape=(n_voxels,)\n p-values, one per voxel\n \"\"\"\n if self.stat_ is None or not self.baseline == baseline:\n self.stat_ = self.stat(baseline)\n # Valid conjunction as in Nichols et al, Neuroimage 25, 2005.\n if self.contrast_type == 't':\n p_values = sps.t.sf(self.stat_, np.minimum(self.dof, self.dofmax))\n elif self.contrast_type == 'F':\n p_values = sps.f.sf(self.stat_, self.dim, np.minimum(\n self.dof, self.dofmax))\n else:\n raise ValueError('Unknown statistic type')\n self.p_value_ = p_values\n return p_values\n\n def z_score(self, baseline=0.0):\n \"\"\"Return a parametric estimation of the z-score associated\n with the null hypothesis: (H0) 'contrast equals baseline'\n\n Parameters\n ----------\n baseline: float, optional,\n Baseline value for the test statistic\n\n Returns\n -------\n z_score: 1-d array, shape=(n_voxels,)\n statistical values, one per voxel\n\n \"\"\"\n if self.p_value_ is None or not self.baseline == baseline:\n self.p_value_ = self.p_value(baseline)\n\n # Avoid inf values kindly supplied by scipy.\n self.z_score_ = z_score(self.p_value_)\n return self.z_score_\n\n def __add__(self, other):\n \"\"\"Addition of selfwith others, Yields an new Contrast instance\n This should be used only on indepndent contrasts\"\"\"\n if self.contrast_type != other.contrast_type:\n raise ValueError(\n 'The two contrasts do not have consistant type dimensions')\n if self.dim != other.dim:\n raise ValueError(\n 'The two contrasts do not have compatible dimensions')\n dof_ = self.dof + other.dof\n if self.contrast_type == 'F':\n warn('Running approximate fixed effects on F statistics.')\n effect_ = self.effect + other.effect\n variance_ = self.variance + other.variance\n return Contrast(effect=effect_, variance=variance_, dim=self.dim,\n dof=dof_, contrast_type=self.contrast_type)\n\n def __rmul__(self, scalar):\n \"\"\"Multiplication of the contrast by a scalar\"\"\"\n scalar = float(scalar)\n effect_ = self.effect * scalar\n variance_ = self.variance * scalar ** 2\n dof_ = self.dof\n return Contrast(effect=effect_, variance=variance_, dof=dof_,\n contrast_type=self.contrast_type)\n\n __mul__ = __rmul__\n\n def __div__(self, scalar):\n return self.__rmul__(1 / float(scalar))\n" ]
[ [ "numpy.dot", "numpy.maximum", "numpy.minimum", "numpy.asarray", "numpy.all", "scipy.linalg.sqrtm", "numpy.zeros", "numpy.sum" ] ]
kiat/debs2019
[ "b1231a0995a154f8549ef23a00f635b81cc3c689" ]
[ "src/ssh-kd/plugin/kde_seg.py" ]
[ "import numpy as np\nfrom sklearn.cluster import DBSCAN\nfrom scipy import stats\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\ndef get_different_sectors(temp, threshold=0.7):\n \"\"\" \n Divides the scene into sectors. \n \n In a given 3D scene, from the top view, this functions divides the scene into object containing sectors and empty sectors \n \n Parameters: \n temp (numpy.ndarray): The input is a numpy array with X,Y,Z,radius values \n \n Returns: \n list_of_sectors (list): list of sectors, where each sector is a numpy.ndarray with X,Y,Z,radius,angles, top_radius values\n \n \"\"\"\n # Calculate the angles using the np.arctan2\n angles = np.round(np.arctan2(temp[:,2],temp[:,0])*180/np.pi,1).astype(np.float)\n angles[angles<0] = angles[angles<0]+360\n\n # calculate the top_radius(sqrt(x**2+z**2))\n top_rad = np.round(np.sqrt(temp[:,0]**2+temp[:,2]**2),1)\n # print(top_rad.tolist())\n # appending angles and the top radius to the input\n temp = np.hstack([temp, angles.reshape(-1,1), top_rad.reshape(-1,1)])\n\n # Calculating the unique angles\n unique_angles = sorted(list(np.unique(angles)))\n indexes_to_split = list(np.where(np.diff(unique_angles)>=threshold)[0]+1)\n start=0\n\n if len(unique_angles)==0:\n angle_ranges = [unique_angles]\n else:\n angle_ranges = []\n\n for i in indexes_to_split:\n angle_ranges.append(unique_angles[start:i])\n start = i\n \n list_of_sectors = []\n \n for j in angle_ranges:\n max_angle = max(j)\n min_angle = min(j)\n bool_vec = (angles>=min_angle) & (angles<=max_angle)\n if np.sum(bool_vec)>10:\n list_of_sectors.append(temp[bool_vec])\n \n return list_of_sectors\n\n\ndef get_valid_density(list_of_valid_sec):\n \"\"\" \n Divides the Sector into mini areas of objects. \n \n In a given sectors of a 3D scene, from the top view, this functions divides the sectors into object containing area and empty areas \n \n Parameters: \n list_of_valid_sec (list of numpy.ndarray): The input is a list of numpy array with X,Y,Z,radius,angles,top_radius values \n \n Returns: \n list_of_valid_density (list): list of objects, where each object is a numpy.ndarray with X,Y,Z,radius,angles, top_radius values\n \n \"\"\"\n\n # Return value\n list_of_valid_density = []\n\n\n for temp in list_of_valid_sec:\n # Initializing the kernel\n try:\n kernel = stats.gaussian_kde(temp[:,5],bw_method=0.05)\n except:\n print(temp[:,5])\n exit(-1)\n\n # Evaluating the values\n to_plot2 = kernel.evaluate(np.linspace(-20,180,500))\n\n # Threshold ==0.001\n bool_vec = [~(to_plot2<=0.001)]\n\n # Selecting valid values wrt threshold\n to_plot = to_plot2[bool_vec]\n x_val = np.linspace(-20,180,500)[bool_vec]\n\n # Selecting the boundary points\n req_indexes = np.where((np.diff(x_val)<=0.5)==False)[0]+1\n markers_on = x_val[req_indexes].round(0).astype(int).tolist()\n markers_on = [0]+markers_on+[180]\n\n # Calculate the dense indexs\n to_dense = np.split(to_plot,req_indexes)\n try:\n max_dense = [np.max(j) for j in to_dense]\n except:\n list_of_valid_density.append(temp)\n continue\n\n # Selecting the valid objects\n for i in range(len(markers_on)-1):\n if max_dense[i]>=0.01:\n temp1 = temp[ (temp[:,5]>=markers_on[i]) & (temp[:,5]<=markers_on[i+1]) ]\n if len(temp1)>=15:\n list_of_valid_density.append(temp1)\n \n return list_of_valid_density\n\ndef removeOutWithDBSCAN(temp1, top=False, count=0, objects=False):\n if top:\n data = np.array(\n list(\n zip(\n np.array(temp1[:,0]), \n np.array(temp1[:,2])\n )\n )\n )\n else:\n data = np.array(\n list(\n zip(\n 
np.array(temp1[:,0]), \n np.array(temp1[:,1]), \n np.array(temp1[:,2])\n )\n )\n )\n\n # Fitting the model \n clustering = DBSCAN(eps=1, min_samples=16).fit(data)\n labels = clustering.labels_\n\n # Appending the labels to the array\n temp1 = np.hstack([temp1, labels.reshape(-1,1)])\n temp1 = temp1[~(labels==-1)]\n\n if objects:\n temp1[:,6] = temp1[:,6]+count\n return temp1\n\ndef get_valid_objects(list_of_densities):\n objects = [] # Return this list\n count = 0 # TO count the number of objects\n\n for each in list_of_densities:\n temp1 = removeOutWithDBSCAN(each, True,count, True)\n if len(temp1)>=1:\n objs = np.unique(temp1[:,6])\n count+=len(objs)\n objects += [ temp1[temp1[:,6]==i] for i in objs ]\n \n return objects\n \n\n\ndef prep_obj_data(temp, use_db = False):\n \"\"\"\n Takes in the data frame and return the list of the valid objects\n \n Parameters: \n temp (numpy.ndarray): The input is a list of numpy array with X,Y,Z,radius \n \n Returns: \n list_of_valid_density (list): list of objects, where each object is a numpy.ndarray with X,Y,Z,radius,angles, top_radius values \n \"\"\"\n # Finding the list of sectors \n list_of_sectors = get_different_sectors(temp)\n\n # Finding the list of objects\n list_of_valid_density = get_valid_density(list_of_sectors)\n\n # Using the dbscan to get the objects\n if use_db:\n list_of_valid_density = get_valid_objects(list_of_valid_density)\n\n return list_of_valid_density" ]
[ [ "numpy.split", "numpy.sqrt", "numpy.linspace", "numpy.unique", "sklearn.cluster.DBSCAN", "numpy.arctan2", "numpy.max", "scipy.stats.gaussian_kde", "numpy.diff", "numpy.array", "numpy.sum" ] ]