repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
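The header above describes the row schema: a repository name plus four parallel per-file lists (commit hashes, file paths, full source text, and the API names detected in each file). Below is a minimal sketch of how one row of this table might be modeled in Python; the class and field names are illustrative assumptions and are not part of the dataset itself.

```python
from dataclasses import dataclass
from typing import List


@dataclass
class DatasetRow:
    """One row of the table: a repo plus parallel per-file lists.

    Field names mirror the column headers; the class itself is an
    illustrative assumption, not something defined by the dataset.
    """
    repo_name: str          # e.g. "lunayach/texar-pytorch"
    hexsha: List[str]       # commit hash for each file
    file_path: List[str]    # path of each file within the repo
    code: List[str]         # full source text of each file
    apis: List[List[str]]   # API names detected in each file

    def __post_init__(self) -> None:
        # The four per-file lists are expected to stay aligned,
        # one entry per file in the row.
        n = len(self.hexsha)
        assert len(self.file_path) == n
        assert len(self.code) == n
        assert len(self.apis) == n
```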
lunayach/texar-pytorch
|
[
"ac3e334e491f524dd01654b07af030fa20c88b34",
"ac3e334e491f524dd01654b07af030fa20c88b34",
"ac3e334e491f524dd01654b07af030fa20c88b34"
] |
[
"texar/core/optimization_test.py",
"texar/modules/decoders/transformer_decoders.py",
"texar/core/attention_mechanism_test.py"
] |
[
"\"\"\"\nUnit tests for various optimization related utilities.\n\"\"\"\n\nimport unittest\n\nimport torch\n\nfrom texar.core.optimization import *\n\n\nclass OptimizationTest(unittest.TestCase):\n r\"\"\"Test optimization.\n \"\"\"\n\n def setUp(self):\n N, D_in, H, D_out = 64, 100, 10, 1\n\n self.x = torch.randn(N, D_in)\n self.y = torch.randn(N, D_out)\n\n self.model = torch.nn.Sequential(\n torch.nn.Linear(D_in, H),\n torch.nn.ReLU(),\n torch.nn.Linear(H, D_out),)\n\n self.loss_fn = torch.nn.MSELoss(reduction='sum')\n\n def test_get_optimizer(self):\n r\"\"\"Tests get_optimizer.\n \"\"\"\n default_optimizer = get_optimizer(params=[torch.tensor(1)],\n hparams=None)\n self.assertIsInstance(default_optimizer, torch.optim.Adam)\n\n hparams = {\n \"optimizer\": {\n \"type\": \"RMSprop\",\n \"kwargs\": {\n \"lr\": 0.001,\n \"alpha\": 0.99,\n \"eps\": 1e-8,\n \"weight_decay\": 0,\n \"momentum\": 0,\n \"centered\": False\n }\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n \"name\": None\n }\n\n rmsprop_optimizer = get_optimizer(params=[torch.tensor(1)],\n hparams=hparams)\n self.assertIsInstance(rmsprop_optimizer, torch.optim.RMSprop)\n\n hparams = {\n \"optimizer\": {\n \"type\": torch.optim.SGD,\n \"kwargs\": {\n \"lr\": 0.001,\n \"weight_decay\": 0,\n \"momentum\": 0\n }\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n \"name\": None\n }\n\n sgd_optimizer = get_optimizer(params=[torch.tensor(1)],\n hparams=hparams)\n self.assertIsInstance(sgd_optimizer, torch.optim.SGD)\n\n def test_get_scheduler(self):\n r\"\"\"Tests get_scheduler.\n \"\"\"\n optimizer = get_optimizer(params=[torch.tensor(1)], hparams=None)\n\n default_scheduler = get_scheduler(optimizer=optimizer,\n hparams=None)\n self.assertEqual(default_scheduler, None)\n\n hparams = {\n \"optimizer\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"learning_rate_decay\": {\n \"type\": \"ExponentialLR\",\n \"kwargs\": {\n \"gamma\": 0.99\n }\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n \"name\": None\n }\n\n scheduler = get_scheduler(optimizer=optimizer,\n hparams=hparams)\n self.assertIsInstance(scheduler, torch.optim.lr_scheduler.ExponentialLR)\n\n hparams = {\n \"optimizer\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"learning_rate_decay\": {\n \"type\": torch.optim.lr_scheduler.ExponentialLR,\n \"kwargs\": {\n \"gamma\": 0.99\n }\n },\n \"gradient_clip\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_noise_scale\": None,\n \"name\": None\n }\n\n scheduler = get_scheduler(optimizer=optimizer,\n hparams=hparams)\n self.assertIsInstance(scheduler, torch.optim.lr_scheduler.ExponentialLR)\n\n def test_get_grad_clip_fn(self):\n r\"\"\"Tests get_grad_clip_fn.\n \"\"\"\n default_grad_clip_fn = get_grad_clip_fn(hparams=None)\n self.assertEqual(default_grad_clip_fn, None)\n\n hparams = {\n \"optimizer\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": \"clip_grad_norm_\",\n \"kwargs\": {\n \"max_norm\": 10,\n \"norm_type\": 2\n }\n },\n \"gradient_noise_scale\": None,\n \"name\": None\n }\n\n grad_clip_fn = get_grad_clip_fn(hparams=hparams)\n if not callable(grad_clip_fn):\n raise ValueError(\"grad_clip_fn is not callable\")\n\n 
hparams = {\n \"optimizer\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"learning_rate_decay\": {\n \"type\": \"\",\n \"kwargs\": {}\n },\n \"gradient_clip\": {\n \"type\": torch.nn.utils.clip_grad_norm_,\n \"kwargs\": {\n \"max_norm\": 10,\n \"norm_type\": 2\n }\n },\n \"gradient_noise_scale\": None,\n \"name\": None\n }\n\n grad_clip_fn = get_grad_clip_fn(hparams=hparams)\n if not callable(grad_clip_fn):\n raise ValueError(\"grad_clip_fn is not callable\")\n\n def test_get_train_op(self):\n r\"\"\"Tests get_train_op.\n \"\"\"\n hparams = {\n \"optimizer\": {\n \"type\": torch.optim.SGD,\n \"kwargs\": {\n \"lr\": 0.001\n }\n },\n \"learning_rate_decay\": {\n \"type\": torch.optim.lr_scheduler.ExponentialLR,\n \"kwargs\": {\n \"gamma\": 0.99\n }\n },\n \"gradient_clip\": {\n \"type\": torch.nn.utils.clip_grad_norm_,\n \"kwargs\": {\n \"max_norm\": 10,\n \"norm_type\": 2\n }\n },\n \"gradient_noise_scale\": None,\n \"name\": None\n }\n\n optimizer = get_optimizer(self.model.parameters(), hparams)\n train_op = get_train_op(optimizer, hparams)\n\n for t in range(50):\n y_pred = self.model(self.x)\n loss = self.loss_fn(y_pred, self.y)\n loss.backward()\n train_op()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2019 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTransformer decoder.\n\"\"\"\n\nfrom typing import Callable, Dict, NamedTuple, Optional, Tuple, Union\n\nimport torch\nfrom torch import nn\n\nfrom texar.core import layers\nfrom texar.hyperparams import HParams\nfrom texar.modules.decoders.decoder_base import DecoderBase, _make_output_layer\nfrom texar.modules.decoders.decoder_helpers import EmbeddingHelper, Helper\nfrom texar.modules.encoders.multihead_attention import (\n Cache, MultiheadAttentionEncoder)\nfrom texar.modules.encoders.transformer_encoder import (\n default_transformer_poswise_net_hparams)\nfrom texar.modules.networks.networks import FeedForwardNetwork\nfrom texar.utils import transformer_attentions as attn\nfrom texar.utils.beam_search import beam_search\nfrom texar.utils.shapes import mask_sequences\nfrom texar.utils.utils import sequence_mask\n\n__all__ = [\n 'TransformerDecoderOutput',\n 'TransformerDecoder',\n]\n\n\nclass TransformerDecoderOutput(NamedTuple):\n r\"\"\"The output of :class:`TransformerDecoder`.\n \"\"\"\n logits: torch.Tensor\n r\"\"\"A :tensor:`Tensor` of shape ``[batch_size, max_time, vocab_size]``\n containing the logits.\"\"\"\n sample_id: torch.LongTensor\n r\"\"\"A :tensor:`LongTensor` of shape ``[batch_size, max_time]`` containing\n the sampled token indices.\"\"\"\n\n\nclass TransformerDecoder(DecoderBase[Cache, TransformerDecoderOutput]):\n r\"\"\"Transformer decoder that applies multi-head self-attention for\n sequence decoding.\n\n It is a stack of :class:`~texar.modules.encoders.MultiheadAttentionEncoder`,\n :class:`~texar.modules.FeedForwardNetwork`, and residual connections.\n\n Args:\n vocab_size (int, optional): Vocabulary size. Required if\n :attr:`output_layer` is `None`.\n output_layer (optional): An output layer that transforms cell output\n to logits. This can be:\n\n - A callable layer, e.g., an instance of :torch_nn:`Module`.\n - A tensor. A :torch_nn:`Linear` layer will be created using the\n tensor as weights. The bias of the dense layer is determined\n by ``hparams.output_layer_bias``. This can be used to tie the\n output layer with the input embedding matrix, as proposed in\n https://arxiv.org/pdf/1608.05859.pdf.\n - `None`. A :torch_nn:`Linear` layer will be created based on\n attr:`vocab_size` and ``hparams.output_layer_bias``.\n - If no output layer is needed at the end, set\n :attr:`vocab_size` to `None` and ``output_layer`` to\n :func:`~texar.core.identity`.\n hparams (dict or HParams, optional): Hyperparameters. Missing\n hyperparameters will be set to default values. See\n :meth:`default_hparams` for the hyperparameter structure and\n default values.\n\n .. document private functions\n \"\"\"\n\n # State variables used during `dynamic_decode`. 
Assigned in `forward`.\n _state_max_decoding_length: int\n _state_context: Optional[torch.LongTensor]\n _state_context_sequence_length: Optional[torch.LongTensor]\n _state_cache: Cache\n\n def __init__(self,\n vocab_size: Optional[int] = None,\n output_layer: Optional[Union[nn.Module, torch.Tensor]] = None,\n hparams: Optional[HParams] = None):\n super().__init__(0, vocab_size, # dummy value for input_size\n input_time_major=False,\n output_time_major=False, hparams=hparams)\n self._input_size = self._hparams.dim\n\n self._output_layer, self._vocab_size = _make_output_layer(\n output_layer, vocab_size, self._input_size,\n self._hparams.output_layer_bias)\n\n self.self_attns = nn.ModuleList()\n self.self_attn_layer_norm = nn.ModuleList()\n self.enc_dec_attns = nn.ModuleList()\n self.end_dec_attn_layer_norm = nn.ModuleList()\n self.poswise_networks = nn.ModuleList()\n self.poswise_layer_norm = nn.ModuleList()\n\n if self._hparams.use_gpt_config:\n eps = 1e-5\n else:\n eps = 1e-12\n\n for _ in range(self._hparams.num_blocks):\n attn_module = MultiheadAttentionEncoder(\n self._input_size, self._hparams.multihead_attention)\n if self._hparams.dim != attn_module.output_size:\n raise ValueError(\"The output dimension of \"\n \"MultiheadEncoder should be equal \"\n \"to the dim of TransformerDecoder\")\n self.self_attns.append(attn_module)\n self.self_attn_layer_norm.append(\n nn.LayerNorm(self._input_size, eps=eps))\n\n attn_module = MultiheadAttentionEncoder(\n self._input_size, self._hparams.multihead_attention)\n if self._hparams.dim != attn_module.output_size:\n raise ValueError(\"The output dimension of \"\n \"MultiheadEncoder should be equal \"\n \"to the dim of TransformerDecoder\")\n self.enc_dec_attns.append(attn_module)\n self.end_dec_attn_layer_norm.append(\n nn.LayerNorm(self._input_size, eps=eps))\n\n poswise_network = FeedForwardNetwork(\n hparams=self._hparams.poswise_feedforward)\n if (poswise_network.hparams.layers[-1]['kwargs']['out_features']\n != self._hparams.dim):\n raise ValueError(\"The output dimension of \"\n \"FeedForwardNetwork should be equal \"\n \"to the dim of TransformerDecoder\")\n self.poswise_networks.append(poswise_network)\n self.poswise_layer_norm.append(\n nn.LayerNorm(self._input_size, eps=eps))\n\n self.final_layer_norm = nn.LayerNorm(self._input_size, eps=eps)\n self.embed_dropout = nn.Dropout(self._hparams.embedding_dropout)\n self.residual_dropout = nn.Dropout(self._hparams.residual_dropout)\n\n if self._hparams.initializer:\n # TODO: This might be different to what TensorFlow does\n initialize = layers.get_initializer(self._hparams.initializer)\n assert initialize is not None\n # Do not re-initialize LayerNorm modules.\n for name, param in self.named_parameters():\n if name.split(\".\")[-1] == \"weight\" and \"layer_norm\" not in name:\n initialize(param)\n\n @staticmethod\n def default_hparams():\n r\"\"\"Returns a dictionary of hyperparameters with default values.\n\n .. 
code-block:: python\n\n {\n # Same as in TransformerEncoder\n \"num_blocks\": 6,\n \"dim\": 512,\n \"use_gpt_config\": False,\n \"embedding_dropout\": 0.1,\n \"residual_dropout\": 0.1,\n \"poswise_feedforward\": default_transformer_poswise_net_hparams,\n \"multihead_attention\": {\n 'name': 'multihead_attention',\n 'num_units': 512,\n 'output_dim': 512,\n 'num_heads': 8,\n 'dropout_rate': 0.1,\n 'output_dim': 512,\n 'use_bias': False,\n },\n \"initializer\": None,\n \"name\": \"transformer_decoder\"\n\n # Additional for TransformerDecoder\n \"embedding_tie\": True,\n \"output_layer_bias\": False,\n \"max_decoding_length\": int(1e10),\n }\n\n Here:\n\n `\"num_blocks\"`: int\n Number of stacked blocks.\n\n `\"dim\"`: int\n Hidden dimension of the encoder.\n\n `\"use_gpt_config\"`: bool\n Whether to follow the `eps` setting of OpenAI GPT.\n\n `\"embedding_dropout\"`: float\n Dropout rate of the input word and position embeddings.\n\n `\"residual_dropout\"`: float\n Dropout rate of the residual connections.\n\n `\"poswise_feedforward\"`: dict\n Hyperparameters for a feed-forward network used in residual\n connections.\n Make sure the dimension of the output tensor is equal to ``dim``.\n\n See :func:`~texar.modules.default_transformer_poswise_net_hparams`\n for details.\n\n `\"multihead_attention\"`: dict\n Hyperparameters for the multi-head attention strategy.\n Make sure the ``output_dim`` in this module is equal to ``dim``.\n\n See :func:`~texar.modules.MultiheadAttentionEncoder.default_hparams`\n for details.\n\n `\"initializer\"`: dict, optional\n Hyperparameters of the default initializer that initializes\n variables created in this module.\n\n See :func:`~texar.core.get_initializer` for details.\n\n `\"embedding_tie\"`: bool\n Whether to use the word embedding matrix as the output layer\n that computes logits. If `False`, a new dense layer is created.\n\n `\"output_layer_bias\"`: bool\n Whether to use bias to the output layer.\n\n `\"max_decoding_length\"`: int\n The maximum allowed number of decoding steps.\n Set to a very large number of avoid the length constraint.\n Ignored if provided in :meth:`forward` or ``\"train_greedy\"``\n decoding is used.\n\n `\"name\"`: str\n Name of the module.\n \"\"\"\n dim = 512\n return {\n 'num_blocks': 6,\n 'dim': dim,\n 'use_gpt_config': False,\n 'embedding_tie': True,\n 'output_layer_bias': False,\n 'max_decoding_length': int(1e10),\n 'embedding_dropout': 0.1,\n 'residual_dropout': 0.1,\n 'poswise_feedforward': default_transformer_poswise_net_hparams(dim),\n 'multihead_attention': {\n 'name': 'multihead_attention',\n 'num_units': 512,\n 'num_heads': 8,\n 'dropout_rate': 0.1,\n 'output_dim': 512,\n 'use_bias': False,\n },\n 'initializer': None,\n 'name': \"transformer_decoder\",\n }\n\n def _inputs_to_outputs(self, inputs: torch.Tensor,\n cache: Cache) -> Tuple[torch.Tensor, Cache]:\n r\"\"\"Returns the outputs of one decoding step (for example,\n the predicted logits of the next token).\n\n :attr:`inputs` should be of shape ``[batch_size, dim]``.\n\n Returns:\n A tuple of logits and updated cache. 
Logits are of shape\n ``[batch_size, vocab_size]``.\n \"\"\"\n outputs = self._self_attention_stack(\n inputs.unsqueeze(1), memory=cache['memory'], cache=cache)\n outputs = self._output_layer(outputs)\n outputs = outputs.squeeze(1)\n return outputs, cache\n\n def forward(self, # type: ignore\n inputs: Optional[torch.Tensor] = None,\n sequence_length: Optional[torch.LongTensor] = None,\n memory: Optional[torch.Tensor] = None,\n memory_sequence_length: Optional[torch.LongTensor] = None,\n memory_attention_bias: Optional[torch.Tensor] = None,\n context: Optional[torch.Tensor] = None,\n context_sequence_length: Optional[torch.LongTensor] = None,\n helper: Optional[Helper] = None,\n decoding_strategy: str = 'train_greedy',\n max_decoding_length: Optional[int] = None,\n impute_finished: bool = False,\n infer_mode: Optional[bool] = None,\n beam_width: Optional[int] = None,\n length_penalty: float = 0.,\n **kwargs) \\\n -> Union[\n TransformerDecoderOutput,\n Tuple[TransformerDecoderOutput, torch.LongTensor],\n Dict[str, torch.Tensor]]:\n r\"\"\"Performs decoding.\n\n The interface is very similar to that of RNN decoders\n (:class:`texar.modules.RNNDecoderBase`). In particular,\n the function provides **3 ways** to specify the decoding method, with\n varying flexibility:\n\n 1. The :attr:`decoding_strategy` argument.\n\n - **\"train_greedy\"**: decoding in teacher-forcing fashion (i.e.,\n feeding ground truth to decode the next step), and for each step\n sample is obtained by taking the `argmax` of logits.\n Argument :attr:`inputs` is required for this strategy.\n :attr:`sequence_length` is optional.\n - **\"infer_greedy\"**: decoding in inference fashion (i.e., feeding\n `generated` sample to decode the next step), and for each step\n sample is obtained by taking the `argmax` of logits.\n Arguments :attr:`(start_tokens, end_token)` are\n required for this strategy, and argument\n :attr:`max_decoding_length` is optional.\n - **\"infer_sample\"**: decoding in inference fashion, and for each\n step sample is obtained by `random sampling` from the logits.\n Arguments :attr:`(start_tokens, end_token)` are required for this\n strategy, and argument :attr:`max_decoding_length` is optional.\n\n This argument is used only when arguments :attr:`helper` and\n :attr:`beam_width` are both `None`.\n\n 2. The :attr:`helper` argument: An instance of subclass of\n :class:`texar.modules.decoders.Helper`.\n This provides a superset of decoding strategies than above.\n The interface is the same as in RNN decoders.\n Please refer to :meth:`texar.modules.RNNDecoderBase.forward` for\n detailed usage and examples.\n\n Note that, here, though using a\n :class:`~texar.decoder.TrainingHelper` corresponding to the\n ``\"train_greedy\"`` strategy above, the implementation is *slower*\n than directly setting ``decoding_strategy=\"train_greedy\"`` (though\n output results are the same).\n\n Argument :attr:`max_decoding_length` is optional.\n\n 3. **Beam search**: set :attr:`beam_width` to use beam search decoding.\n Arguments :attr:`(start_tokens, end_token)` are required,\n and argument :attr:`max_decoding_length` is optional.\n\n .. warning::\n Beam search is not yet implemented. Setting :attr:`beam_width`\n to any value greater than 1 would raise a\n :exc:`NotImplementedError`\n\n Args:\n memory (optional): The memory to attend, e.g., the output of an RNN\n encoder. 
A :tensor:`Tensor` of shape\n ``[batch_size, memory_max_time, dim]``.\n memory_sequence_length (optional): A :tensor:`Tensor` of shape\n ``[batch_size]`` containing the sequence lengths for the batch\n entries in memory. Used to create attention bias of\n :attr:`memory_attention_bias` is not given. Ignored if\n :attr:`memory_attention_bias` is provided.\n memory_attention_bias (optional): A :tensor:`Tensor` of shape\n ``[batch_size, num_heads, memory_max_time, dim]``.\n An attention bias typically sets the value of a padding\n position to a large negative value for masking. If not given,\n :attr:`memory_sequence_length` is used to automatically\n create an attention bias.\n inputs (optional): Input tensor for teacher forcing decoding, of\n shape ``[batch_size, target_max_time, emb_dim]`` containing the\n target sequence word embeddings. Used when\n :attr:`decoding_strategy` is set to ``\"train_greedy\"``.\n sequence_length (optional): A :tensor:`LongTensor` of shape\n ``[batch_size]``, containing the sequence length of\n :attr:`inputs`. Tokens beyond the respective sequence length are\n masked out.\n Used when :attr:`decoding_strategy` is set to\n ``\"train_greedy\"``.\n decoding_strategy (str): A string specifying the decoding\n strategy, including ``\"train_greedy\"``, ``\"infer_greedy\"``,\n ``\"infer_sample\"``.\n Different arguments are required based on the\n strategy. See above for details. Ignored if\n :attr:`beam_width` or :attr:`helper` is set.\n beam_width (int): Set to use beam search. If given,\n :attr:`decoding_strategy` is ignored.\n length_penalty (float): Length penalty coefficient used in beam\n search decoding. Refer to https://arxiv.org/abs/1609.08144\n for more details.\n It should be larger if longer sentences are desired.\n context (optional): An :tensor:`LongTensor` of shape\n ``[batch_size, length]``, containing the starting tokens for\n decoding. If context is set, ``start_tokens`` of the\n :class:`~texar.modules.Helper` will be ignored.\n context_sequence_length (optional): Specify the length of context.\n max_decoding_length (int, optional): The maximum allowed number of\n decoding steps.\n If `None` (default), use ``\"max_decoding_length\"`` defined in\n :attr:`hparams`. Ignored in ``\"train_greedy\"`` decoding.\n impute_finished (bool): If `True`, then states for batch\n entries which are marked as finished get copied through and\n the corresponding outputs get zeroed out. This causes some\n slowdown at each time step, but ensures that the final state\n and outputs have the correct values and that backprop ignores\n time steps that were marked as finished. Ignored in\n ``\"train_greedy\"`` decoding.\n helper (optional): An instance of\n :class:`texar.modules.decoders.Helper`\n that defines the decoding strategy. 
If given,\n ``decoding_strategy`` and helper configurations in\n :attr:`hparams` are ignored.\n infer_mode (optional): If not `None`, overrides mode given by\n :attr:`self.training`.\n\n Returns:\n\n - For **\"train_greedy\"** decoding, returns an instance of\n :class:`~texar.modules.TransformerDecoderOutput` which contains\n `sample_id` and `logits`.\n\n - For **\"infer_greedy\"** and **\"infer_sample\"** decoding or\n decoding with :attr:`helper`, returns\n a tuple ``(outputs, sequence_lengths)``, where ``outputs`` is an\n instance of :class:`~texar.modules.TransformerDecoderOutput` as\n in `\"train_greedy\"`, and ``sequence_lengths`` is a\n :tensor:`LongTensor` of shape ``[batch_size]`` containing the\n length of each sample.\n\n - For **beam search** decoding, returns a ``dict`` containing keys\n ``\"sample_id\"`` and ``\"log_prob\"``.\n\n - ``\"sample_id\"`` is a :tensor:`LongTensor` of shape\n ``[batch_size, max_time, beam_width]`` containing generated\n token indexes. ``sample_id[:,:,0]`` is the highest-probable\n sample.\n - ``\"log_prob\"`` is a :tensor:`Tensor` of shape\n ``[batch_size, beam_width]`` containing the log probability\n of each sequence sample.\n \"\"\"\n\n if memory is not None:\n if memory_attention_bias is None:\n if memory_sequence_length is None:\n raise ValueError(\n \"`memory_sequence_length` is required if \"\n \"`memory_attention_bias` is not given.\")\n\n enc_padding = 1 - sequence_mask(\n memory_sequence_length, memory.size(1),\n dtype=torch.float32)\n memory_attention_bias = attn.attention_bias_ignore_padding(\n enc_padding)\n\n # record the context, which will be used in step function\n # for dynamic_decode\n if context is not None:\n if context_sequence_length is None:\n raise ValueError(\"'context_sequence_length' must not be None\"\n \"when 'context' is specified.\")\n self._state_context = context[:, 1:]\n self._state_context_sequence_length = context_sequence_length - 1\n else:\n self._state_context = None\n self._state_context_sequence_length = None\n\n # Faster code path for teacher-forcing training\n if (helper is None and beam_width is None and\n decoding_strategy == 'train_greedy'):\n if inputs is None:\n raise ValueError(\"'input' must not be none \"\n \"when using 'train_greedy' decoding strategy.\")\n if sequence_length is not None:\n inputs = mask_sequences(inputs, sequence_length)\n\n decoder_self_attention_bias = (\n attn.attention_bias_lower_triangle(inputs.size(1)))\n\n decoder_output = self._self_attention_stack(\n inputs, memory, decoder_self_attention_bias,\n memory_attention_bias, cache=None)\n logits = self._output_layer(decoder_output)\n sample_id = torch.argmax(logits, dim=-1)\n\n return TransformerDecoderOutput(logits, sample_id)\n\n # Inference code path.\n if max_decoding_length is None:\n max_decoding_length = self._hparams.max_decoding_length\n\n self._state_max_decoding_length = max_decoding_length\n\n if beam_width is None or beam_width == 1: # Inference-like decoding\n # Prepare helper\n if helper is None:\n kwargs.update(decoding_strategy=decoding_strategy)\n if context is not None:\n kwargs.update(start_tokens=context[:, 0])\n helper = self._create_or_get_helper(infer_mode, **kwargs)\n assert isinstance(helper, EmbeddingHelper)\n\n self._state_cache = self._init_cache(\n memory, memory_attention_bias,\n beam_search_decoding=False, batch_size=helper.batch_size)\n if context is not None:\n assert self._state_context is not None\n pad_length = max_decoding_length - self._state_context.size(1)\n if pad_length > 0:\n 
self._state_context = torch.cat((\n self._state_context,\n self._state_context.new_zeros(\n self._state_context.size(0), pad_length)\n ), dim=1)\n\n outputs, cache, sequence_lengths = self.dynamic_decode(\n helper, inputs=None, sequence_length=None,\n initial_state=None, max_decoding_length=max_decoding_length,\n impute_finished=impute_finished)\n del cache # not used\n\n if context is not None:\n # Here the length of sample_id will be larger than that\n # of logit by 1, because there will be a additional\n # start_token in the returned sample_id.\n # the start_id should be the first token of the\n # given context\n start_tokens = context[:, 0]\n outputs = TransformerDecoderOutput(\n logits=outputs.logits,\n sample_id=torch.cat([\n start_tokens.unsqueeze(1),\n outputs.sample_id\n ], dim=1))\n sequence_lengths = sequence_lengths + 1\n\n return outputs, sequence_lengths\n\n else: # Beam-search decoding\n # Ignore `decoding_strategy` and # assume `helper` is not set.\n if helper is not None:\n raise ValueError(\"Must not set 'beam_width' and 'helper' \"\n \"simultaneously.\")\n if context is not None:\n start_tokens = context[:, 0]\n else:\n if 'start_tokens' not in kwargs:\n raise ValueError(\n \"'start_tokens' must be specified when using\"\n \"beam search decoding.\")\n start_tokens = kwargs['start_tokens']\n _batch_size = start_tokens.size(0)\n self._state_cache = self._init_cache(\n memory, memory_attention_bias,\n beam_search_decoding=True,\n batch_size=_batch_size)\n end_token: int = kwargs.get('end_token') # type: ignore\n\n # The output format is different when running beam search.\n sample_id, log_prob = self._beam_decode(\n start_tokens,\n end_token,\n embedding_fn=kwargs['embedding'],\n beam_width=beam_width,\n length_penalty=length_penalty,\n decode_length=max_decoding_length)\n\n return {\n 'sample_id': sample_id,\n 'log_prob': log_prob\n }\n\n def _self_attention_stack(\n self, inputs: torch.Tensor,\n memory: Optional[torch.Tensor],\n decoder_self_attention_bias: Optional[torch.Tensor] = None,\n memory_attention_bias: Optional[torch.Tensor] = None,\n cache: Optional[Cache] = None) -> torch.Tensor:\n r\"\"\"Forward through the stacked multi-head attentions.\n \"\"\"\n inputs = self.embed_dropout(inputs)\n if cache is not None:\n if memory is not None:\n memory_attention_bias = cache['memory_attention_bias']\n else:\n assert decoder_self_attention_bias is not None\n\n x = inputs\n for i in range(self._hparams.num_blocks):\n layer_cache = cache['layers'][i] if cache is not None else None\n\n selfatt_output = self.self_attns[i](\n queries=self.self_attn_layer_norm[i](x),\n memory=None,\n memory_attention_bias=decoder_self_attention_bias,\n cache=layer_cache)\n x = x + self.residual_dropout(selfatt_output)\n\n if memory is not None:\n encdec_output = self.enc_dec_attns[i](\n queries=self.end_dec_attn_layer_norm[i](x),\n memory=memory,\n memory_attention_bias=memory_attention_bias)\n x = x + self.residual_dropout(encdec_output)\n\n sub_output = self.poswise_networks[i](self.poswise_layer_norm[i](x))\n x = x + self.residual_dropout(sub_output)\n\n return self.final_layer_norm(x)\n\n def _init_cache(self, memory: Optional[torch.Tensor],\n memory_attention_bias: Optional[torch.Tensor],\n beam_search_decoding: bool,\n batch_size: int) -> Cache:\n r\"\"\"Returns an initialized cache.\n\n In order to support both inference-like decoding and beam-search\n decoding, the elements of each layer must be initialized and extended\n as different structure respectively. 
Specifically, for inference-like\n decoding, a simple list is used; for beam-search decoding, a\n :tensor:`Tensor` of shape ``[batch_size, current_steps, num_units]``\n is maintained, where ``current_steps`` is the number of steps currently\n decoded.\n \"\"\"\n\n device = next(self.parameters()).device\n\n def _create_ta():\n return []\n\n def _create_empty_tensor():\n ret = torch.zeros(\n batch_size, 0, self._hparams.multihead_attention.num_units,\n dtype=torch.float, device=device)\n return ret\n\n _create_fn = (_create_empty_tensor if beam_search_decoding\n else _create_ta)\n\n cache: Cache = {\n 'memory': memory,\n 'memory_attention_bias': memory_attention_bias,\n 'layers': [{\n 'keys': _create_fn(),\n 'values': _create_fn(),\n } for _ in range(self._hparams.num_blocks)],\n }\n\n return cache\n\n def _beam_decode(self, start_tokens: torch.LongTensor, end_token: int,\n embedding_fn: Callable[\n [torch.LongTensor, torch.LongTensor], torch.Tensor],\n decode_length: int = 256, beam_width: int = 5,\n length_penalty: float = 0.6) \\\n -> Tuple[torch.Tensor, torch.Tensor]:\n\n def _symbols_to_logits_fn(ids, cache):\n batch_size = ids.size(0)\n step = ids.size(-1) - 1\n times = ids.new_full((batch_size,), step)\n inputs = embedding_fn(ids[:, -1], times)\n return self._inputs_to_outputs(inputs, cache)\n\n assert self._vocab_size is not None\n\n outputs, log_prob = beam_search(\n _symbols_to_logits_fn,\n start_tokens,\n beam_width,\n decode_length,\n self._vocab_size,\n length_penalty,\n states=self._state_cache,\n eos_id=end_token)\n\n # Ignores <BOS>\n outputs = outputs[:, :, 1:]\n # shape = [batch_size, seq_length, beam_width]\n outputs = outputs.permute(0, 2, 1)\n return outputs, log_prob\n\n @property\n def output_size(self) -> int:\n r\"\"\"Output size of one step.\n \"\"\"\n return self._input_size\n\n def initialize(self, helper: Helper, inputs: Optional[torch.Tensor],\n sequence_length: Optional[torch.LongTensor],\n initial_state: Optional[Cache]) \\\n -> Tuple[torch.ByteTensor, torch.Tensor, Cache]:\n initial_finished, initial_inputs = helper.initialize(\n inputs, sequence_length)\n state = initial_state or self._state_cache\n return initial_finished, initial_inputs, state\n\n def step(self, helper: Helper, time: int,\n inputs: torch.Tensor, state: Optional[Cache]) \\\n -> Tuple[TransformerDecoderOutput, Cache,\n torch.Tensor, torch.ByteTensor]:\n assert state is not None\n outputs, state = self._inputs_to_outputs(inputs, state)\n sample_ids = helper.sample(time=time, outputs=outputs)\n if self._state_context is not None:\n assert self._state_context_sequence_length is not None\n sample_ids = torch.where(\n self._state_context_sequence_length > time,\n self._state_context[:, time],\n sample_ids)\n\n if time + 1 == self._state_max_decoding_length:\n # Maximum decoding length reached, mark all batches as finished.\n # This requires special handling because performing lookup on\n # position embeddings with `time + 1` may result in IndexError.\n finished = torch.ones_like(sample_ids, dtype=torch.uint8)\n # Since `next_inputs` will not be used, simply create a null tensor.\n next_inputs = torch.empty(0)\n else:\n finished, next_inputs = helper.next_inputs(\n time=time, outputs=outputs, sample_ids=sample_ids)\n next_state = state\n outputs = TransformerDecoderOutput(\n logits=outputs,\n sample_id=sample_ids)\n return outputs, next_state, next_inputs, finished\n\n def finalize(self, # type: ignore\n outputs: TransformerDecoderOutput,\n final_state: Optional[Cache],\n sequence_lengths: 
torch.LongTensor) \\\n -> Tuple[TransformerDecoderOutput, Optional[Cache]]:\n # Clear state variables at end of decoding.\n del self._state_max_decoding_length\n del self._state_context\n del self._state_context_sequence_length\n del self._state_cache\n\n return super().finalize(outputs, final_state, sequence_lengths)\n",
"\"\"\"\nUnit tests for attention mechanism.\n\"\"\"\n\nimport unittest\n\nimport numpy as np\nimport torch\n\nfrom texar.core.attention_mechanism import *\n\n\nclass AttentionMechanismTest(unittest.TestCase):\n r\"\"\"Tests attention mechanism.\n \"\"\"\n\n def setUp(self):\n self._batch_size = 8\n self._max_time = 16\n self._encoder_output_size = 64\n self._attention_dim = 256\n self._memory = torch.rand(\n self._batch_size, self._max_time, self._encoder_output_size\n )\n self._memory_sequence_length = torch.tensor(\n np.random.randint(self._max_time, size=[self._batch_size]) + 1\n )\n self._attention_state = torch.rand(self._batch_size, self._max_time)\n\n def test_LuongAttention(self):\n r\"\"\"Tests `LuongAttention`\n \"\"\"\n # Case 1\n attention_mechanism = LuongAttention(\n num_units=self._attention_dim,\n encoder_output_size=self._encoder_output_size)\n\n cell_output = torch.rand(self._batch_size, self._attention_dim)\n\n attention, alignments, next_attention_state = \\\n compute_attention(\n attention_mechanism=attention_mechanism,\n cell_output=cell_output,\n attention_state=self._attention_state,\n memory=self._memory,\n attention_layer=None,\n memory_sequence_length=self._memory_sequence_length)\n\n self.assertEqual(attention.shape, torch.Size(\n [self._batch_size, self._encoder_output_size]))\n self.assertEqual(alignments.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(next_attention_state.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(len(attention_mechanism.trainable_variables), 1)\n\n # Case 2\n attention_mechanism = LuongAttention(\n num_units=self._attention_dim,\n encoder_output_size=self._encoder_output_size,\n scale=True)\n\n cell_output = torch.rand(self._batch_size, self._attention_dim)\n\n attention, alignments, next_attention_state = \\\n compute_attention(\n attention_mechanism=attention_mechanism,\n cell_output=cell_output,\n attention_state=self._attention_state,\n memory=self._memory,\n attention_layer=None,\n memory_sequence_length=self._memory_sequence_length)\n\n self.assertEqual(attention.shape, torch.Size(\n [self._batch_size, self._encoder_output_size]))\n self.assertEqual(alignments.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(next_attention_state.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(len(attention_mechanism.trainable_variables), 2)\n\n def test_BahdanauAttention(self):\n r\"\"\"Tests BahdanauAttention\n \"\"\"\n # Case 1\n attention_mechanism = BahdanauAttention(\n num_units=self._attention_dim,\n decoder_output_size=128,\n encoder_output_size=self._encoder_output_size)\n\n cell_output = torch.rand(self._batch_size, 128)\n\n attention, alignments, next_attention_state = \\\n compute_attention(\n attention_mechanism=attention_mechanism,\n cell_output=cell_output,\n attention_state=self._attention_state,\n memory=self._memory,\n attention_layer=None,\n memory_sequence_length=self._memory_sequence_length)\n\n self.assertEqual(attention.shape, torch.Size(\n [self._batch_size, self._encoder_output_size]))\n self.assertEqual(alignments.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(next_attention_state.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(len(attention_mechanism.trainable_variables), 3)\n\n # Case 2\n attention_mechanism = BahdanauAttention(\n num_units=self._attention_dim,\n decoder_output_size=128,\n encoder_output_size=self._encoder_output_size,\n 
normalize=True)\n\n cell_output = torch.rand(self._batch_size, 128)\n\n attention, alignments, next_attention_state = \\\n compute_attention(\n attention_mechanism=attention_mechanism,\n cell_output=cell_output,\n attention_state=self._attention_state,\n memory=self._memory,\n attention_layer=None,\n memory_sequence_length=self._memory_sequence_length)\n\n self.assertEqual(attention.shape, torch.Size(\n [self._batch_size, self._encoder_output_size]))\n self.assertEqual(alignments.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(next_attention_state.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(len(attention_mechanism.trainable_variables), 5)\n\n def test_LuongMonotonicAttention(self):\n r\"\"\"Tests LuongMonotonicAttention\n \"\"\"\n # Case 1\n attention_mechanism = LuongMonotonicAttention(\n num_units=self._attention_dim,\n encoder_output_size=self._encoder_output_size)\n\n cell_output = torch.rand(self._batch_size, self._attention_dim)\n\n attention, alignments, next_attention_state = \\\n compute_attention(\n attention_mechanism=attention_mechanism,\n cell_output=cell_output,\n attention_state=self._attention_state,\n memory=self._memory,\n attention_layer=None,\n memory_sequence_length=self._memory_sequence_length)\n\n self.assertEqual(attention.shape, torch.Size(\n [self._batch_size, self._encoder_output_size]))\n self.assertEqual(alignments.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(next_attention_state.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(len(attention_mechanism.trainable_variables), 2)\n\n # Case 2\n attention_mechanism = LuongMonotonicAttention(\n num_units=self._attention_dim,\n encoder_output_size=self._encoder_output_size,\n scale=True)\n\n cell_output = torch.rand(self._batch_size, self._attention_dim)\n\n attention, alignments, next_attention_state = \\\n compute_attention(\n attention_mechanism=attention_mechanism,\n cell_output=cell_output,\n attention_state=self._attention_state,\n memory=self._memory,\n attention_layer=None,\n memory_sequence_length=self._memory_sequence_length)\n\n self.assertEqual(attention.shape, torch.Size(\n [self._batch_size, self._encoder_output_size]))\n self.assertEqual(alignments.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(next_attention_state.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(len(attention_mechanism.trainable_variables), 3)\n\n def test_BahdanauMonotonicAttention(self):\n r\"\"\"Tests BahdanauMonotonicAttention\n \"\"\"\n # Case 1\n attention_mechanism = BahdanauMonotonicAttention(\n num_units=self._attention_dim,\n decoder_output_size=128,\n encoder_output_size=self._encoder_output_size)\n\n cell_output = torch.rand(self._batch_size, 128)\n\n attention, alignments, next_attention_state = \\\n compute_attention(\n attention_mechanism=attention_mechanism,\n cell_output=cell_output,\n attention_state=self._attention_state,\n memory=self._memory,\n attention_layer=None,\n memory_sequence_length=self._memory_sequence_length)\n\n self.assertEqual(attention.shape, torch.Size(\n [self._batch_size, self._encoder_output_size]))\n self.assertEqual(alignments.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(next_attention_state.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(len(attention_mechanism.trainable_variables), 4)\n\n # Case 2\n attention_mechanism = BahdanauMonotonicAttention(\n 
num_units=self._attention_dim,\n decoder_output_size=128,\n encoder_output_size=self._encoder_output_size,\n normalize=True)\n\n cell_output = torch.rand(self._batch_size, 128)\n\n attention, alignments, next_attention_state = \\\n compute_attention(\n attention_mechanism=attention_mechanism,\n cell_output=cell_output,\n attention_state=self._attention_state,\n memory=self._memory,\n attention_layer=None,\n memory_sequence_length=self._memory_sequence_length)\n\n self.assertEqual(attention.shape, torch.Size(\n [self._batch_size, self._encoder_output_size]))\n self.assertEqual(alignments.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(next_attention_state.shape, torch.Size(\n [self._batch_size, self._max_time]))\n self.assertEqual(len(attention_mechanism.trainable_variables), 6)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.nn.ReLU",
"torch.tensor",
"torch.randn"
],
[
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.ModuleList",
"torch.ones_like",
"torch.empty",
"torch.argmax",
"torch.where"
],
[
"torch.Size",
"torch.rand",
"numpy.random.randint"
]
] |
GeoscienceAustralia/anuga_core
|
[
"372e21a5c1c88867437374c851f1ff629bd3dab3"
] |
[
"anuga/parallel/tests/test_parallel_sw_flow_low_froude_0.py"
] |
[
"\"\"\"\nSimple water flow example using ANUGA\n\nWater driven up a linear slope and time varying boundary,\nsimilar to a beach environment\n\nThis is a very simple test of the parallel algorithm using the simplified parallel API\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\n\n\n#------------------------------------------------------------------------------\n# Import necessary modules\n#------------------------------------------------------------------------------\nfrom builtins import range\nfrom past.utils import old_div\nfrom future.utils import raise_\nimport unittest\nimport os\nimport sys\n#import pypar\nimport numpy as num\n\nimport anuga\n\nfrom anuga import Domain\nfrom anuga import Reflective_boundary\nfrom anuga import Dirichlet_boundary\nfrom anuga import Time_boundary\nfrom anuga import Transmissive_boundary\nfrom anuga import rectangular_cross_domain\n\nfrom anuga import distribute, myid, numprocs, send, receive, barrier, finalize\n\n#--------------------------------------------------------------------------\n# Setup parameters\n#--------------------------------------------------------------------------\nyieldstep = 0.25\nfinaltime = 1.0\nnprocs = 4\nN = 29\nM = 29\nverbose = False\n\n#---------------------------------\n# Setup Functions\n#---------------------------------\ndef topography(x,y):\n return old_div(-x,2)\n\n###########################################################################\n# Setup Test\n##########################################################################\ndef run_simulation(parallel=False, G = None, seq_interpolation_points=None, verbose=False):\n\n #--------------------------------------------------------------------------\n # Setup computational domain and quantities\n #--------------------------------------------------------------------------\n domain = rectangular_cross_domain(M, N)\n\n\n domain.set_quantity('elevation', topography) # Use function for elevation\n domain.set_quantity('friction', 0.0) # Constant friction\n domain.set_quantity('stage', expression='elevation') # Dry initial stage\n\n domain.set_low_froude(0)\n\n domain.set_name('runup') # Set sww filename\n domain.set_datadir('.') # Set output dir\n\n #--------------------------------------------------------------------------\n # Create the parallel domain\n #--------------------------------------------------------------------------\n if parallel:\n if myid == 0 and verbose : print('DISTRIBUTING PARALLEL DOMAIN')\n domain = distribute(domain, verbose=False)\n\n #--------------------------------------------------------------------------\n # Setup domain parameters\n #--------------------------------------------------------------------------\n\n\n domain.set_quantities_to_be_stored(None)\n\n\n #------------------------------------------------------------------------------\n # Setup boundary conditions\n # This must currently happen *AFTER* domain has been distributed\n #------------------------------------------------------------------------------\n\n Br = Reflective_boundary(domain) # Solid reflective wall\n Bd = Dirichlet_boundary([-0.2,0.,0.]) # Constant boundary values\n\n # Associate boundary tags with boundary objects\n domain.set_boundary({'left': Br, 'right': Bd, 'top': Br, 'bottom': Br})\n\n #------------------------------------------------------------------------------\n # Find which sub_domain in which the interpolation points are located\n #\n # Sometimes the interpolation points sit exactly\n # between two centroids, so in the parallel 
run we\n # reset the interpolation points to the centroids\n # found in the sequential run\n #------------------------------------------------------------------------------\n interpolation_points = [[0.4,0.5], [0.6,0.5], [0.8,0.5], [0.9,0.5]]\n\n\n gauge_values = []\n tri_ids = []\n for i, point in enumerate(interpolation_points):\n gauge_values.append([]) # Empty list for timeseries\n\n #if is_inside_polygon(point, domain.get_boundary_polygon()):\n #print \"Point \", myid, i, point\n try:\n k = domain.get_triangle_containing_point(point)\n if domain.tri_full_flag[k] == 1:\n tri_ids.append(k)\n else:\n tri_ids.append(-1)\n except:\n tri_ids.append(-2)\n\n #print \" tri_ids \",myid, i, tri_ids[-1]\n\n if verbose: print('P%d has points = %s' %(myid, tri_ids))\n\n\n c_coord = domain.get_centroid_coordinates()\n interpolation_points = []\n for id in tri_ids:\n if id<1:\n if verbose: print('WARNING: Interpolation point not within the domain!')\n interpolation_points.append(c_coord[id,:])\n\n #------------------------------------------------------------------------------\n # Evolve system through time\n #------------------------------------------------------------------------------\n time = []\n\n if parallel:\n if myid == 0 and verbose: print('PARALLEL EVOLVE')\n else:\n if myid == 0 and verbose: print('SEQUENTIAL EVOLVE')\n\n\n for t in domain.evolve(yieldstep = yieldstep, finaltime = finaltime):\n if myid == 0 and verbose : domain.write_time()\n\n # Record time series at known points\n time.append(domain.get_time())\n\n stage = domain.get_quantity('stage')\n\n for i in range(4):\n if tri_ids[i] > -1:\n gauge_values[i].append(stage.centroid_values[tri_ids[i]])\n\n\n #----------------------------------------\n # Setup test arrays during sequential run\n #----------------------------------------\n if not parallel:\n G = []\n for i in range(4):\n G.append(gauge_values[i])\n\n success = True\n\n for i in range(4):\n if tri_ids[i] > -1:\n #print num.max(num.array(gauge_values[i])- num.array(G[i]))\n success = success and num.allclose(gauge_values[i], G[i])\n\n assert_(success)\n\n return G, interpolation_points\n\n# Test an nprocs-way run of the shallow water equations\n# against the sequential code.\n\nclass Test_parallel_sw_flow(unittest.TestCase):\n def test_parallel_sw_flow(self):\n if verbose : print(\"Expect this test to fail if not run from the parallel directory.\")\n\n cmd = anuga.mpicmd(os.path.abspath(__file__))\n result = os.system(cmd)\n\n # Just use the normal Python assert\n msg = 'Result == %i, expected 0' % result\n assert result == 0, msg\n\n# Because we are doing assertions outside of the TestCase class\n# the PyUnit defined assert_ function can't be used.\n# FIXME (Ole): Why not use the normal Python assert?\ndef assert_(condition, msg=\"Assertion Failed\"):\n if condition == False:\n #pypar.finalize()\n raise_(AssertionError, msg)\n\nif __name__==\"__main__\":\n if numprocs == 1:\n runner = unittest.TextTestRunner()\n suite = unittest.makeSuite(Test_parallel_sw_flow, 'test')\n runner.run(suite)\n else:\n\n #------------------------------------------\n # Run the sequential code on each processor\n # and save results at 4 gauge stations to\n # array G\n #------------------------------------------\n barrier()\n if myid == 0 and verbose: print('SEQUENTIAL START')\n\n G , interpolation_points = run_simulation(parallel=False,verbose=verbose)\n G = num.array(G,num.float)\n\n barrier()\n\n #------------------------------------------\n # Run the code code and compare sequential\n # 
results at 4 gauge stations\n #------------------------------------------\n if myid ==0 and verbose: print('PARALLEL START')\n\n from anuga.utilities.parallel_abstraction import global_except_hook\n import sys\n sys.excepthook = global_except_hook\n\n run_simulation(parallel=True, G=G, seq_interpolation_points = interpolation_points, verbose= verbose)\n\n finalize()\n"
] |
[
[
"numpy.allclose",
"numpy.array"
]
] |
jkortner/ml-ops
|
[
"c0b6d4ce76008a325cfb49e87f2be40a22b62b3e"
] |
[
"sandbox/ml/train.py"
] |
[
"import mlflow\nimport mlflow.sklearn\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import BernoulliNB\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\n\n\ndef get_X_y(df, target):\n\n # sklearn split\n train, test = train_test_split(df)\n\n # X, y split\n X_train = train.drop([target], axis=1)\n X_test = test.drop([target], axis=1)\n y_train = train[[target]]\n y_test = test[[target]]\n\n return X_train, X_test, y_train, y_test\n\n\ndef metrics(y, y_hat):\n\n accuracy = accuracy_score(y, y_hat)\n\n return accuracy\n\n\nif __name__ == \"__main__\":\n # get zoo.data from:\n # https://archive.ics.uci.edu/ml/datasets/Zoo\n # COLS = [\"name\", \"hair\", \"feathers\", \"eggs\", \"milk\", \"airborne\", \"aquatic\", \n # \"predator\", \"toothed\", \"backbone\", \"breathes\", \"venomous\", \"fins\", \n # \"legs\", \"tail\", \"domestic\", \"catsize\", \"class\"]\n # DF = pd.read_csv('zoo.data', sep = ',', names=COLS)\n # print(DF)\n # DF.to_csv('zoo.csv', index=False)\n\n # load zoo.csv\n df = pd.read_csv('zoo.csv')\n df = df.drop(columns=['name'])\n \n # split df into training and test sets\n X_train, X_test, y_train, y_test = get_X_y(df, 'class')\n\n with mlflow.start_run():\n\n clf = BernoulliNB()\n clf.fit(X=X_train, y=y_train.values.ravel())\n\n y_hat = clf.predict(X_test)\n\n accuracy = metrics(y_test, y_hat)\n print('Accuracy: %s' % accuracy)\n\n mlflow.log_metric(\"accuracy\", accuracy)\n mlflow.sklearn.log_model(clf, \"model\")\n"
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"sklearn.naive_bayes.BernoulliNB",
"sklearn.metrics.accuracy_score"
]
] |
fossabot/autofocus
|
[
"45954416011eca7c3f25a548bfe5017a083d7593"
] |
[
"autofocus/predict/example_post.py"
] |
[
"# Examples of how to make requests agains the image classification endpoints\n# Note:\n# 1. This assumes that the image_classifier_api is running (i.e., using docker compose up)\n# 2. It also assumes that the api address is at 127.0.0.1 (which should be the case)\nimport os\n\nimport pandas as pd\nimport requests\n\n\n#####################\n# SINGLE FILE EXAMPLE#\n#####################\n\"\"\"\nThe example below illustrates how to pass a set of files to the single\nfile endpoint. If you have a lot of images, this will be a lot slower\nthan zipping those images into a single file and using the predict_zip\nendpoint.\n\"\"\"\n\n\ndef allowed_file(filename, allowed_extensions):\n \"\"\"\n Check for whether a filename is in the ALLOWED_EXTENSIONS\n\n Parameters\n ----------\n filename (str): filename to check\n\n Returns\n -------\n bool: whether the filename is in allowed extensions\n \"\"\"\n return \".\" in filename and filename.rsplit(\".\", 1)[1] in allowed_extensions\n\n\ndef find_image_files(search_dir, img_extensions=[\"jpeg\", \"jpg\", \"png\", \"bmp\", \"gif\"]):\n \"\"\"\n Find all image files recursively starting in search dir\n\n Parameters\n ----------\n search_dir(str): path of directory to start from\n img_extensions(list): file extensions for image files\n\n Returns\n -------\n file_list(list): list of paths containing img_extensions\n \"\"\"\n file_list = [\n os.path.join(dp, f)\n for dp, dn, fn in os.walk(os.path.expanduser(search_dir))\n for f in fn\n ]\n return [x for x in file_list if allowed_file(x, img_extensions)]\n\n\nsearch_dir = \"/Users/dacheson/repos/image-classifier-api/\"\nimage_files = find_image_files(search_dir)\n\n# This is the endpoint\nuri = \"http://127.0.0.1/predict\"\n\n# Loop through all image files and get the response\nresponse_list = list()\nfor img_path in image_files:\n response = requests.post(uri, files={\"file\": open(img_path, \"rb\")})\n if response.status_code == 200:\n response_list.append(response.json())\n\n\n# combine all predictions into a single data.table\npredictions = pd.DataFrame(response_list)\n\n#################\n# ZipFile Example#\n#################\n\n\"\"\"\nThe example below illustrates how to pass a single zipfile to the\npredict_zip endpoint. This will be much faster if you have a lot of\nimages, although therer may be some limitations on the size of the file\nyou send in. Note: that the zipfile enpoint can handle directories and\nsubdirectories, as well as files that aren't images.\n\"\"\"\n\nuri = \"http://127.0.0.1/predict_zip\"\nzipfile = \"/Users/dacheson/repos/image-classifier-api/app/test.zip\"\n\nresponse = requests.post(uri, files={\"file\": open(zipfile, \"rb\")})\n\n# response is returned as a list of objects, so just combine into a dataframe\npredictions = pd.DataFrame(response.json())\n"
] |
[
[
"pandas.DataFrame"
]
] |
nirvaank/pyqmc
|
[
"a92c926d1008e466409cdefaab74d343061a30b4"
] |
[
"tests/integration/test_twist.py"
] |
[
"import numpy as np\nimport pyqmc.api as pyq\nfrom pyqmc.slater import Slater\nfrom pyqmc.pbc import enforce_pbc\nfrom pyqmc.coord import PeriodicConfigs\n\n\ndef test_cubic_with_ecp(li_cubic_ccecp, kind=1):\n cell, mf = li_cubic_ccecp\n runtest(cell, mf, kind=kind)\n\n\ndef test_noncubic(diamond_primitive, kind=1):\n cell, mf = diamond_primitive\n runtest(cell, mf, kind=kind)\n\n\ndef runtest(mol, mf, kind=0):\n kpt = mf.kpts[kind]\n twist = np.dot(kpt, mol.lattice_vectors().T / (2 * np.pi))\n\n wf0 = Slater(mol, mf)\n wft = Slater(mol, mf, twist=twist)\n\n #####################################\n ## compare values across boundary\n ## psi, KE, ecp,\n #####################################\n nconfig = 50\n coords = pyq.initial_guess(mol, nconfig, 1)\n epos, wrap = enforce_pbc(coords.lvecs, coords.configs)\n coords = PeriodicConfigs(epos, coords.lvecs)\n\n shift_ = np.random.randint(10, size=coords.configs.shape) - 5\n phase = np.exp(2j * np.pi * np.einsum(\"ijk,k->ij\", shift_, twist))\n\n shift = np.dot(shift_, mol.lattice_vectors())\n epos, wrap = enforce_pbc(coords.lvecs, epos + shift)\n newcoords = PeriodicConfigs(epos, coords.lvecs, wrap=wrap)\n\n assert np.linalg.norm(newcoords.configs - coords.configs) < 1e-12\n\n ph0, val0 = wf0.recompute(coords)\n pht, valt = wft.recompute(coords)\n enacc = pyq.EnergyAccumulator(mol, threshold=np.inf)\n np.random.seed(0)\n en0 = enacc(coords, wf0)\n np.random.seed(0)\n ent = enacc(coords, wft)\n\n e = 0\n rat0 = wf0.testvalue(e, newcoords.electron(e))\n assert np.linalg.norm(rat0 - 1) < 1e-9, rat0 - 1\n ratt = wft.testvalue(e, newcoords.electron(e))\n rattdiff = ratt - phase[:, e]\n print(\"phase\", phase[:, e])\n assert np.linalg.norm(rattdiff) < 1e-9, [\n np.round(rattdiff, 10),\n np.amax(np.abs(rattdiff)),\n ]\n\n ph0new, val0new = wf0.recompute(newcoords)\n phtnew, valtnew = wft.recompute(newcoords)\n np.random.seed(0)\n en0new = enacc(newcoords, wf0)\n np.random.seed(0)\n entnew = enacc(newcoords, wft)\n\n assert np.linalg.norm(ph0 - ph0new) < 1e-11\n assert np.linalg.norm(pht * phase.prod(axis=1) - phtnew) < 1e-11, (\n pht * phase.prod(axis=1) - phtnew\n )\n assert np.linalg.norm(val0 - val0new) < 1e-11, np.linalg.norm(val0 - val0new)\n assert np.linalg.norm(valt - valtnew) < 1e-11, np.linalg.norm(valt - valtnew)\n\n for k in en0.keys():\n diff0 = en0[k] - en0new[k]\n difft = ent[k] - entnew[k]\n if k == \"ecp\":\n for l, diff in [(\"0\", diff0), (\"t\", difft)]:\n mad = np.mean(np.abs(diff))\n if True: # mad > 1e-12:\n print(\"ecp%s diff\" % l, mad, np.linalg.norm(diff))\n assert mad < 1e-3, diff\n else:\n assert np.mean(np.abs(diff0)) < 1e-6, diff0\n assert np.mean(np.abs(difft)) < 1e-6, difft\n"
] |
[
[
"numpy.linalg.norm",
"numpy.random.seed",
"numpy.round",
"numpy.einsum",
"numpy.random.randint",
"numpy.abs"
]
] |
PASTAplus/dex
|
[
"d04fcf756b77856f4be0585dd7f5d38795751247",
"d04fcf756b77856f4be0585dd7f5d38795751247"
] |
[
"tools/prepopulate_caches.py",
"dex/util.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"Prepopulate caches\"\"\"\nimport collections\nimport contextlib\nimport csv\nimport logging\nimport os\nimport pathlib\nimport sys\nimport time\nimport unittest.mock\n\nimport pandas as pd\nimport pandas_profiling.model.base\n\nimport pandas_profiling.model.describe\n\n# WALK_ROOT_PATH = '/pasta/data/backup/data1'\nWALK_ROOT_PATH = './test_csv'\n\nSOURCE_CSV_PATH = 'sources.csv'\nCACHE_CSV_PATH = 'cache2.csv'\n\nSOURCE_FIELD_LIST = ['path', 'size_bytes']\nCACHE_FIELD_LIST = [\n 'path',\n 'plot_count',\n 'continuous_variable_count',\n 'continuous_variable_list',\n]\n\nSourceRow = collections.namedtuple('SourceRow', SOURCE_FIELD_LIST)\nCacheRow = collections.namedtuple('CacheRow', CACHE_FIELD_LIST)\n\n\nlog = logging.getLogger(__name__)\n\n\ndef main():\n logging.basicConfig(\n format='%(levelname)-8s %(message)s',\n level=logging.DEBUG,\n stream=sys.stdout,\n )\n\n # log.debug('debug')\n # log.info('info')\n # log.error('error')\n\n source_path = pathlib.Path(SOURCE_CSV_PATH)\n if not source_path.exists() or source_path.stat().st_size == 0:\n create_source_csv()\n\n # with pathlib.Path(CACHE_CSV_PATH).open('w', newline='') as csv_file:\n with DictWriter(\n CACHE_CSV_PATH,\n CACHE_FIELD_LIST,\n row_fn=lambda x: x._asdict(),\n is_write=True,\n ) as cache_writer:\n with DictReader(\n # csv_path, field_list, row_fn, is_write\n SOURCE_CSV_PATH,\n SOURCE_FIELD_LIST,\n row_fn=SourceRow,\n is_write=False,\n ) as source_reader:\n for row_tup in source_reader:\n file_path = pathlib.Path(row_tup.path.strip())\n try:\n create_cache(file_path, cache_writer)\n except Exception as e:\n # log.error(f'Error: {file_path}: {repr(e)}')\n # raise\n pass\n\n\ndef create_source_csv():\n log.debug('Creating CSV with list of CSV files to process...')\n with DictWriter(\n SOURCE_CSV_PATH,\n SOURCE_FIELD_LIST,\n row_fn=lambda x: x._asdict(),\n is_write=True,\n ) as source_writer:\n for root_dir, dir_list, file_list in os.walk(WALK_ROOT_PATH):\n dir_list[:] = list(sorted(dir_list))\n file_list[:] = list(sorted(file_list))\n for file_name in file_list:\n file_path = pathlib.Path(root_dir, file_name)\n\n # log.debug(f'Found: {file_path.as_posix()}')\n # log.debug(f'{file_path.suffix.lower()}')\n\n # if file_path.suffix.lower() != '.csv':\n # continue\n\n source_writer.writerow(\n SourceRow(\n path=file_path.as_posix(),\n size_bytes=file_path.stat().st_size,\n )\n )\n\n\ndef create_cache(csv_path, csv_writer):\n log.debug(f'create_cache() csv_path={csv_path}, csv_writer={repr(csv_writer)}')\n df = pd.read_csv(csv_path)\n # log.debug('-' * 100)\n # log.debug(f'CSV: {csv_path}')\n # '/home/dahl/dev/dex/test-pandas-profiling-interactions-full.csv'\n description_dict = pandas_profiling.model.describe.get_series_descriptions(\n df, unittest.mock.Mock()\n )\n # log.debug(description_dict)\n variables = {column: description[\"type\"] for column, description in description_dict.items()}\n continuous_variable_list = [\n column\n for column, t in variables.items()\n if t == pandas_profiling.model.base.Variable.TYPE_NUM\n ]\n if not continuous_variable_list:\n return\n csv_writer.writerow(\n CacheRow(\n path=csv_path,\n plot_count=len(continuous_variable_list) ** 2,\n continuous_variable_count=len(continuous_variable_list),\n continuous_variable_list=','.join(continuous_variable_list),\n )\n )\n\n\ndef csv_wrapper(csv_class):\n class CSVWrapper(csv_class):\n def __init__(self, csv_path, field_list, row_fn, is_write, *a, **kw):\n self.csv_path = pathlib.Path(csv_path)\n log.info(f'CSV file: 
{self.csv_path.as_posix()}')\n\n self.es = contextlib.ExitStack()\n self.f = self.es.enter_context(self.csv_path.open('w' if is_write else 'r', newline=''))\n\n if is_write:\n kw['fieldnames'] = field_list\n\n super().__init__(self.f, *a, **kw)\n\n assert self.fieldnames == field_list, (\n f'CSV is outdated. path=\"{self.csv_path.as_posix()}\" '\n f'expected=\"{field_list}\" actual=\"{self.fieldnames}\"'\n )\n\n self.field_list = field_list\n self.row_fn = row_fn\n\n self.start_ts = time.time()\n self.row_count = 0\n\n def _log_progress(self, row_dict):\n if time.time() - self.start_ts > 1.0:\n self.start_ts = time.time()\n # noinspection PyProtectedMember\n self._dump_dict(self.csv_path.as_posix(), row_dict)\n log.info(f' row count: {self.row_count}')\n\n def _dump_dict(self, title_str, d):\n log.info(title_str)\n # noinspection PyProtectedMember\n list(\n map(\n log.info,\n [f' {k}: {v}' for (k, v) in d.items()],\n )\n )\n\n return CSVWrapper\n\n\n@csv_wrapper\nclass DictReader(csv.DictReader):\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n def __next__(self):\n row_dict = super().__next__()\n row_tup = self.row_fn(**row_dict)\n self.row_count += 1\n self._log_progress(row_dict)\n # log.debug(f'__next__() {row_tup}')\n return row_tup\n\n\n@csv_wrapper\nclass DictWriter(csv.DictWriter):\n def __enter__(self):\n self.writeheader()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n def writeheader(self):\n header = dict(zip(self.fieldnames, self.fieldnames))\n super().writerow(header)\n\n def writerow(self, row_tup):\n # log.debug(f'writerow() {row_tup}')\n # noinspection PyProtectedMember\n row_dict = self.row_fn(row_tup)\n super().writerow(row_dict)\n self.row_count += 1\n self._log_progress(row_dict)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n",
"import datetime\nimport builtins\nimport collections\nimport contextlib\nimport io\nimport logging\nimport os\nimport pathlib\nimport pprint\nimport tempfile\nimport threading\nimport time\nimport types\nimport dateutil.parser\nimport dateutil\nimport fasteners\nimport flask\nimport flask.json\nimport lxml.etree\nimport pandas as pd\nimport pygments\nimport pygments.formatters\nimport pygments.lexers\nfrom flask import current_app as app\n\nimport dex.db\n\nlog = logging.getLogger(__name__)\n\n\nclass Counter:\n def __init__(self):\n self.count_dict = collections.defaultdict(lambda: 0)\n self.last_msg_ts = None\n\n def __enter__(self):\n self.last_msg_ts = time.time()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n log.debug(\"-\" * 100)\n self.print_counters(log.info)\n\n def count(self, id_str, key, detail_obj=None):\n self.count_dict[key] += 1\n if detail_obj is None:\n pass\n elif isinstance(detail_obj, str):\n self.p(f\"{id_str}: {key}: {detail_obj}\")\n else:\n self.pp(f'{id_str}: {key}:', detail_obj)\n if time.time() - self.last_msg_ts >= 1.0:\n self.last_msg_ts = time.time()\n self.print_counters(log.info)\n\n def print_counters(self, print_func):\n if not self.count_dict:\n print_func(\"No checks counted yet...\")\n return\n print_func(\"Counters:\")\n for k, v in sorted(self.count_dict.items()):\n print_func(f\" {v:>5,}: {k}\")\n\n def pp(self, title_str, obj, print_func=None):\n self.p(title_str, print_func)\n [self.p(f' {s}', print_func) for s in pprint.pformat(obj).splitlines(keepends=False)]\n\n def p(self, s, print_func=None):\n (print_func or log.debug)(s)\n\n\nclass CombinedLock:\n def __init__(self, root_path):\n self._root_path = pathlib.Path(root_path)\n self._thread_dict = {}\n\n @contextlib.contextmanager\n def lock(self, rid, key, obj_type, write=False):\n # log.debug(f'{self._root_path}, {lock_name}, {is_write}')\n lock_name = f'{rid}_{key}_{obj_type}'\n tp = f'{self._root_path / lock_name}_t'\n pp = f'{self._root_path / lock_name}_p'\n # print(f'tp = {tp}')\n # print(f'pp = {pp}')\n t = self._thread_dict.setdefault(tp, fasteners.ReaderWriterLock())\n p = self._thread_dict.setdefault(\n pp, fasteners.InterProcessReaderWriterLock(self._root_path / lock_name)\n )\n lock_fn = 'write_lock' if write else 'read_lock'\n es = contextlib.ExitStack()\n es.enter_context(getattr(t, lock_fn)())\n es.enter_context(getattr(p, lock_fn)())\n with es:\n try:\n yield es\n except Exception:\n pass\n\n\nclass Lock(object):\n \"\"\"\n Thread locks work by having the threads access the same object in shared\n memory. 
So it's important that only one lock object object is created for\n each controlled resource.\n\n Process locks work by having the processes access a named external resource.\n So the lock works regardless of how many lock objects are created, as\n long as they reference the same named resource.\n \"\"\"\n\n LOCK_METHOD_DICT = {\n 'read': 'read_lock',\n 'write': 'write_lock',\n }\n\n def __init__(self, name):\n self._name = name\n self._temp_dir = tempfile.TemporaryDirectory(prefix='locks')\n # Temp files are created under TemporaryDirectory, which is deleted\n # automatically, So delete is disabled on the individual files.\n self._lock_file = tempfile.NamedTemporaryFile(dir=self._temp_dir.name, delete=False)\n self._temp_root_path = pathlib.Path(self._temp_dir.name)\n self._lock_dict = {}\n self._thread_lock = threading.RLock()\n self._process_lock = fasteners.InterProcessLock(self._lock_file.name)\n\n @contextlib.contextmanager\n def lock_all(self):\n \"\"\"Lock both threads and processes.\"\"\"\n with self._thread_lock:\n with self._process_lock:\n yield self\n\n @contextlib.contextmanager\n def lock(self, rid, key, obj_type, write=False):\n \"\"\"Lock name for read or write. Supports upgrade and downgrade of the lock in a\n nested context manager.\n \"\"\"\n with self.lock_all():\n lock_name = f'{rid}_{key}_{obj_type}'\n if lock_name not in self._lock_dict:\n self._lock_dict[lock_name] = {\n 'thread': {\n 'lock': fasteners.ReaderWriterLock(),\n 'status': None,\n },\n 'process': {\n 'lock': fasteners.InterProcessReaderWriterLock(\n self._temp_root_path / lock_name\n ),\n 'status': None,\n },\n }\n\n type_str = 'write' if write else 'read'\n cm_list = [\n self._lock(lock_name, domain_str, type_str) for domain_str in ('thread', 'process')\n ]\n es = contextlib.ExitStack()\n [es.enter_context(cm) for cm in cm_list if cm is not None]\n with es:\n yield\n\n def _lock(self, lock_name, domain_str, type_str):\n lock_dict = self._lock_dict[lock_name]\n dom_dict = lock_dict[domain_str]\n if dom_dict['status'] == type_str:\n self._dbg(\n 'Lock already acquired',\n lock_name=lock_name,\n domain_str=domain_str,\n type_str=type_str,\n )\n return None\n dom_dict['status'] = type_str\n lock_obj = dom_dict['lock']\n self._dbg('Waiting for lock', lock_name=lock_name, type_str=type_str)\n return getattr(lock_obj, self.LOCK_METHOD_DICT[type_str])()\n\n def _dbg(self, msg_str, **kv):\n log.debug(\n f\"name:{self._name}, tid:{threading.get_native_id()}, pid={os.getpid()} - \"\n f\"{msg_str}: {' '.join([f'{k}={v}' for k, v in kv.items()])}\"\n )\n\n\n# Add SimpleNamespace as N to the global namespace.\n# If PyCharm complains, add N to the list at:\n# Settings > Inspections > Python > Unresolved references > Options > Ignore references\nbuiltins.N = types.SimpleNamespace\n\n\ndef logpp(obj, msg=None, logger=log.debug, sort_keys=False):\n \"\"\"pprint to a logger\"\"\"\n # if not logging.isEnabledFor(logging.DEBUG):\n # return\n if lxml.etree.iselement(obj):\n obj_str = get_etree_as_pretty_printed_xml(obj)\n else:\n obj_str = pprint.pformat(obj, indent=2, width=200, sort_dicts=sort_keys)\n logger(\"-\" * 100)\n if msg:\n logger(f'{msg}:')\n for line in obj_str.splitlines():\n logger(f' {line}')\n logger(\"-\" * 100)\n\n\n# builtins.dd = logpp\n\n\n# XML rendering\n\n\ndef get_etree_as_highlighted_html(el):\n \"\"\"Return a (css, html) tuple\"\"\"\n xml_str = get_etree_as_pretty_printed_xml(el)\n html_formatter = pygments.formatters.HtmlFormatter(style=app.config['EML_STYLE_NAME'])\n return (\n 
pygments.highlight(xml_str, pygments.lexers.XmlLexer(), html_formatter),\n html_formatter.get_style_defs('.highlight'),\n )\n\n\ndef get_etree_as_pretty_printed_xml(el):\n \"\"\"etree to pretty printed XML\"\"\"\n if not isinstance(el, list):\n el = [el]\n buf = io.BytesIO()\n for e in el:\n buf.write(lxml.etree.tostring(e, pretty_print=True))\n return buf.getvalue().decode('utf-8')\n\n\n# JSON\n\n\nclass DatetimeEncoder(flask.json.JSONEncoder):\n def default(self, o):\n try:\n return super().default(o)\n except TypeError:\n if isinstance(o, datetime.date):\n return o.isoformat()\n return str(o)\n\n\nclass DatetimeDecoder(flask.json.JSONDecoder):\n # def decode(self, s, w=flask.json.decoder.WHITESPACE.match):\n import json\n\n def decode(self, s, w=json.decoder.WHITESPACE.match):\n try:\n return super().decode(s, w)\n except TypeError:\n try:\n return dateutil.parser.isoparse(s)\n except Exception:\n return s\n\n\ndef date_to_iso(**g_dict):\n return flask.json.loads(flask.json.dumps(g_dict))\n # return flask.json.loads(flask.json.dumps(g_dict, cls=DatetimeEncoder))\n\n\ndef json_enc(**g_dict):\n logpp(g_dict, 'g_dict', log.debug)\n j = flask.json.htmlsafe_dumps(g_dict)\n log.debug(j)\n return j\n\n\n# Housekeeping\n\n\ndef wipe_cache():\n \"\"\"Delete all cached objects from the filesystem and corresponding\n information from the database.\n \"\"\"\n for p in (\n app.config['TMP_CACHE_ROOT'],\n app.config['CACHE_ROOT_DIR'],\n ):\n # p:pathlib.Path\n log.debug(f'Deleting dir tree: {p.as_posix()}')\n wipe_dir(p.resolve().absolute())\n\n log.debug(f'Deleting entities from database')\n dex.db.clear_entities()\n\n\ndef wipe_dir(p):\n \"\"\"Delete everything inside the dir, but does not delete the dir itself.\n\n It seems like this can be a major footgun some time in the future, when a bug causes\n us to feed \"/\" to this, and it does its best to delete the whole server. What sort\n of check can we add to make it safer?\n \"\"\"\n p: pathlib.Path\n log.debug(f'Entering dir: {p.as_posix()}')\n assert p.is_absolute()\n assert p.as_posix() not in ('', '/')\n for item in list(p.iterdir()):\n if item.is_dir():\n wipe_dir(item)\n else:\n item.unlink()\n\n\n# def _is_file_uri(uri):\n# return uri.startswith('file://')\n\n\n# def _is_url(uri):\n# return uri.startswith('http://') or uri.startswith('https://')\n\n\n# def _uri_to_path(uri):\n# \"\"\"Convert a file:// URI to a local path.\n#\n# Args:\n# uri: file:// URI\n#\n# Returns:\n# pathlib.Path()\n# \"\"\"\n# uri_tup = urllib.parse.urlparse(uri)\n# p = pathlib.Path(\n# uri_tup.netloc,\n# urllib.request.url2pathname(urllib.parse.unquote(uri_tup.path)),\n# ).resolve()\n# if not p.exists():\n# raise dex.exc.CacheError(f\"Invalid file URI: {uri}\")\n# return p\n\n\ndef dump_full_dataframe(csv_df):\n if not log.isEnabledFor(logging.DEBUG):\n return\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n # log.debug('Final DF passed to Pandas Profiling:')\n log.debug('DataFrame:')\n log.debug(csv_df)\n log.debug('Info:')\n info_buf = io.StringIO()\n csv_df.info(verbose=True, buf=info_buf)\n log.debug(info_buf.getvalue())\n"
] |
[
[
"pandas.read_csv"
],
[
"pandas.option_context"
]
] |
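
The two pandas calls recorded in this row's `apis` column (pandas.read_csv in the cache-prepopulation script, pandas.option_context in dex/util.py's dump_full_dataframe) can be seen in isolation in the sketch below. It is not part of the dataset row; the helper name and the 'example.csv' path are purely illustrative.

    import io
    import pandas as pd

    def dump_full_csv(csv_path):
        # pandas.read_csv parses the file into a DataFrame (as create_cache does)
        df = pd.read_csv(csv_path)
        # pandas.option_context temporarily lifts the display limits so the whole
        # frame is printed, mirroring dump_full_dataframe in dex/util.py
        with pd.option_context('display.max_rows', None, 'display.max_columns', None):
            print(df)
        info_buf = io.StringIO()
        df.info(verbose=True, buf=info_buf)
        print(info_buf.getvalue())

    if __name__ == '__main__':
        dump_full_csv('example.csv')  # hypothetical path, replace with any local CSV
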
axiezai/complex_laplacian
|
[
"e84574a7d9c051a95b5d37aa398765aeb5f85fa4"
] |
[
"scripts/laplacian_pearson_basinhopping.py"
] |
[
"\"\"\"\nUse the basinhopping algorithm to find best alpha, speed, and frequency\nthat produces the best spatial correlation for a given canonical network\n\"\"\"\n\n# number stuff imports\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import basinhopping\nfrom scipy.stats import pearsonr\nfrom sklearn.linear_model import LinearRegression\n\nimport sys\nimport os\nimport time\n\n# spectrome imports\nfrom spectrome.brain import Brain\nfrom spectrome.utils import functions, path\nfrom spectrome.forward import eigenmode\n\n# Limit number of threads\n# os.environ[\"OMP_NUM_THREADS\"] = \"2\"\n# os.environ[\"MKL_NUM_THREADS\"] = \"2\"\n# os.environ[\"NUMEXPR_NUM_THREADS\"] = \"2\"\n\n# hcp template connectome directory\nhcp_dir = \"../data\"\n\nHCP_brain = Brain.Brain()\nHCP_brain.add_connectome(hcp_dir)\nHCP_brain.reorder_connectome(HCP_brain.connectome, HCP_brain.distance_matrix)\nHCP_brain.bi_symmetric_c()\nHCP_brain.reduce_extreme_dir()\n\n# Load Pablo's Yeo 2017 canonical network maps\ncom_dk = np.load(\"../data/com_dk.npy\", allow_pickle=True).item()\nDK_df_normalized = pd.read_csv(\"../data/DK_dictionary_normalized.csv\").set_index(\n \"Unnamed: 0\"\n)\n\n# binarize:\nub, lb = 1, 0 # define binary boundaries\n\nDKfc_binarized = pd.DataFrame(\n [], index=DK_df_normalized.index, columns=DK_df_normalized.columns\n)\nfor name in DK_df_normalized.index:\n u = np.mean(np.nan_to_num(DK_df_normalized.loc[name].values))\n s = np.std(np.nan_to_num(DK_df_normalized.loc[name].values))\n threshold = u - s * 0.1\n DKfc_binarized.loc[name] = np.where(\n DK_df_normalized.loc[name].values > threshold, ub, lb\n )\n\n\ndef laplacian_corr(x, Brain, FC_networks, network_name):\n # start = time.time()\n # w = 2 * np.pi * x[0]\n\n # Laplacian, Brain already prep-ed with connectomes outside of function:\n Brain.decompose_complex_laplacian(alpha=x[0], k=x[1], num_ev=86)\n canon_network = np.nan_to_num(FC_networks.loc[network_name].values)\n\n # compute max correlation for optimization\n corrs = np.zeros([Brain.norm_eigenmodes.shape[1], 1])\n for e in np.arange(0, len(corrs)):\n corrs[e] = -pearsonr(np.squeeze(canon_network), Brain.norm_eigenmodes[:, e])[0]\n\n # end = time.time()\n # print(end - start)\n return np.min(corrs)\n\n\nclass BH_bounds(object):\n def __init__(self, xmax=[5, 600], xmin=[0, 0.1]):\n self.xmax = np.array(xmax)\n self.xmin = np.array(xmin)\n\n def __call__(self, **kwargs):\n x = kwargs[\"x_new\"]\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n return tmax and tmin\n\n\nallx0 = np.array(\n [\n [0.5, 5],\n [1, 100],\n [0.8, 50],\n [0.8, 200],\n [0.5, 400],\n [3, 15],\n [5, 250],\n [2, 150],\n [2, 300],\n [1, 500],\n ]\n)\n\nbnds = BH_bounds()\nprint(\n \"Starting optimization for {} initial condition {}\".format(\n str(sys.argv[1]), str(sys.argv[2])\n )\n)\n\nopt_res = basinhopping(\n laplacian_corr,\n x0=allx0[int(sys.argv[2]), :],\n minimizer_kwargs={\"args\": (HCP_brain, DK_df_normalized, str(sys.argv[1]))},\n niter=1500,\n T=0.1,\n stepsize=2,\n accept_test=bnds,\n seed=24,\n niter_success=100,\n disp=True,\n)\n\nopt_alpha = opt_res[\"x\"][0]\nopt_phi = opt_res[\"x\"][1]\n\n# print('optimized output: {}'.format(opt_res))\n# Recreate the forward solution:\n# w_opt = 2 * np.pi * opt_freq\nHCP_brain.decompose_complex_laplacian(alpha=opt_alpha, k=opt_phi)\n\ncanon_network = np.nan_to_num(DK_df_normalized.loc[str(sys.argv[1])].values)\n# compute max correlation for optimization\ncorrs = 
np.squeeze(np.zeros([HCP_brain.norm_eigenmodes.shape[1], 1]))\nfor e in np.arange(0, len(corrs)):\n prcorr = pearsonr(np.squeeze(canon_network), HCP_brain.norm_eigenmodes[:, e])[\n 0\n ]\n corrs[e] = prcorr\n # print(prcorr)\n\nntw_opt_corr = np.round(corrs, 3)\nmax_opt_corr = np.max(ntw_opt_corr)\nordered_corr = np.argsort(-ntw_opt_corr)\n# print(ordered_corr)\nprint(\"basinhop:{}\".format(opt_res[\"fun\"]))\nprint(\"forward max:{}\".format(max_opt_corr))\nassert ntw_opt_corr[ordered_corr[1]] <= ntw_opt_corr[ordered_corr[0]]\nassert max_opt_corr == -np.round(opt_res[\"fun\"], 3)\n# Linear Regression for 10 K's and save in a dictionary:\n# K = 11\n# if str(sys.argv[3]) == 'dice':\n# # create empty list of dicts:\n# LinReg = []\n# keys = ['num','coef','r2score','ordereigs']\n# for k in np.arange(1,K):\n# selected_eigs = HCP_brain.norm_eigenmodes[:,ordered_dice[0:k]]\n# canon_network = np.nan_to_num(DK_df_normalized.loc[str(sys.argv[1])].values).reshape(-1,1)\n# regr = LinearRegression()\n# regr.fit(canon_network, selected_eigs)\n# c = regr.coef_\n# r2 = regr.score(canon_network, selected_eigs)\n# reg_results = {keys[0]:k, keys[1]:c, keys[2]:r2, keys[3]:ordered_dice[0:k]}\n# LinReg.append(reg_results)\n# print('For K = {}, chosen eigs: {}, coefficients: {} , residual error: {}'.format(k, ordered_dice[0:k], c, r2))\n\n# opt_res['LinRegResults'] = LinReg\n\n# file_name = str(sys.argv[1]) + str(sys.argv[2]) + \"_BH_dice.h5\"\n# file_path = os.path.join(hcp_dir, file_name)\n# path.save_hdf5(file_path, opt_res)\n# print(\"Optimal result: \" , opt_res['x'])\n# elif str(sys.argv[3]) == 'corr':\n# # create empty list of dicts:\n# LinReg = []\n# keys = ['num','coef','r2score','ordereigs']\n# for k in np.arange(1,K):\n# selected_eigs = HCP_brain.norm_eigenmodes[:,ordered_corr[0:k]]\n# canon_network = np.nan_to_num(DK_df_normalized.loc[str(sys.argv[1])].values).reshape(-1,1)\n# regr = LinearRegression()\n# regr.fit(canon_network, selected_eigs)\n# c = regr.coef_\n# r2 = regr.score(canon_network, selected_eigs)\n# reg_results = {keys[0]:k, keys[1]:c, keys[2]:r2, keys[3]:ordered_corr[0:k]}\n# LinReg.append(reg_results)\n# print('For K = {}, chosen eigs: {}, coefficients: {} , residual error: {}'.format(k, ordered_corr[0:k], c, r2))\n\n# opt_res['LinRegResults'] = LinReg\n\nfile_name = str(sys.argv[1]) + str(sys.argv[2]) + \"_BH_pearson.h5\"\nfile_path = os.path.join(hcp_dir, file_name)\npath.save_hdf5(file_path, opt_res)\nprint(\"Optimal result: \", opt_res[\"x\"])\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.nan_to_num",
"numpy.zeros",
"numpy.round",
"pandas.DataFrame",
"numpy.load",
"numpy.min",
"numpy.where",
"numpy.argsort",
"numpy.all",
"pandas.read_csv",
"numpy.squeeze"
]
] |
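
The optimization setup in laplacian_pearson_basinhopping.py (a bounds object passed as accept_test, a correlation-based objective minimized by scipy's basinhopping) reduces to the pattern below. This is an illustrative sketch, not code from axiezai/complex_laplacian; the toy quadratic objective stands in for laplacian_corr.

    import numpy as np
    from scipy.optimize import basinhopping

    class Bounds:
        """Reject basinhopping steps that leave the search box (cf. BH_bounds)."""
        def __init__(self, xmax=(5.0, 600.0), xmin=(0.0, 0.1)):
            self.xmax = np.array(xmax)
            self.xmin = np.array(xmin)

        def __call__(self, **kwargs):
            x = kwargs["x_new"]
            # accept the proposed step only if every coordinate stays inside the box
            return bool(np.all(x <= self.xmax)) and bool(np.all(x >= self.xmin))

    def objective(x):
        # toy stand-in for the negative Pearson correlation minimized in the script
        return (x[0] - 1.0) ** 2 + (x[1] - 50.0) ** 2

    result = basinhopping(objective, x0=np.array([0.5, 5.0]), niter=50, T=0.1,
                          stepsize=2, accept_test=Bounds(), seed=24)
    print(result.x, result.fun)
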
jerryuhoo/PaddleSpeech
|
[
"1eec7b5e042da294c7524af92f0fae4c32a71aa3",
"1eec7b5e042da294c7524af92f0fae4c32a71aa3"
] |
[
"paddlespeech/server/engine/tts/online/onnx/tts_engine.py",
"audio/tests/features/test_log_melspectrogram.py"
] |
[
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport base64\nimport math\nimport os\nimport time\nfrom typing import Optional\n\nimport numpy as np\nimport paddle\n\nfrom .pretrained_models import pretrained_models\nfrom paddlespeech.cli.log import logger\nfrom paddlespeech.cli.tts.infer import TTSExecutor\nfrom paddlespeech.server.engine.base_engine import BaseEngine\nfrom paddlespeech.server.utils.audio_process import float2pcm\nfrom paddlespeech.server.utils.onnx_infer import get_sess\nfrom paddlespeech.server.utils.util import denorm\nfrom paddlespeech.server.utils.util import get_chunks\nfrom paddlespeech.t2s.frontend import English\nfrom paddlespeech.t2s.frontend.zh_frontend import Frontend\n\n__all__ = ['TTSEngine']\n\n\nclass TTSServerExecutor(TTSExecutor):\n def __init__(self, am_block, am_pad, voc_block, voc_pad, voc_upsample):\n super().__init__()\n self.am_block = am_block\n self.am_pad = am_pad\n self.voc_block = voc_block\n self.voc_pad = voc_pad\n self.voc_upsample = voc_upsample\n\n self.pretrained_models = pretrained_models\n\n def _init_from_path(\n self,\n am: str='fastspeech2_csmsc_onnx',\n am_ckpt: Optional[list]=None,\n am_stat: Optional[os.PathLike]=None,\n phones_dict: Optional[os.PathLike]=None,\n tones_dict: Optional[os.PathLike]=None,\n speaker_dict: Optional[os.PathLike]=None,\n am_sample_rate: int=24000,\n am_sess_conf: dict=None,\n voc: str='mb_melgan_csmsc_onnx',\n voc_ckpt: Optional[os.PathLike]=None,\n voc_sample_rate: int=24000,\n voc_sess_conf: dict=None,\n lang: str='zh', ):\n \"\"\"\n Init model and other resources from a specific path.\n \"\"\"\n\n if (hasattr(self, 'am_sess') or\n (hasattr(self, 'am_encoder_infer_sess') and\n hasattr(self, 'am_decoder_sess') and hasattr(\n self, 'am_postnet_sess'))) and hasattr(self, 'voc_inference'):\n logger.info('Models had been initialized.')\n return\n # am\n am_tag = am + '-' + lang\n if am == \"fastspeech2_csmsc_onnx\":\n # get model info\n if am_ckpt is None or phones_dict is None:\n am_res_path = self._get_pretrained_path(am_tag)\n self.am_res_path = am_res_path\n self.am_ckpt = os.path.join(\n am_res_path, self.pretrained_models[am_tag]['ckpt'][0])\n # must have phones_dict in acoustic\n self.phones_dict = os.path.join(\n am_res_path, self.pretrained_models[am_tag]['phones_dict'])\n\n else:\n self.am_ckpt = os.path.abspath(am_ckpt[0])\n self.phones_dict = os.path.abspath(phones_dict)\n self.am_res_path = os.path.dirname(\n os.path.abspath(self.am_ckpt))\n\n # create am sess\n self.am_sess = get_sess(self.am_ckpt, am_sess_conf)\n\n elif am == \"fastspeech2_cnndecoder_csmsc_onnx\":\n if am_ckpt is None or am_stat is None or phones_dict is None:\n am_res_path = self._get_pretrained_path(am_tag)\n self.am_res_path = am_res_path\n self.am_encoder_infer = os.path.join(\n am_res_path, self.pretrained_models[am_tag]['ckpt'][0])\n self.am_decoder = os.path.join(\n am_res_path, self.pretrained_models[am_tag]['ckpt'][1])\n self.am_postnet = 
os.path.join(\n am_res_path, self.pretrained_models[am_tag]['ckpt'][2])\n # must have phones_dict in acoustic\n self.phones_dict = os.path.join(\n am_res_path, self.pretrained_models[am_tag]['phones_dict'])\n self.am_stat = os.path.join(\n am_res_path, self.pretrained_models[am_tag]['speech_stats'])\n\n else:\n self.am_encoder_infer = os.path.abspath(am_ckpt[0])\n self.am_decoder = os.path.abspath(am_ckpt[1])\n self.am_postnet = os.path.abspath(am_ckpt[2])\n self.phones_dict = os.path.abspath(phones_dict)\n self.am_stat = os.path.abspath(am_stat)\n self.am_res_path = os.path.dirname(\n os.path.abspath(self.am_ckpt))\n\n # create am sess\n self.am_encoder_infer_sess = get_sess(self.am_encoder_infer,\n am_sess_conf)\n self.am_decoder_sess = get_sess(self.am_decoder, am_sess_conf)\n self.am_postnet_sess = get_sess(self.am_postnet, am_sess_conf)\n\n self.am_mu, self.am_std = np.load(self.am_stat)\n\n logger.info(f\"self.phones_dict: {self.phones_dict}\")\n logger.info(f\"am model dir: {self.am_res_path}\")\n logger.info(\"Create am sess successfully.\")\n\n # voc model info\n voc_tag = voc + '-' + lang\n if voc_ckpt is None:\n voc_res_path = self._get_pretrained_path(voc_tag)\n self.voc_res_path = voc_res_path\n self.voc_ckpt = os.path.join(\n voc_res_path, self.pretrained_models[voc_tag]['ckpt'])\n else:\n self.voc_ckpt = os.path.abspath(voc_ckpt)\n self.voc_res_path = os.path.dirname(os.path.abspath(self.voc_ckpt))\n logger.info(self.voc_res_path)\n\n # create voc sess\n self.voc_sess = get_sess(self.voc_ckpt, voc_sess_conf)\n logger.info(\"Create voc sess successfully.\")\n\n with open(self.phones_dict, \"r\") as f:\n phn_id = [line.strip().split() for line in f.readlines()]\n self.vocab_size = len(phn_id)\n logger.info(f\"vocab_size: {self.vocab_size}\")\n\n # frontend\n self.tones_dict = None\n if lang == 'zh':\n self.frontend = Frontend(\n phone_vocab_path=self.phones_dict,\n tone_vocab_path=self.tones_dict)\n\n elif lang == 'en':\n self.frontend = English(phone_vocab_path=self.phones_dict)\n logger.info(\"frontend done!\")\n\n def depadding(self, data, chunk_num, chunk_id, block, pad, upsample):\n \"\"\" \n Streaming inference removes the result of pad inference\n \"\"\"\n front_pad = min(chunk_id * block, pad)\n # first chunk\n if chunk_id == 0:\n data = data[:block * upsample]\n # last chunk\n elif chunk_id == chunk_num - 1:\n data = data[front_pad * upsample:]\n # middle chunk\n else:\n data = data[front_pad * upsample:(front_pad + block) * upsample]\n\n return data\n\n @paddle.no_grad()\n def infer(\n self,\n text: str,\n lang: str='zh',\n am: str='fastspeech2_csmsc_onnx',\n spk_id: int=0, ):\n \"\"\"\n Model inference and result stored in self.output.\n \"\"\"\n\n am_block = self.am_block\n am_pad = self.am_pad\n am_upsample = 1\n voc_block = self.voc_block\n voc_pad = self.voc_pad\n voc_upsample = self.voc_upsample\n # first_flag 用于标记首包\n first_flag = 1\n get_tone_ids = False\n merge_sentences = False\n\n # front \n frontend_st = time.time()\n if lang == 'zh':\n input_ids = self.frontend.get_input_ids(\n text,\n merge_sentences=merge_sentences,\n get_tone_ids=get_tone_ids)\n phone_ids = input_ids[\"phone_ids\"]\n if get_tone_ids:\n tone_ids = input_ids[\"tone_ids\"]\n elif lang == 'en':\n input_ids = self.frontend.get_input_ids(\n text, merge_sentences=merge_sentences)\n phone_ids = input_ids[\"phone_ids\"]\n else:\n logger.error(\"lang should in {'zh', 'en'}!\")\n frontend_et = time.time()\n self.frontend_time = frontend_et - frontend_st\n\n for i in range(len(phone_ids)):\n 
part_phone_ids = phone_ids[i].numpy()\n voc_chunk_id = 0\n\n # fastspeech2_csmsc\n if am == \"fastspeech2_csmsc_onnx\":\n # am \n mel = self.am_sess.run(\n output_names=None, input_feed={'text': part_phone_ids})\n mel = mel[0]\n if first_flag == 1:\n first_am_et = time.time()\n self.first_am_infer = first_am_et - frontend_et\n\n # voc streaming\n mel_chunks = get_chunks(mel, voc_block, voc_pad, \"voc\")\n voc_chunk_num = len(mel_chunks)\n voc_st = time.time()\n for i, mel_chunk in enumerate(mel_chunks):\n sub_wav = self.voc_sess.run(\n output_names=None, input_feed={'logmel': mel_chunk})\n sub_wav = self.depadding(sub_wav[0], voc_chunk_num, i,\n voc_block, voc_pad, voc_upsample)\n if first_flag == 1:\n first_voc_et = time.time()\n self.first_voc_infer = first_voc_et - first_am_et\n self.first_response_time = first_voc_et - frontend_st\n first_flag = 0\n\n yield sub_wav\n\n # fastspeech2_cnndecoder_csmsc \n elif am == \"fastspeech2_cnndecoder_csmsc_onnx\":\n # am \n orig_hs = self.am_encoder_infer_sess.run(\n None, input_feed={'text': part_phone_ids})\n orig_hs = orig_hs[0]\n\n # streaming voc chunk info\n mel_len = orig_hs.shape[1]\n voc_chunk_num = math.ceil(mel_len / self.voc_block)\n start = 0\n end = min(self.voc_block + self.voc_pad, mel_len)\n\n # streaming am\n hss = get_chunks(orig_hs, self.am_block, self.am_pad, \"am\")\n am_chunk_num = len(hss)\n for i, hs in enumerate(hss):\n am_decoder_output = self.am_decoder_sess.run(\n None, input_feed={'xs': hs})\n am_postnet_output = self.am_postnet_sess.run(\n None,\n input_feed={\n 'xs': np.transpose(am_decoder_output[0], (0, 2, 1))\n })\n am_output_data = am_decoder_output + np.transpose(\n am_postnet_output[0], (0, 2, 1))\n normalized_mel = am_output_data[0][0]\n\n sub_mel = denorm(normalized_mel, self.am_mu, self.am_std)\n sub_mel = self.depadding(sub_mel, am_chunk_num, i, am_block,\n am_pad, am_upsample)\n\n if i == 0:\n mel_streaming = sub_mel\n else:\n mel_streaming = np.concatenate(\n (mel_streaming, sub_mel), axis=0)\n\n # streaming voc\n # 当流式AM推理的mel帧数大于流式voc推理的chunk size,开始进行流式voc 推理\n while (mel_streaming.shape[0] >= end and\n voc_chunk_id < voc_chunk_num):\n if first_flag == 1:\n first_am_et = time.time()\n self.first_am_infer = first_am_et - frontend_et\n voc_chunk = mel_streaming[start:end, :]\n\n sub_wav = self.voc_sess.run(\n output_names=None, input_feed={'logmel': voc_chunk})\n sub_wav = self.depadding(sub_wav[0], voc_chunk_num,\n voc_chunk_id, voc_block,\n voc_pad, voc_upsample)\n if first_flag == 1:\n first_voc_et = time.time()\n self.first_voc_infer = first_voc_et - first_am_et\n self.first_response_time = first_voc_et - frontend_st\n first_flag = 0\n\n yield sub_wav\n\n voc_chunk_id += 1\n start = max(0, voc_chunk_id * voc_block - voc_pad)\n end = min((voc_chunk_id + 1) * voc_block + voc_pad,\n mel_len)\n\n else:\n logger.error(\n \"Only support fastspeech2_csmsc or fastspeech2_cnndecoder_csmsc on streaming tts.\"\n )\n\n self.final_response_time = time.time() - frontend_st\n\n\nclass TTSEngine(BaseEngine):\n \"\"\"TTS server engine\n\n Args:\n metaclass: Defaults to Singleton.\n \"\"\"\n\n def __init__(self, name=None):\n \"\"\"Initialize TTS server engine\n \"\"\"\n super().__init__()\n\n def init(self, config: dict) -> bool:\n self.config = config\n assert (\n self.config.am == \"fastspeech2_csmsc_onnx\" or\n self.config.am == \"fastspeech2_cnndecoder_csmsc_onnx\"\n ) and (\n self.config.voc == \"hifigan_csmsc_onnx\" or\n self.config.voc == \"mb_melgan_csmsc_onnx\"\n ), 'Please check config, am support: 
fastspeech2, voc support: hifigan_csmsc-zh or mb_melgan_csmsc.'\n\n assert (\n self.config.voc_block > 0 and self.config.voc_pad > 0\n ), \"Please set correct voc_block and voc_pad, they should be more than 0.\"\n\n assert (\n self.config.voc_sample_rate == self.config.am_sample_rate\n ), \"The sample rate of AM and Vocoder model are different, please check model.\"\n\n self.executor = TTSServerExecutor(\n self.config.am_block, self.config.am_pad, self.config.voc_block,\n self.config.voc_pad, self.config.voc_upsample)\n\n try:\n if self.config.am_sess_conf.device is not None:\n self.device = self.config.am_sess_conf.device\n elif self.config.voc_sess_conf.device is not None:\n self.device = self.config.voc_sess_conf.device\n else:\n self.device = paddle.get_device()\n paddle.set_device(self.device)\n except BaseException as e:\n logger.error(\n \"Set device failed, please check if device is already used and the parameter 'device' in the yaml file\"\n )\n logger.error(\"Initialize TTS server engine Failed on device: %s.\" %\n (self.device))\n return False\n\n try:\n self.executor._init_from_path(\n am=self.config.am,\n am_ckpt=self.config.am_ckpt,\n am_stat=self.config.am_stat,\n phones_dict=self.config.phones_dict,\n tones_dict=self.config.tones_dict,\n speaker_dict=self.config.speaker_dict,\n am_sample_rate=self.config.am_sample_rate,\n am_sess_conf=self.config.am_sess_conf,\n voc=self.config.voc,\n voc_ckpt=self.config.voc_ckpt,\n voc_sample_rate=self.config.voc_sample_rate,\n voc_sess_conf=self.config.voc_sess_conf,\n lang=self.config.lang)\n\n except Exception as e:\n logger.error(\"Failed to get model related files.\")\n logger.error(\"Initialize TTS server engine Failed on device: %s.\" %\n (self.config.voc_sess_conf.device))\n return False\n\n # warm up\n try:\n self.warm_up()\n logger.info(\"Warm up successfully.\")\n except Exception as e:\n logger.error(\"Failed to warm up on tts engine.\")\n return False\n\n logger.info(\"Initialize TTS server engine successfully on device: %s.\" %\n (self.config.voc_sess_conf.device))\n\n return True\n\n def warm_up(self):\n \"\"\"warm up\n \"\"\"\n if self.config.lang == 'zh':\n sentence = \"您好,欢迎使用语音合成服务。\"\n if self.config.lang == 'en':\n sentence = \"Hello and welcome to the speech synthesis service.\"\n logger.info(\"Start to warm up.\")\n for i in range(3):\n for wav in self.executor.infer(\n text=sentence,\n lang=self.config.lang,\n am=self.config.am,\n spk_id=0, ):\n logger.info(\n f\"The first response time of the {i} warm up: {self.executor.first_response_time} s\"\n )\n break\n\n def preprocess(self, text_bese64: str=None, text_bytes: bytes=None):\n # Convert byte to text\n if text_bese64:\n text_bytes = base64.b64decode(text_bese64) # base64 to bytes\n text = text_bytes.decode('utf-8') # bytes to text\n\n return text\n\n def run(self,\n sentence: str,\n spk_id: int=0,\n speed: float=1.0,\n volume: float=1.0,\n sample_rate: int=0,\n save_path: str=None):\n \"\"\" run include inference and postprocess.\n\n Args:\n sentence (str): text to be synthesized\n spk_id (int, optional): speaker id for multi-speaker speech synthesis. Defaults to 0.\n speed (float, optional): speed. Defaults to 1.0.\n volume (float, optional): volume. Defaults to 1.0.\n sample_rate (int, optional): target sample rate for synthesized audio, \n 0 means the same as the model sampling rate. Defaults to 0.\n save_path (str, optional): The save path of the synthesized audio. \n None means do not save audio. 
Defaults to None.\n\n Returns:\n wav_base64: The base64 format of the synthesized audio.\n \"\"\"\n wav_list = []\n\n for wav in self.executor.infer(\n text=sentence,\n lang=self.config.lang,\n am=self.config.am,\n spk_id=spk_id, ):\n\n # wav type: <class 'numpy.ndarray'> float32, convert to pcm (base64)\n wav = float2pcm(wav) # float32 to int16\n wav_bytes = wav.tobytes() # to bytes\n wav_base64 = base64.b64encode(wav_bytes).decode('utf8') # to base64\n wav_list.append(wav)\n\n yield wav_base64\n\n wav_all = np.concatenate(wav_list, axis=0)\n duration = len(wav_all) / self.config.voc_sample_rate\n logger.info(f\"sentence: {sentence}\")\n logger.info(f\"The durations of audio is: {duration} s\")\n logger.info(\n f\"first response time: {self.executor.first_response_time} s\")\n logger.info(\n f\"final response time: {self.executor.final_response_time} s\")\n logger.info(f\"RTF: {self.executor.final_response_time / duration}\")\n logger.info(\n f\"Other info: front time: {self.executor.frontend_time} s, first am infer time: {self.executor.first_am_infer} s, first voc infer time: {self.executor.first_voc_infer} s,\"\n )\n",
"# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport unittest\n\nimport numpy as np\nimport paddle\nimport paddleaudio\n\nfrom .base import FeatTest\nfrom paddlespeech.s2t.transform.spectrogram import LogMelSpectrogram\n\n\nclass TestLogMelSpectrogram(FeatTest):\n def initParmas(self):\n self.n_fft = 512\n self.hop_length = 128\n self.n_mels = 40\n\n def test_log_melspect(self):\n ps_melspect = LogMelSpectrogram(self.sr, self.n_mels, self.n_fft,\n self.hop_length)\n ps_res = ps_melspect(self.waveform.T).squeeze(1).T\n\n x = paddle.to_tensor(self.waveform)\n # paddlespeech.s2t的特征存在幅度谱和功率谱滥用的情况\n ps_melspect = paddleaudio.features.LogMelSpectrogram(\n self.sr,\n self.n_fft,\n self.hop_length,\n power=1.0,\n n_mels=self.n_mels,\n f_min=0.0)\n pa_res = (ps_melspect(x) / 10.0).squeeze(0).numpy()\n\n np.testing.assert_array_almost_equal(ps_res, pa_res, decimal=5)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.concatenate",
"numpy.load",
"numpy.transpose"
],
[
"numpy.testing.assert_array_almost_equal"
]
] |
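
The streaming logic in the ONNX TTS engine above hinges on de-padding: each inferred chunk keeps only its own `block` frames and drops the `pad` frames of borrowed context before the pieces are joined with numpy.concatenate. The sketch below is not PaddleSpeech code; the chunking scheme is a simplified assumption, but the depad rule mirrors TTSServerExecutor.depadding, and the round trip is checked with numpy.testing.assert_array_almost_equal as in the feature test.

    import numpy as np

    def depad(chunk, chunk_num, chunk_id, block, pad, upsample=1):
        # same rule as TTSServerExecutor.depadding in the listing above
        front_pad = min(chunk_id * block, pad)
        if chunk_id == 0:                    # first chunk: keep only its own block
            return chunk[:block * upsample]
        if chunk_id == chunk_num - 1:        # last chunk: drop only the left pad
            return chunk[front_pad * upsample:]
        return chunk[front_pad * upsample:(front_pad + block) * upsample]

    block, pad = 4, 2
    x = np.arange(20)
    # hypothetical chunking: each chunk carries `pad` frames of context on each side
    chunks = [x[max(0, s - pad):s + block + pad] for s in range(0, len(x), block)]
    rebuilt = np.concatenate([depad(c, len(chunks), i, block, pad)
                              for i, c in enumerate(chunks)])
    np.testing.assert_array_almost_equal(rebuilt, x)  # reconstruction is exact
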
spalato/spectro_tools
|
[
"977dc35dfec35ffbc75cacecdeeda9ed4fdbf0cd"
] |
[
"scratch/vibronic.py"
] |
[
"import numpy as np\nfrom scipy.special import assoc_laguerre, factorial\nfrom lmfit.lineshapes import voigt\nimport matplotlib.pyplot as plt\n\n# Normally, all these functions are in a module called `spectro.utils`. \n# I would only perform the following import:\n# from spectro.utils import vibronic_emission\n\n\n# These functions nromally are in a module. As such, I made them a bit \"safer\"\n# by performing checks and conversions on the arguments, as well as some\n# basic asserts. This is not strictly necessary.\n# The overall function (vibronic_emission) is broken up in many small steps.\n# This looks a bit silly, but I ended up needing the separate bits at various\n# points. Breaking things up in very small functions is usually good practice.\ndef fc_factor(m, n, s):\n \"\"\"\n Franck-Condon overlap between states with identical vibrational frequency.\n \n Parameters\n ----------\n m : positive int\n Vibrational quantum number in initial state\n n : positive int\n Vibrational quantum number in final state\n S : positive float\n Huang Rhys parameter\n \"\"\"\n n = np.asarray(n, dtype='float64')\n m = np.asarray(m, dtype='float64')\n s = np.asarray(s, dtype='float64')\n n, m = np.meshgrid(n, m)\n # swap n, m such that n>=m. Otherwise our assoc_laguerre spits a nan.\n d = n-m\n n_ = np.where(n>=m, n, m)\n m_ = np.where(n>=m, m, n)\n\n lag = assoc_laguerre(s, m_, np.abs(d))\n f = factorial(m_)/factorial(n_)\n assert np.all(f>0)\n return np.exp(-s)*np.power(s,np.abs(d))*f*lag*lag\n\n\ndef vibronic_intensity(m, n, s, e_vib, kt=0):\n \"\"\"\n Intensity of a Franck-Condon transition\n\n Parameters\n ----------\n m : array-like, int\n Vibrational quantum number in the initial manifold\n n : array-like, int\n Vibrational quantum number in the final manifold\n s : float\n Huang-Rhys factor S\n e_vib : float\n Vibrational energy\n kt : float\n Thermal energy\n\n Returns\n -------\n intensities: array-like, float\n Intensity of the vibrational bands\n \"\"\"\n # compute boltzmann factors\n boltz_f = np.exp(-m * e_vib / kt) if kt > 0 else [1]\n boltz_f /= np.sum(boltz_f)\n # FC factors\n fcf = fc_factor(m, n, s)\n fcf *= boltz_f[:, np.newaxis]\n return fcf\n\n\ndef vibronic_ls(x, s, sigma, gamma, e_vib, kt=0, n_max=None, m_max=None):\n \"\"\"\n Produce a vibronic (Frank-Condom) lineshape.\n \n The vibronic transition amplitude computed relative to 0 (ie: relative to \n the electronic transition energy). Lines are broadened using a voigt\n profile.\n \n Parameters\n ----------\n x : np.ndarray\n Energy values. x==0 is the 0->0 line (no vibrational quanta change)\n s : float\n Huang-Rhys parameter S\n e_vib : float\n Energy of a vibrational quanta\n sigma : float\n Width (1/e^2) of gaussian component\n gamma : float\n Width of Lorententzian component\n kt : float\n Thermal energy. If >0, will compute transitions from vibrationally\n excited states. Default 0.\n n_max : int\n Largest vibrational number in final manifold. If not supplied, a guess \n is provided, but may not be adequate.\n m_max : int\n Largest vibrational number in orginal manifold. 
If not supplied, a guess\n is provided, but may not be adequate.\n \"\"\"\n #determine n, m, values\n if m_max is None:\n m_max = 0 if kt==0 else int(kt/e_vib*10) # found that factor with my thumb\n if n_max is None:\n n_max = m_max + int(10*s)\n n = np.arange(n_max+1)\n m = np.arange(m_max+1)\n fcf = vibronic_intensity(m, n, s, e_vib, kt)\n n, m = np.meshgrid(n, m)\n dvib = n-m\n y = np.zeros_like(x)\n for d, f in zip(dvib.flatten(), fcf.flatten()):\n y += voigt(x, f, d*e_vib, sigma, gamma)\n return y\n\n\ndef vibronic_emission(x, amp, x0, s, sigma, gamma, e_vib, kt=0, **kw):\n \"\"\"\n Produce a vibronic (Frank-Condom) lineshape.\n \n The vibronic emission lineshape. Lines are broadened using a voigt profile.\n \n Parameters\n ----------\n x : np.ndarray\n Energy values.\n amp : float\n Transition amplitude.\n x0 : float\n Electronic transition energy. (zero-phonon line)\n s : float\n Huang-Rhys parameter S\n e_vib : float\n Energy of a vibrational quanta\n sigma : float\n Width (1/e^2) of gaussian component\n gamma : float\n Width of Lorententzian component\n kt : float\n Thermal energy. If >0, will compute transitions from vibrationally\n excited states. Default 0.\n n_max : int\n Largest vibrational number in final manifold. If not supplied, a guess \n is provided, but may not be adequate.\n m_max : int\n Largest vibrational number in orginal manifold. If not supplied, a guess\n is provided, but may not be adequate.\n \"\"\"\n return amp*vibronic_ls(-x+x0, s, sigma, gamma, e_vib, kt=kt, **kw)\n\n\nx = np.linspace(1.8, 2.5, 1000)\ne0 = 2.17\ns = 0.5\nsigma = 0.01\ngamma = 0.001\ne_vib = 0.07\ny1 = vibronic_emission(x, 1, e0, s, sigma, gamma, e_vib, 0)\ny2 = vibronic_emission(x, 1, e0, s, sigma, gamma, e_vib, 0.025)\ny3 = vibronic_emission(x, 1, e0, s, sigma, gamma, e_vib, 0.2)\nplt.figure()\nplt.plot(x, y1, label=\"kT=0\")\nplt.plot(x, y2, label=\"kT=RT\")\nplt.plot(x, y3, label=\"kT=200 meV\")\nplt.legend()\nplt.savefig(\"fc_emission.png\", dpi=150)\n"
] |
[
[
"numpy.zeros_like",
"numpy.asarray",
"matplotlib.pyplot.savefig",
"numpy.sum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.exp",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.arange",
"scipy.special.factorial",
"numpy.abs",
"numpy.all",
"numpy.linspace",
"numpy.meshgrid"
]
] |
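
At kT = 0 the Franck-Condon progression computed by fc_factor above collapses to a Poisson distribution in the final vibrational quantum number, |<0|n>|^2 = exp(-S) S^n / n!. The sketch below (not from spectro_tools) evaluates that special case with the same numpy and scipy.special calls listed in the `apis` column; it should agree with fc_factor(0, n, s) from the listing.

    import numpy as np
    from scipy.special import factorial

    def fc_progression(s, n_max):
        # |<0|n>|^2 = exp(-S) * S**n / n!  (Poisson-distributed line intensities)
        n = np.arange(n_max + 1)
        return np.exp(-s) * np.power(s, n) / factorial(n)

    weights = fc_progression(s=0.5, n_max=8)
    print(weights)
    print(weights.sum())  # tends to 1 as n_max grows (complete progression)
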
Nickfagiano/SLM-Lab
|
[
"dddff5e1ce7454c33b45a803dd0c1d8e133c4508",
"dddff5e1ce7454c33b45a803dd0c1d8e133c4508"
] |
[
"slm_lab/agent/net/net_util.py",
"test/conftest.py"
] |
[
"from functools import partial, wraps\nfrom slm_lab.lib import logger, optimizer, util\nimport os\nimport pydash as ps\nimport torch\nimport torch.nn as nn\n\nlogger = logger.get_logger(__name__)\n\n# register custom torch.optim\nsetattr(torch.optim, 'GlobalAdam', optimizer.GlobalAdam)\nsetattr(torch.optim, 'GlobalRMSprop', optimizer.GlobalRMSprop)\nsetattr(torch.optim, 'Lookahead', optimizer.Lookahead)\nsetattr(torch.optim, 'RAdam', optimizer.RAdam)\n\n\nclass NoOpLRScheduler:\n '''Symbolic LRScheduler class for API consistency'''\n\n def __init__(self, optim):\n self.optim = optim\n\n def step(self, epoch=None):\n pass\n\n def get_lr(self):\n if hasattr(self.optim, 'defaults'):\n return self.optim.defaults['lr']\n else: # TODO retrieve lr more generally\n return self.optim.param_groups[0]['lr']\n\n\ndef build_fc_model(dims, activation=None):\n '''Build a full-connected model by interleaving nn.Linear and activation_fn'''\n assert len(dims) >= 2, 'dims need to at least contain input, output'\n # shift dims and make pairs of (in, out) dims per layer\n dim_pairs = list(zip(dims[:-1], dims[1:]))\n layers = []\n for in_d, out_d in dim_pairs:\n layers.append(nn.Linear(in_d, out_d))\n if activation is not None:\n layers.append(get_activation_fn(activation))\n model = nn.Sequential(*layers)\n return model\n\n\ndef get_nn_name(uncased_name):\n '''Helper to get the proper name in PyTorch nn given a case-insensitive name'''\n for nn_name in nn.__dict__:\n if uncased_name.lower() == nn_name.lower():\n return nn_name\n raise ValueError(f'Name {uncased_name} not found in {nn.__dict__}')\n\n\ndef get_activation_fn(activation):\n '''Helper to generate activation function layers for net'''\n ActivationClass = getattr(nn, get_nn_name(activation))\n return ActivationClass()\n\n\ndef get_loss_fn(cls, loss_spec):\n '''Helper to parse loss param and construct loss_fn for net'''\n LossClass = getattr(nn, get_nn_name(loss_spec['name']))\n loss_spec = ps.omit(loss_spec, 'name')\n loss_fn = LossClass(**loss_spec)\n return loss_fn\n\n\ndef get_lr_scheduler(optim, lr_scheduler_spec):\n '''Helper to parse lr_scheduler param and construct Pytorch optim.lr_scheduler'''\n if ps.is_empty(lr_scheduler_spec):\n lr_scheduler = NoOpLRScheduler(optim)\n elif lr_scheduler_spec['name'] == 'LinearToZero':\n LRSchedulerClass = getattr(torch.optim.lr_scheduler, 'LambdaLR')\n frame = float(lr_scheduler_spec['frame'])\n lr_scheduler = LRSchedulerClass(optim, lr_lambda=lambda x: 1 - x / frame)\n else:\n LRSchedulerClass = getattr(torch.optim.lr_scheduler, lr_scheduler_spec['name'])\n lr_scheduler_spec = ps.omit(lr_scheduler_spec, 'name')\n lr_scheduler = LRSchedulerClass(optim, **lr_scheduler_spec)\n return lr_scheduler\n\n\ndef get_optim(net, optim_spec):\n '''Helper to parse optim param and construct optim for net'''\n OptimClass = getattr(torch.optim, optim_spec['name'])\n optim_spec = ps.omit(optim_spec, 'name')\n if torch.is_tensor(net): # for non-net tensor variable\n optim = OptimClass([net], **optim_spec)\n else:\n optim = OptimClass(net.parameters(), **optim_spec)\n return optim\n\n\ndef get_policy_out_dim(body):\n '''Helper method to construct the policy network out_dim for a body according to is_discrete, action_type'''\n action_dim = body.action_dim\n if body.is_discrete:\n if body.action_type == 'multi_discrete':\n assert ps.is_list(action_dim), action_dim\n policy_out_dim = action_dim\n else:\n assert ps.is_integer(action_dim), action_dim\n policy_out_dim = action_dim\n else:\n assert ps.is_integer(action_dim), 
action_dim\n if action_dim == 1: # single action, use [loc, scale]\n policy_out_dim = 2\n else: # multi-action, use [locs], [scales]\n policy_out_dim = [action_dim, action_dim]\n return policy_out_dim\n\n\ndef get_out_dim(body, add_critic=False):\n '''Construct the NetClass out_dim for a body according to is_discrete, action_type, and whether to add a critic unit'''\n policy_out_dim = get_policy_out_dim(body)\n if add_critic:\n if ps.is_list(policy_out_dim):\n out_dim = policy_out_dim + [1]\n else:\n out_dim = [policy_out_dim, 1]\n else:\n out_dim = policy_out_dim\n return out_dim\n\n\ndef init_layers(net, init_fn_name):\n '''Primary method to initialize the weights of the layers of a network'''\n if init_fn_name is None:\n return\n\n # get nonlinearity\n nonlinearity = get_nn_name(net.hid_layers_activation).lower()\n if nonlinearity == 'leakyrelu':\n nonlinearity = 'leaky_relu' # guard name\n\n # get init_fn and add arguments depending on nonlinearity\n init_fn = getattr(nn.init, init_fn_name)\n if 'kaiming' in init_fn_name: # has 'nonlinearity' as arg\n assert nonlinearity in ['relu', 'leaky_relu'], f'Kaiming initialization not supported for {nonlinearity}'\n init_fn = partial(init_fn, nonlinearity=nonlinearity)\n elif 'orthogonal' in init_fn_name or 'xavier' in init_fn_name: # has 'gain' as arg\n gain = nn.init.calculate_gain(nonlinearity)\n init_fn = partial(init_fn, gain=gain)\n else:\n pass\n\n # finally, apply init_params to each layer in its modules\n net.apply(partial(init_params, init_fn=init_fn))\n\n\ndef init_params(module, init_fn):\n '''Initialize module's weights using init_fn, and biases to 0.0'''\n bias_init = 0.0\n classname = util.get_class_name(module)\n if 'Net' in classname: # skip if it's a net, not pytorch layer\n pass\n elif classname == 'BatchNorm2d':\n pass # can't init BatchNorm2d\n elif any(k in classname for k in ('Conv', 'Linear')):\n init_fn(module.weight)\n nn.init.constant_(module.bias, bias_init)\n elif 'GRU' in classname:\n for name, param in module.named_parameters():\n if 'weight' in name:\n init_fn(param)\n elif 'bias' in name:\n nn.init.constant_(param, bias_init)\n else:\n pass\n\n\n# params methods\n\n\ndef save(net, model_path):\n '''Save model weights to path'''\n torch.save(net.state_dict(), util.smart_path(model_path))\n\n\ndef save_algorithm(algorithm, ckpt=None):\n '''Save all the nets for an algorithm'''\n agent = algorithm.agent\n net_names = algorithm.net_names\n model_prepath = agent.spec['meta']['model_prepath']\n if ckpt is not None:\n model_prepath += f'_ckpt-{ckpt}'\n for net_name in net_names:\n net = getattr(algorithm, net_name)\n model_path = f'{model_prepath}_{net_name}_model.pt'\n save(net, model_path)\n optim_name = net_name.replace('net', 'optim')\n optim = getattr(algorithm, optim_name, None)\n if optim is not None: # only trainable net has optim\n optim_path = f'{model_prepath}_{net_name}_optim.pt'\n save(optim, optim_path)\n logger.debug(f'Saved algorithm {util.get_class_name(algorithm)} nets {net_names} to {model_prepath}_*.pt')\n\n\ndef load(net, model_path):\n '''Save model weights from a path into a net module'''\n device = None if torch.cuda.is_available() else 'cpu'\n net.load_state_dict(torch.load(util.smart_path(model_path), map_location=device))\n\n\ndef load_algorithm(algorithm):\n '''Save all the nets for an algorithm'''\n agent = algorithm.agent\n net_names = algorithm.net_names\n model_prepath = agent.spec['meta']['model_prepath']\n if util.get_lab_mode() == 'enjoy':\n model_prepath += '_ckpt-best'\n 
logger.info(f'Loading algorithm {util.get_class_name(algorithm)} nets {net_names} from {model_prepath}_*.pt')\n for net_name in net_names:\n net = getattr(algorithm, net_name)\n model_path = f'{model_prepath}_{net_name}_model.pt'\n load(net, model_path)\n optim_name = net_name.replace('net', 'optim')\n optim = getattr(algorithm, optim_name, None)\n if optim is not None: # only trainable net has optim\n optim_path = f'{model_prepath}_{net_name}_optim.pt'\n load(optim, optim_path)\n\n\ndef copy(src_net, tar_net):\n '''Copy model weights from src to target'''\n tar_net.load_state_dict(src_net.state_dict())\n\n\ndef polyak_update(src_net, tar_net, old_ratio=0.5):\n '''\n Polyak weight update to update a target tar_net, retain old weights by its ratio, i.e.\n target <- old_ratio * source + (1 - old_ratio) * target\n '''\n for src_param, tar_param in zip(src_net.parameters(), tar_net.parameters()):\n tar_param.data.copy_(old_ratio * src_param.data + (1.0 - old_ratio) * tar_param.data)\n\n\ndef to_check_train_step():\n '''Condition for running assert_trained'''\n return os.environ.get('PY_ENV') == 'test' or util.get_lab_mode() == 'dev'\n\n\ndef dev_check_train_step(fn):\n '''\n Decorator to check if net.train_step actually updates the network weights properly\n Triggers only if to_check_train_step is True (dev/test mode)\n @example\n\n @net_util.dev_check_train_step\n def train_step(self, ...):\n ...\n '''\n @wraps(fn)\n def check_fn(*args, **kwargs):\n if not to_check_train_step():\n return fn(*args, **kwargs)\n\n net = args[0] # first arg self\n # get pre-update parameters to compare\n pre_params = [param.clone() for param in net.parameters()]\n\n # run train_step, get loss\n loss = fn(*args, **kwargs)\n assert not torch.isnan(loss).any(), loss\n\n # get post-update parameters to compare\n post_params = [param.clone() for param in net.parameters()]\n if loss == 0.0:\n # if loss is 0, there should be no updates\n # TODO if without momentum, parameters should not change too\n for p_name, param in net.named_parameters():\n assert param.grad.norm() == 0\n else:\n # check parameter updates\n try:\n assert not all(torch.equal(w1, w2) for w1, w2 in zip(pre_params, post_params)), f'Model parameter is not updated in train_step(), check if your tensor is detached from graph. Loss: {loss:g}'\n except Exception as e:\n logger.error(e)\n if os.environ.get('PY_ENV') == 'test':\n # raise error if in unit test\n raise(e)\n\n # check grad norms\n min_norm, max_norm = 0.0, 1e5\n for p_name, param in net.named_parameters():\n try:\n grad_norm = param.grad.norm()\n assert min_norm < grad_norm < max_norm, f'Gradient norm for {p_name} is {grad_norm:g}, fails the extreme value check {min_norm} < grad_norm < {max_norm}. Loss: {loss:g}. Check your network and loss computation.'\n except Exception as e:\n logger.warning(e)\n logger.debug('Passed network parameter update check.')\n # store grad norms for debugging\n net.store_grad_norms()\n return loss\n return check_fn\n\n\ndef get_grad_norms(algorithm):\n '''Gather all the net's grad norms of an algorithm for debugging'''\n grad_norms = []\n for net_name in algorithm.net_names:\n net = getattr(algorithm, net_name)\n if net.grad_norms is not None:\n grad_norms.extend(net.grad_norms)\n return grad_norms\n\n\ndef init_global_nets(algorithm):\n '''\n Initialize global_nets for Hogwild using an identical instance of an algorithm from an isolated Session\n in spec.meta.distributed, specify either:\n - 'shared': global network parameter is shared all the time. 
In this mode, algorithm local network will be replaced directly by global_net via overriding by identify attribute name\n - 'synced': global network parameter is periodically synced to local network after each gradient push. In this mode, algorithm will keep a separate reference to `global_{net}` for each of its network\n '''\n dist_mode = algorithm.agent.spec['meta']['distributed']\n assert dist_mode in ('shared', 'synced'), f'Unrecognized distributed mode'\n global_nets = {}\n for net_name in algorithm.net_names:\n optim_name = net_name.replace('net', 'optim')\n if not hasattr(algorithm, optim_name): # only for trainable network, i.e. has an optim\n continue\n g_net = getattr(algorithm, net_name)\n g_net.share_memory() # make net global\n if dist_mode == 'shared': # use the same name to override the local net\n global_nets[net_name] = g_net\n else: # keep a separate reference for syncing\n global_nets[f'global_{net_name}'] = g_net\n # if optim is Global, set to override the local optim and its scheduler\n optim = getattr(algorithm, optim_name)\n if hasattr(optim, 'share_memory'):\n optim.share_memory() # make optim global\n global_nets[optim_name] = optim\n if hasattr(optim, 'optimizer'): # for Lookahead with an inner optimizer\n global_nets[f'{optim_name}_optimizer'] = optim.optimizer\n lr_scheduler_name = net_name.replace('net', 'lr_scheduler')\n lr_scheduler = getattr(algorithm, lr_scheduler_name)\n global_nets[lr_scheduler_name] = lr_scheduler\n logger.info(f'Initialized global_nets attr {list(global_nets.keys())} for Hogwild')\n return global_nets\n\n\ndef set_global_nets(algorithm, global_nets):\n '''For Hogwild, set attr built in init_global_nets above. Use in algorithm init.'''\n # set attr first so algorithm always has self.global_{net} to pass into train_step\n for net_name in algorithm.net_names:\n setattr(algorithm, f'global_{net_name}', None)\n # set attr created in init_global_nets\n if global_nets is not None:\n # handle inner-optimizer recovery\n inner_opt_keys = [k for k in global_nets if k.endswith('_optimizer')]\n for inner_opt_key in inner_opt_keys:\n opt = global_nets[inner_opt_key.replace('_optimizer', '')] # optimizer which has a inner optimizer\n setattr(opt, 'optimizer', global_nets.pop(inner_opt_key))\n # set global nets and optims\n util.set_attr(algorithm, global_nets)\n logger.info(f'Set global_nets attr {list(global_nets.keys())} for Hogwild')\n\n\ndef push_global_grads(net, global_net):\n '''Push gradients to global_net, call inside train_step between loss.backward() and optim.step()'''\n for param, global_param in zip(net.parameters(), global_net.parameters()):\n if global_param.grad is not None:\n return # quick skip\n global_param._grad = param.grad\n",
"from slm_lab.experiment.control import make_agent_env\nfrom slm_lab.spec import spec_util\nimport numpy as np\nimport pandas as pd\nimport pytest\n\n\n@pytest.fixture(scope='session')\ndef test_spec():\n spec = spec_util.get('experimental/misc/base.json', 'base_case_openai')\n spec_util.tick(spec, 'trial')\n spec = spec_util.override_spec(spec, 'test')\n return spec\n\n\n@pytest.fixture\ndef test_df():\n data = pd.DataFrame({\n 'integer': [1, 2, 3],\n 'square': [1, 4, 9],\n 'letter': ['a', 'b', 'c'],\n })\n assert isinstance(data, pd.DataFrame)\n return data\n\n\n@pytest.fixture\ndef test_dict():\n data = {\n 'a': 1,\n 'b': 2,\n 'c': 3,\n }\n assert isinstance(data, dict)\n return data\n\n\n@pytest.fixture\ndef test_list():\n data = [1, 2, 3]\n assert isinstance(data, list)\n return data\n\n\n@pytest.fixture\ndef test_obj():\n class Foo:\n bar = 'bar'\n return Foo()\n\n\n@pytest.fixture\ndef test_str():\n data = 'lorem ipsum dolor'\n assert isinstance(data, str)\n return data\n\n\n@pytest.fixture(scope='session', params=[\n (\n 2,\n [\n [np.asarray([1, 1, 1, 1]), 1, 1, np.asarray([2, 2, 2, 2]), 1],\n [np.asarray([2, 2, 2, 2]), 1, 2, np.asarray([3, 3, 3, 3]), 2],\n [np.asarray([3, 3, 3, 3]), 1, 3, np.asarray([4, 4, 4, 4]), 3],\n [np.asarray([4, 4, 4, 4]), 1, 4, np.asarray([5, 5, 5, 5]), 4],\n [np.asarray([5, 5, 5, 5]), 1, 5, np.asarray([6, 6, 6, 6]), 5],\n [np.asarray([6, 6, 6, 6]), 1, 6, np.asarray([7, 7, 7, 7]), 6],\n [np.asarray([7, 7, 7, 7]), 1, 7, np.asarray([8, 8, 8, 8]), 7],\n [np.asarray([8, 8, 8, 8]), 1, 8, np.asarray([9, 9, 9, 9]), 8],\n ]\n ),\n])\ndef test_memory(request):\n spec = spec_util.get('experimental/misc/base.json', 'base_memory')\n spec_util.tick(spec, 'trial')\n agent, env = make_agent_env(spec)\n res = (agent.body.memory, ) + request.param\n return res\n\n\n@pytest.fixture(scope='session', params=[\n (\n 2,\n [\n [np.asarray([1, 1, 1, 1]), 1, 1, np.asarray([2, 2, 2, 2]), 0],\n [np.asarray([2, 2, 2, 2]), 1, 2, np.asarray([3, 3, 3, 3]), 0],\n [np.asarray([3, 3, 3, 3]), 1, 3, np.asarray([4, 4, 4, 4]), 0],\n [np.asarray([4, 4, 4, 4]), 1, 4, np.asarray([5, 5, 5, 5]), 0],\n [np.asarray([5, 5, 5, 5]), 1, 5, np.asarray([6, 6, 6, 6]), 0],\n [np.asarray([6, 6, 6, 6]), 1, 6, np.asarray([7, 7, 7, 7]), 0],\n [np.asarray([7, 7, 7, 7]), 1, 7, np.asarray([8, 8, 8, 8]), 0],\n [np.asarray([8, 8, 8, 8]), 1, 8, np.asarray([9, 9, 9, 9]), 1],\n ]\n ),\n])\ndef test_on_policy_episodic_memory(request):\n spec = spec_util.get('experimental/misc/base.json', 'base_on_policy_memory')\n spec_util.tick(spec, 'trial')\n agent, env = make_agent_env(spec)\n res = (agent.body.memory, ) + request.param\n return res\n\n\n@pytest.fixture(scope='session', params=[\n (\n 4,\n [\n [np.asarray([1, 1, 1, 1]), 1, 1, np.asarray([2, 2, 2, 2]), 0],\n [np.asarray([2, 2, 2, 2]), 1, 2, np.asarray([3, 3, 3, 3]), 0],\n [np.asarray([3, 3, 3, 3]), 1, 3, np.asarray([4, 4, 4, 4]), 0],\n [np.asarray([4, 4, 4, 4]), 1, 4, np.asarray([5, 5, 5, 5]), 0],\n [np.asarray([5, 5, 5, 5]), 1, 5, np.asarray([6, 6, 6, 6]), 0],\n [np.asarray([6, 6, 6, 6]), 1, 6, np.asarray([7, 7, 7, 7]), 0],\n [np.asarray([7, 7, 7, 7]), 1, 7, np.asarray([8, 8, 8, 8]), 0],\n [np.asarray([8, 8, 8, 8]), 1, 8, np.asarray([9, 9, 9, 9]), 1],\n ]\n ),\n])\ndef test_on_policy_batch_memory(request):\n spec = spec_util.get('experimental/misc/base.json', 'base_on_policy_batch_memory')\n spec_util.tick(spec, 'trial')\n agent, env = make_agent_env(spec)\n res = (agent.body.memory, ) + request.param\n return res\n\n\n@pytest.fixture(scope='session', params=[\n (\n 
4,\n [\n [np.asarray([1, 1, 1, 1]), 1, 1, np.asarray([2, 2, 2, 2]), 0, 1000],\n [np.asarray([2, 2, 2, 2]), 1, 2, np.asarray([3, 3, 3, 3]), 0, 0],\n [np.asarray([3, 3, 3, 3]), 1, 3, np.asarray([4, 4, 4, 4]), 0, 0],\n [np.asarray([4, 4, 4, 4]), 1, 4, np.asarray([5, 5, 5, 5]), 0, 0],\n [np.asarray([5, 5, 5, 5]), 1, 5, np.asarray([6, 6, 6, 6]), 0, 1000],\n [np.asarray([6, 6, 6, 6]), 1, 6, np.asarray([7, 7, 7, 7]), 0, 0],\n [np.asarray([7, 7, 7, 7]), 1, 7, np.asarray([8, 8, 8, 8]), 0, 0],\n [np.asarray([8, 8, 8, 8]), 1, 8, np.asarray([9, 9, 9, 9]), 1, 1000],\n ]\n ),\n])\ndef test_prioritized_replay_memory(request):\n spec = spec_util.get('experimental/misc/base.json', 'base_prioritized_replay_memory')\n spec_util.tick(spec, 'trial')\n agent, env = make_agent_env(spec)\n res = (agent.body.memory, ) + request.param\n return res\n"
] |
[
[
"torch.nn.Linear",
"torch.isnan",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.is_tensor",
"torch.cuda.is_available",
"torch.nn.init.calculate_gain",
"torch.equal"
],
[
"pandas.DataFrame",
"numpy.asarray"
]
] |
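The conftest record above builds each memory fixture as (agent.body.memory, ) + request.param, so consuming tests receive a (memory, batch_size, experiences) tuple. A minimal sketch of such a consumer, assuming it lives next to this conftest; the test name and assertions are illustrative, not taken from the repository:

import numpy as np


def test_memory_fixture_shape(test_memory):
    # Unpack (agent.body.memory, ) + request.param from the session-scoped fixture.
    memory, batch_size, experiences = test_memory
    assert batch_size == 2
    assert len(experiences) == 8
    # Each experience row starts with a state array, followed by scalars and the next state.
    assert np.array_equal(experiences[0][0], np.asarray([1, 1, 1, 1]))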
yoyonel/DailyCodingProblem
|
[
"5fb1e844fa975ebc0bd0e2818ea20ed5696170fa"
] |
[
"src/coding_interview_preparation/backtracking_algorithm_n_queens/app.py"
] |
[
"\"\"\"\n\n\"\"\"\nfrom functools import partial\nfrom itertools import permutations\nfrom matplotlib import pyplot\nimport time\nimport timeit\nfrom typing import Iterator, Tuple\n\n\ndef handle_return(generator, func):\n \"\"\"\n https://stackoverflow.com/questions/34073370/best-way-to-receive-the-return-value-from-a-python-generator\n https://www.python.org/dev/peps/pep-0380/\n\n :param generator:\n :param func:\n :return:\n \"\"\"\n returned = yield from generator\n func(returned)\n\n\ndef wrapper_timeit(method):\n def timed(*args, **kw):\n ts = time.time()\n result = method(*args, **kw)\n te = time.time()\n if 'log_time' in kw:\n name = kw.get('log_name', method.__name__.upper())\n kw['log_time'][name] = int((te - ts) * 1000)\n else:\n print(f'{method.__name__} {(te - ts) * 1000} ms')\n return result\n return timed\n\n\ndef find_queens_placements_with_permutations(n: int) -> Iterator[Tuple[int, int]]:\n \"\"\"\n https://en.wikipedia.org/wiki/Eight_queens_puzzle\n\n (Naive) Solution with permutations and generator\n\n :param n:\n :return:\n \"\"\"\n metric_ops = 0\n\n # loop on all permutations\n # and filter them\n for p_q in permutations(range(n), n):\n on_diag = False\n for q_col, q_row in enumerate(p_q[:-1]):\n # loop through the rest of queens (next columns)\n for o_q_col, o_q_row in enumerate(p_q[q_col + 1:], start=1):\n # on diag of 'root' queen (q_col, q_row) ?\n on_diag = (q_row - o_q_col) == o_q_row or (q_row + o_q_col) == o_q_row\n metric_ops += 1\n if on_diag:\n break\n # if one other queen in opposition with the root queen, abort the mission :p\n if on_diag:\n break\n # if no queens on oppositions return/send this queens placement\n if not on_diag:\n yield p_q\n\n return metric_ops\n\n\ndef find_queens_placements_with_backtracking(n: int):\n \"\"\"\n\n :param n:\n :return:\n \"\"\"\n metrics = {'nb_ops' : 0}\n\n queens = []\n all_queens_placements = []\n\n def isSafe(row, col) -> bool:\n for r, c in queens:\n if r == row:\n return False\n if abs(r - row) == abs(c - col):\n return False\n return True\n\n def placeQueens(col: int):\n if col >= n:\n all_queens_placements.append(queens.copy())\n return False\n row = 0\n while row < n:\n metrics['nb_ops'] += 1\n if isSafe(row, col):\n queens.append([row, col])\n if placeQueens(col+1):\n return True\n queens.pop()\n row += 1\n return False\n\n placeQueens(0)\n for queens_placement in all_queens_placements:\n yield queens_placement\n\n return metrics\n\n\ndef plotTC(fn, nMin, nMax, nInc, nTests, label=\"\"):\n \"\"\"\n Run timer and plot time complexity\n \"\"\"\n x = []\n y = []\n for i in range(nMin, nMax, nInc):\n N = i\n testNTimer = timeit.Timer(partial(fn, N))\n t = testNTimer.timeit(number=nTests)\n x.append(i)\n y.append(t)\n return pyplot.plot(x, y, 'o', label=label)\n\n\ndef main():\n def show_return(value):\n print(f'[METRIC] Number of operations: {value}')\n\n n = 10\n\n @wrapper_timeit\n def compute_with_permutations():\n gen_queens_placements = handle_return(find_queens_placements_with_permutations(n), show_return)\n for id_solution, queens_placement in enumerate(gen_queens_placements, 1):\n # print(f\"#{id_solution}: {queens_placement}\")\n pass\n print(f\"Nb queens placements found with n={n} => {id_solution}\")\n\n print(\"Compute with permutations\")\n compute_with_permutations()\n\n print()\n\n @wrapper_timeit\n def compute_with_backtracking():\n gen_queens_placements = handle_return(find_queens_placements_with_backtracking(n), show_return)\n for id_solution, queens_placement in enumerate(gen_queens_placements, 
1):\n # print(f\"#{id_solution}: {[q[0] for q in queens_placement]}\")\n pass\n print(f\"Nb queens placements found with n={n} => {id_solution}\")\n\n print(\"Compute with backtracking\")\n compute_with_backtracking()\n\n # nTests = 10\n # testNTimer = timeit.Timer(partial(lambda n: list(find_queens_placements_with_backtracking(n)), n))\n # t = testNTimer.timeit(number=nTests)\n # print(t)\n\n # p1 = plotTC(lambda n: list(find_queens_placements_with_backtracking(n)), 2, n, 1, 10, label=\"with backtracking\")\n # p2 = plotTC(lambda n: list(find_queens_placements_with_permutations(n)), 2, n, 1, 10, label=\"with permutations\")\n # pyplot.legend()\n\n pyplot.show()\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
]
] |
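A short usage sketch for the n-queens module above, assuming it is importable as app (the module name is inferred from the recorded file path). For n = 6 both strategies enumerate the same four placements; the backtracking version simply performs far fewer board checks:

from app import (find_queens_placements_with_backtracking,
                 find_queens_placements_with_permutations)

n = 6
via_permutations = list(find_queens_placements_with_permutations(n))
via_backtracking = list(find_queens_placements_with_backtracking(n))
print(len(via_permutations), len(via_backtracking))  # 4 4 for n = 6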
Aethiles/ppo-pytorch
|
[
"b3fb6bdb466056cf84115ca7b0af21d2b48185ae"
] |
[
"test/understanding/test_batches.py"
] |
[
"import numpy as np\nimport torch.utils.data.sampler as samplers\nimport unittest\n\n\nclass BatchTest(unittest.TestCase):\n def test_different_batches(self):\n mini_batch_size = 3\n sampler = samplers.BatchSampler(sampler=samplers.SubsetRandomSampler(range(12)),\n batch_size=mini_batch_size,\n drop_last=True)\n runs = []\n for i in range(100):\n indices = []\n for batch in sampler:\n indices.append(batch)\n runs.append(indices)\n for i, j in zip(range(100), range(100)):\n if i != j:\n self.assertFalse(np.array_equal(runs[i], runs[j]))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.array_equal"
]
] |
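In the test recorded above, zip(range(100), range(100)) only pairs equal indices, so the np.array_equal assertion never fires. The sketch below (illustrative, not from the repository) compares two separate passes over the same BatchSampler instead; SubsetRandomSampler draws a fresh permutation of the 12 indices on every iteration, so the two passes almost never match:

import numpy as np
import torch.utils.data.sampler as samplers

sampler = samplers.BatchSampler(sampler=samplers.SubsetRandomSampler(range(12)),
                                batch_size=3,
                                drop_last=True)
first_pass = [batch for batch in sampler]
second_pass = [batch for batch in sampler]
print(np.array_equal(first_pass, second_pass))  # almost always False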
SuperSaiyan-God/PhotoMath
|
[
"58f7d88dde5f1e0ccc3cabc2499520d08705c734"
] |
[
"object_detection/xml_to_csv.py"
] |
[
"import os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\n\n\ndef xml_to_csv(path):\n xml_list = []\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for member in root.findall('object'):\n value = (root.find('filename').text,\n int(root.find('size')[0].text),\n int(root.find('size')[1].text),\n member[0].text,\n int(member[4][0].text),\n int(member[4][1].text),\n int(member[4][2].text),\n int(member[4][3].text)\n )\n xml_list.append(value)\n column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']\n xml_df = pd.DataFrame(xml_list, columns=column_name)\n return xml_df\n\n\ndef main():\n for directory in ['train', 'test']:\n image_path = os.path.join(os.getcwd(), 'images/{}'.format(directory))\n xml_df = xml_to_csv(image_path)\n xml_df.to_csv('data/{}_labels.csv'.format(directory), index=None)\n print('Successfully converted xml to csv.')\n\n\nmain()"
] |
[
[
"pandas.DataFrame"
]
] |
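The only third-party call recorded for xml_to_csv.py is pandas.DataFrame. A minimal sketch of that final tabulation step, using the column names from the script above and a single hypothetical annotation row:

import pandas as pd

column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
xml_list = [('plus_01.jpg', 640, 480, 'plus', 12, 30, 64, 88)]  # hypothetical values
xml_df = pd.DataFrame(xml_list, columns=column_name)
xml_df.to_csv('train_labels.csv', index=None)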
shikanggao/meta-dataset
|
[
"7b1e99009516eda3bbd5e740e178ebc37e2d6767"
] |
[
"meta_dataset/models/functional_backbones.py"
] |
[
"# coding=utf-8\n# Copyright 2021 The Meta-Dataset Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Backbone-related code.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom __future__ import print_function\n\nimport collections\nimport functools\nfrom absl import logging\nimport gin.tf\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\n\nOPTIMIZER_KEYWORDS = ('adam', 'power', 'global_step',\n 'data_dependent_init_done')\nEMBEDDING_KEYWORDS = ('conv', 'resnet', 'fully_connected')\nHEAD_CLASSIFIER_KEYWORDS = ('fc',)\n\n\ndef is_backbone_variable(variable_name, only_if=lambda x: True):\n \"\"\"Returns True if `variable_name` refers to a backbone variable.\n\n Args:\n variable_name: A string; the name of a `tf.Variable` that will be checked to\n determine whether the variable belongs to the backbone (embedding\n function) of a `Learner`.\n only_if: A callable that returns `True` when the name of a `tf.Variable`\n satisfies some condition; by default `only_if` returns `True` for any\n argument.\n\n Returns:\n `True` if the `tf.Variable` to which `variable_name` refers belongs to a\n backbone (embedding function) and `only_if(variable_name)` is also\n satisfied.\n \"\"\"\n variable_name = variable_name.lower()\n\n # We restore all embedding variables.\n is_embedding_var = any(\n keyword in variable_name for keyword in EMBEDDING_KEYWORDS)\n\n # We exclude all head classifier variables.\n is_head_classifier_var = any(\n keyword in variable_name for keyword in HEAD_CLASSIFIER_KEYWORDS)\n\n # We exclude 'relation*' variables as they are not present in a pretrained\n # checkpoint.\n is_relationnet_var = variable_name.startswith('relation')\n\n # We exclude optimizer variables, as the episodic finetuning procedure is a\n # different optimization problem than the original training objective.\n is_optimizer_var = any(\n keyword in variable_name for keyword in OPTIMIZER_KEYWORDS)\n\n return (only_if(variable_name) and is_embedding_var and\n not is_head_classifier_var and not is_relationnet_var and\n not is_optimizer_var)\n\n\ndef conv2d(x, w, stride=1, b=None, padding='SAME', rate=1):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n h = tf.nn.conv2d(\n x,\n w,\n strides=[1, stride, stride, 1],\n padding=padding,\n dilations=[1, rate, rate, 1])\n if b is not None:\n h += b\n return h\n\n\ndef relu(x, use_bounded_activation=False):\n if use_bounded_activation:\n return tf.nn.relu6(x)\n else:\n return tf.nn.relu(x)\n\n\n# pylint: disable=g-wrong-blank-lines\n# TODO(tylerzhu): Accumulate batch norm statistics (moving {var, mean})\n# during training and use them during testing. 
However need to be careful\n# about leaking information across episodes.\n# Note: we should use ema object to accumulate the statistics for compatibility\n# with TF Eager.\n@gin.configurable('bn', allowlist=['use_ema', 'ema_epsilon'])\ndef bn(x,\n params=None,\n moments=None,\n backprop_through_moments=True,\n use_ema=False,\n is_training=True,\n ema_epsilon=.9):\n \"\"\"Batch normalization.\n\n The usage should be as follows: If x is the support images, moments should be\n None so that they are computed from the support set examples. On the other\n hand, if x is the query images, the moments argument should be used in order\n to pass in the mean and var that were computed from the support set.\n\n Args:\n x: inputs.\n params: None or a dict containing the values of the offset and scale params.\n moments: None or a dict containing the values of the mean and var to use for\n batch normalization.\n backprop_through_moments: Whether to allow gradients to flow through the\n given support set moments. Only applies to non-transductive batch norm.\n use_ema: apply moving averages of batch norm statistics, or update them,\n depending on whether we are training or testing. Note that passing\n moments will override this setting, and result in neither updating or\n using ema statistics. This is important to make sure that episodic\n learners don't update ema statistics a second time when processing\n queries.\n is_training: if use_ema=True, this determines whether to apply the moving\n averages, or update them.\n ema_epsilon: if updating moving averages, use this value for the exponential\n moving averages.\n\n Returns:\n output: The result of applying batch normalization to the input.\n params: The updated params.\n moments: The updated moments.\n \"\"\"\n params_keys, params_vars, moments_keys, moments_vars = [], [], [], []\n\n with tf.variable_scope('batch_norm'):\n scope_name = tf.get_variable_scope().name\n\n if use_ema:\n ema_shape = [1, 1, 1, x.get_shape().as_list()[-1]]\n mean_ema = tf.get_variable(\n 'mean_ema',\n shape=ema_shape,\n initializer=tf.initializers.zeros(),\n trainable=False)\n var_ema = tf.get_variable(\n 'var_ema',\n shape=ema_shape,\n initializer=tf.initializers.ones(),\n trainable=False)\n\n if moments is not None:\n if backprop_through_moments:\n mean = moments[scope_name + '/mean']\n var = moments[scope_name + '/var']\n else:\n # This variant does not yield good resutls.\n mean = tf.stop_gradient(moments[scope_name + '/mean'])\n var = tf.stop_gradient(moments[scope_name + '/var'])\n elif use_ema and not is_training:\n mean = mean_ema\n var = var_ema\n else:\n # If not provided, compute the mean and var of the current batch.\n\n replica_ctx = tf.distribute.get_replica_context()\n if replica_ctx:\n # from tensorflow/python/keras/layers/normalization/batch_normalization.py # pylint: disable=line-too-long\n axes = list(range(len(x.shape) - 1))\n local_sum = tf.reduce_sum(x, axis=axes, keepdims=True)\n local_squared_sum = tf.reduce_sum(\n tf.square(x), axis=axes, keepdims=True)\n batch_size = tf.cast(tf.shape(x)[0], tf.float32)\n x_sum, x_squared_sum, global_batch_size = (\n replica_ctx.all_reduce('sum',\n [local_sum, local_squared_sum, batch_size]))\n\n axes_vals = [(tf.shape(x))[i] for i in range(1, len(axes))]\n multiplier = tf.cast(tf.reduce_prod(axes_vals), tf.float32)\n multiplier = multiplier * global_batch_size\n\n mean = x_sum / multiplier\n x_squared_mean = x_squared_sum / multiplier\n # var = E(x^2) - E(x)^2\n var = x_squared_mean - tf.square(mean)\n else:\n mean, var 
= tf.nn.moments(\n x, axes=list(range(len(x.shape) - 1)), keep_dims=True)\n\n # Only update ema's if training and we computed the moments in the current\n # call. Note: at test time for episodic learners, ema's may be passed\n # from the support set to the query set, even if it's not really needed.\n if use_ema and is_training and moments is None:\n replica_ctx = tf.distribute.get_replica_context()\n mean_upd = tf.assign(mean_ema,\n mean_ema * ema_epsilon + mean * (1.0 - ema_epsilon))\n var_upd = tf.assign(var_ema,\n var_ema * ema_epsilon + var * (1.0 - ema_epsilon))\n updates = tf.group([mean_upd, var_upd])\n if replica_ctx:\n tf.add_to_collection(\n tf.GraphKeys.UPDATE_OPS,\n tf.cond(\n tf.equal(replica_ctx.replica_id_in_sync_group, 0),\n lambda: updates, tf.no_op))\n else:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, updates)\n\n moments_keys += [scope_name + '/mean']\n moments_vars += [mean]\n moments_keys += [scope_name + '/var']\n moments_vars += [var]\n\n if params is None:\n offset = tf.get_variable(\n 'offset',\n shape=mean.get_shape().as_list(),\n initializer=tf.initializers.zeros())\n scale = tf.get_variable(\n 'scale',\n shape=var.get_shape().as_list(),\n initializer=tf.initializers.ones())\n else:\n offset = params[scope_name + '/offset']\n scale = params[scope_name + '/scale']\n\n params_keys += [scope_name + '/offset']\n params_vars += [offset]\n params_keys += [scope_name + '/scale']\n params_vars += [scale]\n\n output = tf.nn.batch_normalization(x, mean, var, offset, scale, 0.00001)\n\n params = collections.OrderedDict(zip(params_keys, params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n\n return output, params, moments\n\n\ndef weight_variable(shape, weight_decay):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n initial = tf.initializers.truncated_normal(stddev=0.1)\n return tf.get_variable(\n 'weight',\n shape=shape,\n initializer=initial,\n regularizer=tf.keras.regularizers.L2(weight_decay))\n\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.initializers.constant(0.1)\n return tf.get_variable('bias', shape=shape, initializer=initial)\n\n\ndef dense(x, output_size, weight_decay, activation_fn=tf.nn.relu, params=None):\n \"\"\"Fully connected layer implementation.\n\n Args:\n x: tf.Tensor, input.\n output_size: int, number features in the fully connected layer.\n weight_decay: float, scaling constant for L2 weight decay on weight\n variables.\n activation_fn: function, to process pre-activations, namely x*w+b.\n params: None or a dict containing the values of the weight and bias params.\n If None, default variables are used.\n\n Returns:\n output: The result of applying batch normalization to the input.\n params: dict, that includes parameters used during the calculation.\n \"\"\"\n with tf.variable_scope('dense'):\n scope_name = tf.get_variable_scope().name\n\n if len(x.shape) > 2:\n x = tf.layers.flatten(x),\n input_size = x.get_shape().as_list()[-1]\n\n w_name = scope_name + '/kernel'\n b_name = scope_name + '/bias'\n if params is None:\n w = weight_variable([input_size, output_size], weight_decay)\n b = bias_variable([output_size])\n else:\n w = params[w_name]\n b = params[b_name]\n\n x = tf.nn.xw_plus_b(x, w, b)\n params = collections.OrderedDict(zip([w_name, b_name], [w, b]))\n x = activation_fn(x)\n return x, params\n\n\ndef conv(x,\n conv_size,\n depth,\n stride,\n weight_decay,\n padding='SAME',\n rate=1,\n params=None):\n \"\"\"A 
block that performs convolution.\"\"\"\n params_keys, params_vars = [], []\n scope_name = tf.get_variable_scope().name\n input_depth = x.get_shape().as_list()[-1]\n if params is None:\n w_conv = weight_variable([conv_size[0], conv_size[1], input_depth, depth],\n weight_decay)\n else:\n w_conv = params[scope_name + '/kernel']\n\n params_keys += [scope_name + '/kernel']\n params_vars += [w_conv]\n\n x = conv2d(x, w_conv, stride=stride, padding=padding, rate=rate)\n params = collections.OrderedDict(zip(params_keys, params_vars))\n\n return x, params\n\n\nALLOWLIST = ['batch_norm_fn']\n\n\n@gin.configurable('bn_wrapper', allowlist=ALLOWLIST)\ndef _bn_wrapper(\n x,\n batch_norm_fn=bn,\n params=None,\n moments=None,\n is_training=True,\n backprop_through_moments=True):\n \"\"\"Returns the result of batch normalization.\"\"\"\n return batch_norm_fn(\n x,\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments)\n\n\ndef conv_bn(\n x,\n conv_size,\n depth,\n stride,\n weight_decay,\n padding='SAME',\n params=None,\n moments=None,\n is_training=True,\n rate=1,\n backprop_through_moments=True,\n):\n \"\"\"A block that performs convolution, followed by batch-norm.\"\"\"\n params_keys, params_vars = [], []\n moments_keys, moments_vars = [], []\n x, conv_params = conv(\n x,\n conv_size,\n depth,\n stride,\n weight_decay,\n padding=padding,\n params=params,\n rate=rate)\n params_keys.extend(conv_params.keys())\n params_vars.extend(conv_params.values())\n\n x, bn_params, bn_moments = _bn_wrapper(\n x,\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments,\n )\n params_keys.extend(bn_params.keys())\n params_vars.extend(bn_params.values())\n moments_keys.extend(bn_moments.keys())\n moments_vars.extend(bn_moments.values())\n\n params = collections.OrderedDict(zip(params_keys, params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n\n return x, params, moments\n\n\ndef bottleneck(\n x,\n depth,\n stride,\n weight_decay,\n params=None,\n moments=None,\n use_project=False,\n backprop_through_moments=True,\n is_training=True,\n input_rate=1,\n output_rate=1,\n use_bounded_activation=False,\n):\n \"\"\"ResNet18 residual block.\"\"\"\n params_keys, params_vars = [], []\n moments_keys, moments_vars = [], [] # means and vars of different layers.\n with tf.variable_scope('conv1'):\n h, conv_bn_params, conv_bn_moments = conv_bn(\n x,\n [3, 3],\n depth[0],\n stride,\n weight_decay,\n params=params,\n moments=moments,\n is_training=is_training,\n rate=input_rate,\n backprop_through_moments=backprop_through_moments,\n )\n params_keys.extend(conv_bn_params.keys())\n params_vars.extend(conv_bn_params.values())\n moments_keys.extend(conv_bn_moments.keys())\n moments_vars.extend(conv_bn_moments.values())\n\n h = relu(h, use_bounded_activation=use_bounded_activation)\n\n with tf.variable_scope('conv2'):\n h, conv_bn_params, conv_bn_moments = conv_bn(\n h,\n [3, 3],\n depth[1],\n stride=1,\n weight_decay=weight_decay,\n params=params,\n moments=moments,\n is_training=is_training,\n rate=output_rate,\n backprop_through_moments=backprop_through_moments,\n )\n if use_bounded_activation:\n h = tf.clip_by_value(h, -6.0, 6.0)\n\n params_keys.extend(conv_bn_params.keys())\n params_vars.extend(conv_bn_params.values())\n moments_keys.extend(conv_bn_moments.keys())\n moments_vars.extend(conv_bn_moments.values())\n\n with tf.variable_scope('identity'):\n if use_project:\n with 
tf.variable_scope('projection_conv'):\n x, conv_bn_params, conv_bn_moments = conv_bn(\n x,\n [1, 1],\n depth[1],\n stride,\n weight_decay,\n params=params,\n moments=moments,\n is_training=is_training,\n rate=1,\n backprop_through_moments=backprop_through_moments,\n )\n params_keys.extend(conv_bn_params.keys())\n params_vars.extend(conv_bn_params.values())\n moments_keys.extend(conv_bn_moments.keys())\n moments_vars.extend(conv_bn_moments.values())\n x = relu(x + h, use_bounded_activation=use_bounded_activation)\n\n params = collections.OrderedDict(zip(params_keys, params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n return x, params, moments\n\n\ndef _resnet(\n x,\n is_training,\n weight_decay,\n scope,\n reuse=tf.AUTO_REUSE,\n params=None,\n moments=None,\n backprop_through_moments=True,\n use_bounded_activation=False,\n blocks=(2, 2, 2, 2),\n max_stride=None,\n deeplab_alignment=True,\n keep_spatial_dims=False,\n):\n \"\"\"A ResNet network; ResNet18 by default.\"\"\"\n x = tf.stop_gradient(x)\n params_keys, params_vars = [], []\n moments_keys, moments_vars = [], []\n assert max_stride in [None, 4, 8, 16,\n 32], 'max_stride must be 4, 8, 16, 32, or None'\n with tf.variable_scope(scope, reuse=reuse):\n # We use DeepLab feature alignment rule to determine the input size.\n # Since the image size in the meta-dataset pipeline is a multiplier of 42,\n # e.g., [42, 84, 168], we align them to the closest sizes that conform to\n # the alignment rule and at the same time are larger. They are [65, 97, 193]\n # respectively. The aligned image size for 224 used in the ResNet work is\n # 225.\n #\n # References:\n # 1. ResNet https://arxiv.org/abs/1512.03385\n # 2. DeepLab https://arxiv.org/abs/1606.00915\n if deeplab_alignment:\n size = tf.cast(tf.shape(x)[1], tf.float32)\n aligned_size = tf.cast(tf.ceil(size / 32.0), tf.int32) * 32 + 1\n x = tf.image.resize_bilinear(\n x, size=[aligned_size, aligned_size], align_corners=True)\n\n with tf.variable_scope('conv1'):\n x, conv_bn_params, conv_bn_moments = conv_bn(\n x,\n [7, 7],\n 64,\n 2,\n weight_decay,\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments,\n )\n params_keys.extend(conv_bn_params.keys())\n params_vars.extend(conv_bn_params.values())\n moments_keys.extend(conv_bn_moments.keys())\n moments_vars.extend(conv_bn_moments.values())\n\n x = relu(x, use_bounded_activation=use_bounded_activation)\n\n def _bottleneck(x,\n i,\n depth,\n stride,\n params,\n moments,\n net_stride=1,\n net_rate=1):\n \"\"\"Wrapper for bottleneck.\"\"\"\n input_rate = net_rate\n output_rate = input_rate\n if i == 0:\n if max_stride and stride * net_stride > max_stride:\n output_stride = 1\n output_rate *= stride\n else:\n output_stride = stride\n else:\n output_stride = 1\n use_project = True if i == 0 else False\n\n x, bottleneck_params, bottleneck_moments = bottleneck(\n x,\n (depth, depth),\n output_stride,\n weight_decay,\n params=params,\n moments=moments,\n input_rate=input_rate,\n output_rate=output_rate,\n use_project=use_project,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments,\n )\n net_stride *= output_stride\n return x, bottleneck_params, bottleneck_moments, net_stride, output_rate\n\n net_stride = 4\n net_rate = 1\n\n with tf.variable_scope('conv2_x'):\n x = tf.nn.max_pool(\n x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')\n for i in range(blocks[0]):\n with tf.variable_scope('bottleneck_%d' % i):\n x, 
bottleneck_params, bottleneck_moments, net_stride, net_rate = _bottleneck(\n x, i, 64, 1, params, moments, net_stride, net_rate)\n params_keys.extend(bottleneck_params.keys())\n params_vars.extend(bottleneck_params.values())\n moments_keys.extend(bottleneck_moments.keys())\n moments_vars.extend(bottleneck_moments.values())\n\n with tf.variable_scope('conv3_x'):\n for i in range(blocks[1]):\n with tf.variable_scope('bottleneck_%d' % i):\n x, bottleneck_params, bottleneck_moments, net_stride, net_rate = _bottleneck(\n x, i, 128, 2, params, moments, net_stride, net_rate)\n params_keys.extend(bottleneck_params.keys())\n params_vars.extend(bottleneck_params.values())\n moments_keys.extend(bottleneck_moments.keys())\n moments_vars.extend(bottleneck_moments.values())\n\n with tf.variable_scope('conv4_x'):\n for i in range(blocks[2]):\n with tf.variable_scope('bottleneck_%d' % i):\n x, bottleneck_params, bottleneck_moments, net_stride, net_rate = _bottleneck(\n x, i, 256, 2, params, moments, net_stride, net_rate)\n params_keys.extend(bottleneck_params.keys())\n params_vars.extend(bottleneck_params.values())\n moments_keys.extend(bottleneck_moments.keys())\n moments_vars.extend(bottleneck_moments.values())\n\n with tf.variable_scope('conv5_x'):\n for i in range(blocks[3]):\n with tf.variable_scope('bottleneck_%d' % i):\n x, bottleneck_params, bottleneck_moments, net_stride, net_rate = _bottleneck(\n x, i, 512, 2, params, moments, net_stride, net_rate)\n params_keys.extend(bottleneck_params.keys())\n params_vars.extend(bottleneck_params.values())\n moments_keys.extend(bottleneck_moments.keys())\n moments_vars.extend(bottleneck_moments.values())\n if not keep_spatial_dims:\n # x.shape: [?, 1, 1, 512]\n x = tf.reduce_mean(x, axis=[1, 2], keepdims=True)\n x = tf.reshape(x, [-1, 512])\n params = collections.OrderedDict(zip(params_keys, params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n\n return_dict = {'embeddings': x, 'params': params, 'moments': moments}\n return return_dict\n\n\n@gin.configurable(\n 'resnet', allowlist=['weight_decay', 'max_stride', 'deeplab_alignment'])\ndef resnet(x,\n is_training,\n weight_decay,\n params=None,\n moments=None,\n reuse=tf.AUTO_REUSE,\n scope='resnet18',\n backprop_through_moments=True,\n use_bounded_activation=False,\n max_stride=None,\n deeplab_alignment=True,\n keep_spatial_dims=False):\n \"\"\"ResNet18 embedding function.\"\"\"\n return _resnet(\n x,\n is_training,\n weight_decay,\n scope,\n reuse=reuse,\n params=params,\n moments=moments,\n backprop_through_moments=backprop_through_moments,\n use_bounded_activation=use_bounded_activation,\n blocks=(2, 2, 2, 2),\n max_stride=max_stride,\n deeplab_alignment=deeplab_alignment,\n keep_spatial_dims=keep_spatial_dims)\n\n\n\n\n@gin.configurable(\n 'resnet34', allowlist=['weight_decay', 'max_stride', 'deeplab_alignment'])\ndef resnet34(x,\n is_training,\n weight_decay,\n params=None,\n moments=None,\n reuse=tf.AUTO_REUSE,\n scope='resnet34',\n backprop_through_moments=True,\n use_bounded_activation=False,\n max_stride=None,\n deeplab_alignment=True,\n keep_spatial_dims=False):\n \"\"\"ResNet34 embedding function.\"\"\"\n return _resnet(\n x,\n is_training,\n weight_decay,\n scope,\n reuse=reuse,\n params=params,\n moments=moments,\n backprop_through_moments=backprop_through_moments,\n use_bounded_activation=use_bounded_activation,\n blocks=(3, 4, 6, 3),\n max_stride=max_stride,\n deeplab_alignment=deeplab_alignment,\n keep_spatial_dims=keep_spatial_dims)\n\n\ndef 
wide_resnet_block(x,\n depth,\n stride,\n weight_decay,\n params=None,\n moments=None,\n use_project=False,\n backprop_through_moments=True,\n is_training=True,\n use_bounded_activation=False):\n \"\"\"Wide ResNet residual block.\"\"\"\n params_keys, params_vars = [], []\n moments_keys, moments_vars = [], []\n with tf.variable_scope('conv1'):\n bn_1, bn_params, bn_moments = bn(\n x,\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments)\n params_keys.extend(bn_params.keys())\n params_vars.extend(bn_params.values())\n moments_keys.extend(bn_moments.keys())\n moments_vars.extend(bn_moments.values())\n\n out_1 = relu(bn_1, use_bounded_activation=use_bounded_activation)\n\n h_1, conv_params = conv(\n out_1, [3, 3], depth, stride, weight_decay, params=params)\n params_keys.extend(conv_params.keys())\n params_vars.extend(conv_params.values())\n with tf.variable_scope('conv2'):\n bn_2, bn_params, bn_moments = bn(\n h_1,\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments)\n params_keys.extend(bn_params.keys())\n params_vars.extend(bn_params.values())\n moments_keys.extend(bn_moments.keys())\n moments_vars.extend(bn_moments.values())\n\n out_2 = relu(bn_2, use_bounded_activation=use_bounded_activation)\n\n h_2, conv_params = conv(\n out_2, [3, 3],\n depth,\n stride=1,\n weight_decay=weight_decay,\n params=params)\n params_keys.extend(conv_params.keys())\n params_vars.extend(conv_params.values())\n\n h = h_2\n if use_bounded_activation:\n h = tf.clip_by_value(h, -6, 6)\n\n with tf.variable_scope('identity'):\n if use_project:\n with tf.variable_scope('projection_conv'):\n x, conv_params = conv(\n out_1, [1, 1], depth, stride, weight_decay, params=params)\n params_keys.extend(conv_params.keys())\n params_vars.extend(conv_params.values())\n\n params = collections.OrderedDict(zip(params_keys, params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n\n if use_bounded_activation:\n out = tf.clip_by_value(x + h, -6, 6)\n else:\n out = x + h\n return out, params, moments\n\n\ndef _wide_resnet(x,\n is_training,\n scope,\n n,\n k,\n weight_decay,\n reuse=tf.AUTO_REUSE,\n params=None,\n moments=None,\n backprop_through_moments=True,\n use_bounded_activation=False,\n keep_spatial_dims=False):\n \"\"\"A wide ResNet.\"\"\"\n widths = [i * k for i in (16, 32, 64)]\n params_keys, params_vars = [], []\n moments_keys, moments_vars = [], []\n\n def _update_params_lists(params_dict, params_keys, params_vars):\n params_keys.extend(params_dict.keys())\n params_vars.extend(params_dict.values())\n\n def _update_moments_lists(moments_dict, moments_keys, moments_vars):\n moments_keys.extend(moments_dict.keys())\n moments_vars.extend(moments_dict.values())\n\n with tf.variable_scope(scope, reuse=reuse):\n with tf.variable_scope('conv1'):\n x, conv_params = conv(x, [3, 3], 16, 1, weight_decay, params=params)\n _update_params_lists(conv_params, params_keys, params_vars)\n\n def _wide_resnet_block(x, depths, stride, use_project, moments):\n \"\"\"Wrapper for a wide resnet block.\"\"\"\n x, block_params, block_moments = wide_resnet_block(\n x,\n depths,\n stride,\n weight_decay,\n params=params,\n moments=moments,\n use_project=use_project,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments,\n use_bounded_activation=use_bounded_activation)\n return x, block_params, block_moments\n\n with tf.variable_scope('conv2_x'):\n with 
tf.variable_scope('wide_block_0'):\n if widths[0] == 16:\n use_project = False\n else:\n use_project = True\n x, block_params, block_moments = _wide_resnet_block(\n x, widths[0], 1, use_project, moments=moments)\n _update_params_lists(block_params, params_keys, params_vars)\n _update_moments_lists(block_moments, moments_keys, moments_vars)\n for i in range(1, n):\n with tf.variable_scope('wide_block_%d' % i):\n x, block_params, block_moments = _wide_resnet_block(\n x, widths[0], 1, use_project, moments=moments)\n _update_params_lists(block_params, params_keys, params_vars)\n _update_moments_lists(block_moments, moments_keys, moments_vars)\n\n with tf.variable_scope('conv3_x'):\n with tf.variable_scope('wide_block_0'):\n x, block_params, block_moments = _wide_resnet_block(\n x, widths[1], 2, True, moments=moments)\n _update_params_lists(block_params, params_keys, params_vars)\n _update_moments_lists(block_moments, moments_keys, moments_vars)\n for i in range(1, n):\n with tf.variable_scope('wide_block_%d' % i):\n x, block_params, block_moments = _wide_resnet_block(\n x, widths[1], 1, use_project, moments=moments)\n _update_params_lists(block_params, params_keys, params_vars)\n _update_moments_lists(block_moments, moments_keys, moments_vars)\n\n with tf.variable_scope('conv4_x'):\n with tf.variable_scope('wide_block_0'):\n x, block_params, block_moments = _wide_resnet_block(\n x, widths[2], 2, True, moments=moments)\n _update_params_lists(block_params, params_keys, params_vars)\n _update_moments_lists(block_moments, moments_keys, moments_vars)\n for i in range(1, n):\n with tf.variable_scope('wide_block_%d' % i):\n x, block_params, block_moments = _wide_resnet_block(\n x, widths[2], 1, use_project, moments=moments)\n _update_params_lists(block_params, params_keys, params_vars)\n _update_moments_lists(block_moments, moments_keys, moments_vars)\n\n with tf.variable_scope('embedding_layer'):\n x, bn_params, bn_moments = bn(\n x,\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments)\n _update_params_lists(bn_params, params_keys, params_vars)\n _update_moments_lists(bn_moments, moments_keys, moments_vars)\n\n x = relu(x, use_bounded_activation=use_bounded_activation)\n img_w, img_h = x.get_shape().as_list()[1:3]\n x = tf.nn.avg_pool(\n x, ksize=[1, img_w, img_h, 1], strides=[1, 1, 1, 1], padding='VALID')\n # x.shape: [X, 1, 1, 128]\n if not keep_spatial_dims:\n x = tf.reshape(x, [-1, widths[2]])\n params = collections.OrderedDict(zip(params_keys, params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n\n return_dict = {'embeddings': x, 'params': params, 'moments': moments}\n return return_dict\n\n\n@gin.configurable('wide_resnet', allowlist=['weight_decay'])\ndef wide_resnet(x,\n is_training,\n weight_decay,\n params=None,\n moments=None,\n reuse=tf.AUTO_REUSE,\n scope='wide_resnet',\n backprop_through_moments=True,\n use_bounded_activation=False,\n keep_spatial_dims=False):\n \"\"\"A WideResNet embedding function.\"\"\"\n return _wide_resnet(\n x,\n is_training,\n scope,\n 2,\n 2,\n weight_decay,\n reuse=reuse,\n params=params,\n moments=moments,\n backprop_through_moments=backprop_through_moments,\n use_bounded_activation=use_bounded_activation,\n keep_spatial_dims=keep_spatial_dims)\n\n\ndef _four_layer_convnet(inputs,\n is_training,\n scope,\n weight_decay,\n reuse=tf.AUTO_REUSE,\n params=None,\n moments=None,\n depth_multiplier=1.0,\n backprop_through_moments=True,\n use_bounded_activation=False,\n 
keep_spatial_dims=False):\n \"\"\"A four-layer-convnet architecture.\"\"\"\n layer = tf.stop_gradient(inputs)\n model_params_keys, model_params_vars = [], []\n moments_keys, moments_vars = [], []\n\n with tf.variable_scope(scope, reuse=reuse):\n for i in range(4):\n with tf.variable_scope('layer_{}'.format(i), reuse=reuse):\n depth = int(64 * depth_multiplier)\n layer, conv_bn_params, conv_bn_moments = conv_bn(\n layer, [3, 3],\n depth,\n stride=1,\n weight_decay=weight_decay,\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments)\n model_params_keys.extend(conv_bn_params.keys())\n model_params_vars.extend(conv_bn_params.values())\n moments_keys.extend(conv_bn_moments.keys())\n moments_vars.extend(conv_bn_moments.values())\n\n if use_bounded_activation:\n layer = tf.nn.relu6(layer)\n else:\n layer = tf.nn.relu(layer)\n layer = tf.layers.max_pooling2d(layer, [2, 2], 2)\n logging.info('Output of block %d: %s', i, layer.shape)\n\n model_params = collections.OrderedDict(\n zip(model_params_keys, model_params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n if not keep_spatial_dims:\n layer = tf.layers.flatten(layer)\n return_dict = {\n 'embeddings': layer,\n 'params': model_params,\n 'moments': moments\n }\n\n return return_dict\n\n\n@gin.configurable('four_layer_convnet', allowlist=['weight_decay'])\ndef four_layer_convnet(inputs,\n is_training,\n weight_decay,\n params=None,\n moments=None,\n depth_multiplier=1.0,\n reuse=tf.AUTO_REUSE,\n scope='four_layer_convnet',\n backprop_through_moments=True,\n use_bounded_activation=False,\n keep_spatial_dims=False):\n \"\"\"Embeds inputs using a standard four-layer convnet.\n\n Args:\n inputs: Tensors of shape [None, ] + image shape, e.g. [15, 84, 84, 3]\n is_training: Whether we are in the training phase.\n weight_decay: float, scaling constant for L2 weight decay on weight\n variables.\n params: None will create new params (or reuse from scope), otherwise an\n ordered dict of convolutional kernels and biases such that\n params['kernel_0'] stores the kernel of the first convolutional layer,\n etc.\n moments: A dict of the means and vars of the different layers to use for\n batch normalization. If not provided, the mean and var are computed based\n on the given inputs.\n depth_multiplier: The depth multiplier for the convnet channels.\n reuse: Whether to reuse the network's weights.\n scope: An optional scope for the tf operations.\n backprop_through_moments: Whether to allow gradients to flow through the\n given support set moments. Only applies to non-transductive batch norm.\n use_bounded_activation: Whether to enable bounded activation. 
This is useful\n for post-training quantization.\n keep_spatial_dims: bool, if True the spatial dimensions are kept.\n\n Returns:\n A 2D Tensor, where each row is the embedding of an input in inputs.\n \"\"\"\n return _four_layer_convnet(\n inputs,\n is_training,\n scope,\n weight_decay=weight_decay,\n reuse=reuse,\n params=params,\n moments=moments,\n depth_multiplier=depth_multiplier,\n backprop_through_moments=backprop_through_moments,\n use_bounded_activation=use_bounded_activation,\n keep_spatial_dims=keep_spatial_dims)\n\n\n@gin.configurable('relation_module', allowlist=['weight_decay'])\ndef relation_module(inputs,\n is_training,\n weight_decay,\n scope='relation_module',\n reuse=tf.AUTO_REUSE,\n params=None,\n moments=None,\n depth_multiplier=1.0,\n backprop_through_moments=True,\n use_bounded_activation=False):\n \"\"\"A 2-layer-convnet architecture with fully connected layers.\"\"\"\n model_params_keys, model_params_vars = [], []\n moments_keys, moments_vars = [], []\n layer = inputs\n with tf.variable_scope(scope, reuse=reuse):\n for i in range(2):\n with tf.variable_scope('layer_{}'.format(i), reuse=reuse):\n depth = int(64 * depth_multiplier)\n # Note that original has `valid` padding where we use `same`.\n layer, conv_bn_params, conv_bn_moments = conv_bn(\n layer, [3, 3],\n depth,\n 1,\n weight_decay,\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments)\n model_params_keys.extend(conv_bn_params.keys())\n model_params_vars.extend(conv_bn_params.values())\n moments_keys.extend(conv_bn_moments.keys())\n moments_vars.extend(conv_bn_moments.values())\n\n layer = relu(layer, use_bounded_activation=use_bounded_activation)\n # This is a hacky way preventing max pooling if the spatial dimensions\n # are already reduced.\n if layer.shape[1] > 1:\n layer = tf.layers.max_pooling2d(layer, [2, 2], 2)\n tf.logging.info('Output of block %d: %s' % (i, layer.shape))\n\n layer = tf.layers.flatten(layer)\n relu_activation_fn = functools.partial(\n relu, use_bounded_activation=use_bounded_activation)\n with tf.variable_scope('layer_2_fc', reuse=reuse):\n layer, dense_params = dense(\n layer, 8, weight_decay, activation_fn=relu_activation_fn)\n tf.logging.info('Output layer_2_fc: %s' % layer.shape)\n model_params_keys.extend(dense_params.keys())\n model_params_vars.extend(dense_params.values())\n with tf.variable_scope('layer_3_fc', reuse=reuse):\n output, dense_params = dense(\n layer, 1, weight_decay, activation_fn=tf.nn.sigmoid)\n tf.logging.info('Output layer_3_fc: %s' % output.shape)\n model_params_keys.extend(dense_params.keys())\n model_params_vars.extend(dense_params.values())\n\n model_params = collections.OrderedDict(\n zip(model_params_keys, model_params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n return_dict = {'output': output, 'params': model_params, 'moments': moments}\n\n return return_dict\n\n\n@gin.configurable('relationnet_convnet', allowlist=['weight_decay'])\ndef relationnet_convnet(inputs,\n is_training,\n weight_decay,\n params=None,\n moments=None,\n depth_multiplier=1.0,\n reuse=tf.AUTO_REUSE,\n scope='relationnet_convnet',\n backprop_through_moments=True,\n use_bounded_activation=False,\n keep_spatial_dims=False):\n \"\"\"A 4-layer-convnet architecture for RelationNet embedding.\n\n This is almost like the `four_layer_convnet` embedding function except\n for the following differences: (1) no padding for the first 3 layers, (2) no\n maxpool on the last (4th) layer, and (3) no 
flatten.\n\n Paper: https://arxiv.org/abs/1711.06025\n Code:\n https://github.com/floodsung/LearningToCompare_FSL/blob/master/miniimagenet/miniimagenet_train_few_shot.py\n\n Args:\n inputs: Tensors of shape [None, ] + image shape, e.g. [15, 84, 84, 3]\n is_training: Whether we are in the training phase.\n weight_decay: float, scaling constant for L2 weight decay on weight\n variables.\n params: None will create new params (or reuse from scope), otherwise an\n ordered dict of convolutional kernels and biases such that\n params['kernel_0'] stores the kernel of the first convolutional layer,\n etc.\n moments: A dict of the means and vars of the different layers to use for\n batch normalization. If not provided, the mean and var are computed based\n on the given inputs.\n depth_multiplier: The depth multiplier for the convnet channels.\n reuse: Whether to reuse the network's weights.\n scope: An optional scope for the tf operations.\n backprop_through_moments: Whether to allow gradients to flow through the\n given support set moments. Only applies to non-transductive batch norm.\n use_bounded_activation: Whether to enable bounded activation. This is useful\n for post-training quantization.\n keep_spatial_dims: bool, if True the spatial dimensions are kept.\n\n Returns:\n A 2D Tensor, where each row is the embedding of an input in inputs.\n \"\"\"\n layer = tf.stop_gradient(inputs)\n model_params_keys, model_params_vars = [], []\n moments_keys, moments_vars = [], []\n\n with tf.variable_scope(scope, reuse=reuse):\n for i in range(4):\n with tf.variable_scope('layer_{}'.format(i), reuse=reuse):\n depth = int(64 * depth_multiplier)\n # The original implementation had VALID padding for the first two layers\n # that are followed by pooling. The rest (last two) had `SAME` padding.\n # In our setting, to avoid OOM, we pool (and apply VALID padding) to\n # the first three layers, and use SAME padding only in the last one.\n layer, conv_bn_params, conv_bn_moments = conv_bn(\n layer, [3, 3],\n depth,\n stride=1,\n weight_decay=weight_decay,\n padding='VALID' if i < 3 else 'SAME',\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments)\n model_params_keys.extend(conv_bn_params.keys())\n model_params_vars.extend(conv_bn_params.values())\n moments_keys.extend(conv_bn_moments.keys())\n moments_vars.extend(conv_bn_moments.values())\n\n layer = relu(layer, use_bounded_activation=use_bounded_activation)\n if i < 3:\n layer = tf.layers.max_pooling2d(layer, [2, 2], 2)\n tf.logging.info('Output of block %d: %s' % (i, layer.shape))\n\n model_params = collections.OrderedDict(\n zip(model_params_keys, model_params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n if not keep_spatial_dims:\n layer = tf.layers.flatten(layer)\n return_dict = {\n 'embeddings': layer,\n 'params': model_params,\n 'moments': moments\n }\n\n return return_dict\n\n\n@gin.configurable(\n 'fully_connected_network',\n allowlist=[\n 'n_hidden_units',\n 'use_batchnorm',\n 'weight_decay',\n ])\ndef fully_connected_network(inputs,\n is_training,\n weight_decay,\n params=None,\n moments=None,\n n_hidden_units=(64,),\n use_batchnorm=False,\n reuse=tf.AUTO_REUSE,\n scope='fully_connected',\n use_bounded_activation=False,\n backprop_through_moments=None,\n keep_spatial_dims=None):\n \"\"\"A fully connected linear network.\n\n Args:\n inputs: Tensor of shape [None, num_features], where `num_features` is the\n number of input features.\n is_training: whether it's 
train or test mode (for batch norm).\n weight_decay: float, scaling constant for L2 weight decay on weight\n variables.\n params: None will create new params (or reuse from scope), otherwise an\n ordered dict of fully connected weights and biases such that\n params['weight_0'] stores the kernel of the first fully-connected layer,\n etc.\n moments: not used.\n n_hidden_units: tuple, Number of hidden units for each layer. If empty, it\n is the identity mapping.\n use_batchnorm: bool, Whether to use batchnorm after layers, except last.\n reuse: Whether to reuse the network's weights.\n scope: An optional scope for the tf operations.\n use_bounded_activation: Whether to enable bounded activation. This is useful\n for post-training quantization.\n backprop_through_moments: Whether to allow gradients to flow through the\n given support set moments. Only applies to non-transductive batch norm.\n keep_spatial_dims: is there only to match the interface. This backbone\n cannot keep spatial dimensions, so it will fail if it's True.\n\n Returns:\n A 2D Tensor, where each row is the embedding of an input in inputs.\n \"\"\"\n assert not keep_spatial_dims\n layer = inputs\n model_params_keys, model_params_vars = [], []\n moments_keys, moments_vars = [], []\n activation_fn = functools.partial(\n relu, use_bounded_activation=use_bounded_activation)\n with tf.variable_scope(scope, reuse=reuse):\n for i, n_unit in enumerate(n_hidden_units):\n with tf.variable_scope('layer_%d' % i, reuse=reuse):\n layer, dense_params = dense(\n layer,\n n_unit,\n weight_decay,\n activation_fn=activation_fn,\n params=params)\n model_params_keys.extend(dense_params.keys())\n model_params_vars.extend(dense_params.values())\n if use_batchnorm:\n layer, bn_params, bn_moments = bn(\n layer,\n params=params,\n moments=moments,\n is_training=is_training,\n backprop_through_moments=backprop_through_moments)\n model_params_keys.extend(bn_params.keys())\n model_params_keys.extend(bn_params.values())\n moments_keys.extend(bn_moments.keys())\n moments_vars.extend(bn_moments.values())\n\n model_params = collections.OrderedDict(\n zip(model_params_keys, model_params_vars))\n moments = collections.OrderedDict(zip(moments_keys, moments_vars))\n return_dict = {\n 'embeddings': layer,\n 'params': model_params,\n 'moments': moments\n }\n return return_dict\n"
] |
[
[
"tensorflow.compat.v1.initializers.zeros",
"tensorflow.compat.v1.nn.avg_pool",
"tensorflow.compat.v1.assign",
"tensorflow.compat.v1.logging.info",
"tensorflow.compat.v1.reduce_prod",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.equal",
"tensorflow.compat.v1.get_variable_scope",
"tensorflow.compat.v1.initializers.constant",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.add_to_collection",
"tensorflow.compat.v1.initializers.truncated_normal",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.nn.conv2d",
"tensorflow.compat.v1.nn.max_pool",
"tensorflow.compat.v1.ceil",
"tensorflow.compat.v1.reduce_mean",
"tensorflow.compat.v1.stop_gradient",
"tensorflow.compat.v1.layers.flatten",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.layers.max_pooling2d",
"tensorflow.compat.v1.nn.relu6",
"tensorflow.compat.v1.nn.batch_normalization",
"tensorflow.compat.v1.group",
"tensorflow.compat.v1.get_variable",
"tensorflow.compat.v1.initializers.ones",
"tensorflow.compat.v1.square",
"tensorflow.compat.v1.distribute.get_replica_context",
"tensorflow.compat.v1.keras.regularizers.L2",
"tensorflow.compat.v1.image.resize_bilinear",
"tensorflow.compat.v1.clip_by_value",
"tensorflow.compat.v1.nn.xw_plus_b",
"tensorflow.compat.v1.nn.relu"
]
] |
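A small illustrative sketch of the checkpoint-filtering helper defined at the top of functional_backbones.py, assuming the module is importable under the recorded path; the variable names below are made up for the example:

from meta_dataset.models.functional_backbones import is_backbone_variable

# Embedding (backbone) variables are kept when restoring a pretrained checkpoint ...
print(is_backbone_variable('resnet18/conv1/batch_norm/offset'))  # True
# ... while head-classifier and optimizer variables are filtered out.
print(is_backbone_variable('resnet18/fc/weights'))               # False: 'fc' is a head keyword
print(is_backbone_variable('resnet18/conv1/kernel/adam'))        # False: 'adam' is an optimizer keyword
# An extra predicate can narrow the selection further.
print(is_backbone_variable('resnet18/conv1/kernel',
                           only_if=lambda name: 'conv2' in name))  # False: fails the predicate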
jczhao001/winkDetection
|
[
"a0c298ae2541a6719e09c66e18afb2c87dba0d7c"
] |
[
"input1.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport os\nimport cv2\nfrom PIL import Image\nimport matplotlib.pyplot as plt\ntrain_dir_path = \"E:/test/eyeclt/0/test/\"\ndef getFile(fileDir):\n close = []\n lableclose = []\n open = []\n lableopen = []\n for file in os.listdir(fileDir):\n\n name = file.split(sep='.')\n if '0' == name[0]:\n close.append(fileDir + file)\n lableclose.append(0)\n else:\n #if '1' == name[0]:\n open.append(fileDir + file)\n lableopen.append(1)\n imageList = np.hstack((close,open))\n labelList = np.hstack((lableclose, lableopen))\n\n print(\"there are %d close\\nthere %d open\" % (len(close),len(open)))\n\n #imageList = np.hstack((cats,dogs))\n #labelList = np.hstack((lableCats,lableDogs))\n\n temp = np.array([imageList,labelList])\n temp = temp.transpose()\n np.random.shuffle(temp)\n\n imageList = list(temp[:,0])\n labelList = list(temp[:,1])\n labelList = [int(i) for i in labelList]\n\n return imageList,labelList\n\n\ndef getBatch(img,lable,img_w,img_h,batchSize,capacity):\n img = tf.cast(img,tf.string)\n\n lable = tf.cast(lable,tf.int32)\n\n inputQueue = tf.train.slice_input_producer([img,lable])\n lable = inputQueue[1]\n imgConents = tf.read_file(inputQueue[0])\n #lable = inputQueue[1]\n img = tf.image.decode_jpeg(imgConents,channels=3)\n img = tf.image.resize_image_with_crop_or_pad(img,img_w,img_h)\n img = tf.image.per_image_standardization(img)\n\n #img = tf.image.resize_images(img,[img_h,img_w],method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n img = tf.cast(img,tf.float32)\n\n imgBatch,lableBatch = tf.train.batch([img,lable],\n batch_size=batchSize,\n num_threads=64,\n capacity=capacity\n )\n\n lableBatch = tf.reshape(lableBatch,[batchSize])\n\n return imgBatch,lableBatch\ngetFile(train_dir_path)"
] |
[
[
"numpy.array",
"tensorflow.train.batch",
"tensorflow.train.slice_input_producer",
"tensorflow.read_file",
"numpy.random.shuffle",
"tensorflow.reshape",
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.image.per_image_standardization",
"numpy.hstack",
"tensorflow.image.decode_jpeg",
"tensorflow.cast"
]
] |
acatwithacomputer/proteus
|
[
"80dfad95da6ab4d18a88a035f55c26b03540a864",
"80dfad95da6ab4d18a88a035f55c26b03540a864",
"80dfad95da6ab4d18a88a035f55c26b03540a864"
] |
[
"proteus/tests/POD/svd_burgers.py",
"proteus/tests/SWFlow/solitary_reef.py",
"proteus/mprans/NCLS3P.py"
] |
[
"#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom __future__ import division\nfrom past.utils import old_div\nfrom burgers_init import use_deim\nfrom proteus import deim_utils,Archiver\nfrom proteus.deim_utils import read_snapshots\n\nT = 1.0\nnDTout = 100\nDT = old_div(T,float(nDTout))\n\narchive = Archiver.XdmfArchive(\".\",burgers_init.physics.name,readOnly=True)\n\nimport numpy as np\n\n#generate snapshots for solution\nS = read_snapshots(archive,nDTout+1,'u',)\nU, s, V = np.linalg.svd(S, full_matrices=False)\nprint('SVD for solution done!')\nnp.savetxt('SVD_basis', U, delimiter=' ')\nnp.savetxt('Singular_values', s, delimiter=' ')\n\nif use_deim:\n Sf = read_snapshots(archive,nDTout+1,'spatial_residual0')\n Uf,sf,Vf = np.linalg.svd(Sf,full_matrices=False)\n print('SVD for spatial residual done!')\n np.savetxt('Fs_SVD_basis', Uf, delimiter=' ')\n np.savetxt('Fs_Singular_values', sf, delimiter=' ')\n Sm = read_snapshots(archive,nDTout+1,'mass_residual0')\n Um,sm,Vm = np.linalg.svd(Sm,full_matrices=False)\n print('SVD for mass residual done!')\n np.savetxt('Fm_SVD_basis', Um, delimiter=' ')\n np.savetxt('Fm_Singular_values', sm, delimiter=' ')\n \n",
"from __future__ import division\nfrom builtins import object\nfrom past.utils import old_div\nfrom proteus.mprans import (SW2DCV, GN_SW2DCV)\nfrom proteus.Domain import RectangularDomain, PlanarStraightLineGraphDomain\nimport numpy as np\nfrom proteus import (Domain, Context,\n MeshTools as mt)\nfrom proteus.Profiling import logEvent\nfrom proteus.Gauges import PointGauges\nimport proteus.SWFlow.SWFlowProblem as SWFlowProblem\nimport os\n\n\"\"\"\nWe reproduce the 2009-2010 experiments of [Swigler, 2009] and\n[Lynett, 2019] performed at the O.H. Hinsdale Wave Research\nLaboratory of Oregon State University. The experiments were conducted\nto study specific phenomena that are known to occur when solitary\nwaves propagate over irregular bathymetry such as shoaling,\nrefraction, breaking, etc. In the experiment, nine wave gauges (WGs)\nwere placed along the basin to capture the free surface elevation\nalong with three Acoustic Doppler Velocimeters (ADVs) that\nmeasured the velocities in both horizontal directions.\n\"\"\"\n\n# *************************** #\n# ***** GENERAL OPTIONS ***** #\n# *************************** #\nopts = Context.Options([\n ('sw_model', 1, \"sw_model = {0,1} for {SWEs,DSWEs}\"),\n (\"final_time\", 10.0, \"Final time for simulation\"),\n (\"dt_output\", 0.1, \"Time interval to output solution\"),\n (\"cfl\", 0.25, \"Desired CFL restriction\"),\n (\"refinement\", 4, \"Refinement level\"),\n (\"structured\", True, \"Structured or unstructured mesh\"),\n (\"he\", 0.5, \"Mesh size for unstructured mesh\"),\n (\"reflecting_BCs\", False, \"Use reflecting BCs for all boundaries\"),\n (\"want_gauges\", False, \"Output for water height point gauge\")\n])\n\n###################\n# DOMAIN AND MESH #\n###################\nL = (48.8, 26.5) # this is length in x direction and y direction\nrefinement = opts.refinement\nrectangle = RectangularDomain(L=L, x=[0, -13.25, 0])\n\n# CREATE REFINEMENT #\nnnx0 = 6\nnnx = (nnx0 - 1) * (2**refinement) + 1\nnny = old_div((nnx - 1), 2) + 1\nhe = old_div(L[0], float(nnx - 1))\nif opts.structured:\n domain = rectangle\nelse:\n rectangle.writePoly(\"reef\")\n domain = PlanarStraightLineGraphDomain(fileprefix=\"reef\")\n domain.MeshOptions.triangleOptions = \"pAq30Dena%f\" % (0.5 * opts.he**2,)\n nnx = None\n nny = None\n\n###############################\n# CONSTANTS NEEDED FOR SETUP #\n###############################\ng = 9.81\n\n# stuff for solitary wave\nh0 = 0.78\nalpha = 0.4 # 0.5 * h0\nxs = 5.0\nr = np.sqrt(old_div(3. * alpha, (4. * h0**2 * (h0 + alpha))))\nc = np.sqrt(g * (h0 + alpha))\n\n# stuff for bathymetry, including shelf and cone\nrcone = 3.\nhcone = 0.45\nyc = 13.25\n\n\n#####################################\n# Some functions defined here #\n####################################\n\ndef solitary_wave(x, t):\n sechSqd = (1.0 / np.cosh(r * (x - xs - c * t)))**2\n return alpha * sechSqd\n\n\ndef bathymetry_function(X):\n x = X[0]\n y = X[1] + yc\n\n # first define cone topography\n cone = np.maximum(\n hcone - np.sqrt(((x - 17.0)**2 + (y - yc)**2) / (rcone / hcone)**2), 0.0)\n\n # define piecewise function for base\n base = 0. * x\n conds = [x < 10.2, (10.2 < x) & (x <= 17.5), (17.5 <= x) & (x <= 32.5),\n 32.5 < x]\n base_values = [lambda x: 0.0,\n lambda x: (0.5 - 0.0) / (17.5 - 10.20) * (x - 10.2),\n lambda x: 1.0 + (1.0 - 0.5)/(32.5 - 17.5) * (x - 32.5),\n lambda x: 1.]\n\n base = np.piecewise(x, conds, base_values)\n\n # define piecewise function for shelf\n shelf = 0. 
* x\n dist = 1.0 - np.minimum(1.0, np.abs(y - yc) / yc)\n aux_x = 12.50 + 12.4999 * (1.0 - dist)\n aux_z = 0.70 + 0.050 * (1.0 - dist)\n\n conds = [x < 10.2, (10.2 <= x) & (x <= aux_x), (aux_x <= x) & (x <= 25.),\n (25. < x) & (x <= 32.5), 32.5 < x]\n shelf_values = [0.0,\n aux_z / (aux_x - 10.20) * (x - 10.2),\n 0.75 + (aux_z - 0.75) / (aux_x - 25.) * (x - 25.),\n 1. + (1. - 0.5) / (32.5 - 17.5) * (x - 32.5),\n 1.]\n shelf = np.select(conds, shelf_values)\n\n bath = np.maximum(base, shelf) + cone\n return bath\n\n\n######################\n# INITIAL CONDITIONS #\n######################\n\n\nclass water_height_at_t0(object):\n def uOfXT(self, X, t):\n hTilde = h0 + solitary_wave(X[0], 0)\n h = max(hTilde - bathymetry_function(X), 0.)\n return h\n\n\nclass x_mom_at_t0(object):\n def uOfXT(self, X, t):\n hTilde = h0 + solitary_wave(X[0], 0)\n h = max(hTilde - bathymetry_function(X), 0.)\n return h * c * old_div(hTilde - h0, hTilde)\n\n\nclass y_mom_at_t0(object):\n def uOfXT(self, X, t):\n return 0.\n\n\nclass heta_at_t0(object):\n def uOfXT(self, X, t):\n h = water_height_at_t0().uOfXT(X, t)\n return h**2\n\n\nclass hw_at_t0(object):\n def uOfXT(self, X, t):\n sechSqd = (1.0 / np.cosh(r * (X[0] - xs)))**2.0\n hTilde = h0 + solitary_wave(X[0], 0)\n h = max(hTilde - bathymetry_function(X), 0.)\n hTildePrime = -2.0 * alpha * r * np.tanh(r * (X[0] - xs)) * sechSqd\n hw = -h**2 * old_div(c * h0 * hTildePrime, hTilde**2)\n return hw\n\n\n###############################\n##### BOUNDARY CONDITIONS #####\n###############################\nX_coords = (0.0, 48.8) # this is x domain, used in BCs\nY_coords = (-13.25, 13.25) # this is y domain, used in BCs\n\n\ndef x_mom_DBC(X, flag):\n if X[0] == X_coords[0] or X[0] == X_coords[1]:\n return lambda X, t: 0.0\n\n\ndef y_mom_DBC(X, flag):\n if X[1] == Y_coords[0] or X[1] == Y_coords[1]:\n return lambda X, t: 0.0\n\n\n# ********************************** #\n# ***** Create mySWFlowProblem ***** #\n# ********************************** #\noutputStepping = SWFlowProblem.OutputStepping(\n opts.final_time, dt_output=opts.dt_output)\ninitialConditions = {'water_height': water_height_at_t0(),\n 'x_mom': x_mom_at_t0(),\n 'y_mom': y_mom_at_t0(),\n 'h_times_eta': heta_at_t0(),\n 'h_times_w': hw_at_t0()}\nboundaryConditions = {'water_height': lambda x, flag: None,\n 'x_mom': x_mom_DBC,\n 'y_mom': y_mom_DBC,\n 'h_times_eta': lambda x, flag: None,\n 'h_times_w': lambda x, flag: None}\n# **************************** #\n# ********** GAUGES ********** #\n# **************************** #\nheightPointGauges = PointGauges(gauges=((('h'), ((7.5, 0.0, 0),\n (13.0, 0.0, 0),\n (21.0, 0.0, 0),\n (7.5, 5.0, 0),\n (13.0, 5.0, 0),\n (21.0, 5.0, 0),\n (25.0, 0.0, 0),\n (25.0, 5.0, 0),\n (25.0, 10.0, 0))),),\n activeTime=(0.01, opts.final_time),\n fileName='reef_wave_gauges.csv')\n\n# ********************************************* #\n# ********** Create my SWFlowProblem ********** #\n# ********************************************* #\nmySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=opts.sw_model,\n cfl=opts.cfl,\n outputStepping=outputStepping,\n structured=opts.structured,\n he=he,\n nnx=nnx,\n nny=nny,\n domain=domain,\n initialConditions=initialConditions,\n boundaryConditions=boundaryConditions,\n reflectingBCs=opts.reflecting_BCs,\n bathymetry=bathymetry_function,\n analyticalSolution=None)\nmySWFlowProblem.physical_parameters['LINEAR_FRICTION'] = 0\nmySWFlowProblem.physical_parameters['mannings'] = 0.0\nif opts.want_gauges:\n mySWFlowProblem.auxiliaryVariables = 
[heightPointGauges]\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom builtins import str\nfrom builtins import range\nfrom past.utils import old_div\nimport proteus\nimport numpy as np\nfrom math import fabs\nimport os\nfrom proteus import cfemIntegrals, Quadrature, Norms, Comm\nfrom proteus.NonlinearSolvers import NonlinearEquation\nfrom proteus.FemTools import (DOFBoundaryConditions,\n FluxBoundaryConditions,\n C0_AffineLinearOnSimplexWithNodalBasis)\nfrom proteus.Comm import globalMax\nfrom proteus.Profiling import memory\nfrom proteus.Profiling import logEvent as log\nfrom proteus.Transport import OneLevelTransport\nfrom proteus.TransportCoefficients import TC_base\nfrom proteus.SubgridError import SGE_base\nfrom proteus.ShockCapturing import ShockCapturing_base\nfrom . import cNCLS3P\nfrom . import cArgumentsDict\n\n\nclass SubgridError(proteus.SubgridError.SGE_base):\n\n def __init__(self, coefficients, nd):\n proteus.SubgridError.SGE_base.__init__(self, coefficients, nd, False)\n\n def initializeElementQuadrature(self, mesh, t, cq):\n for ci in range(self.nc):\n cq[('dH_sge', ci, ci)] = cq[('dH', ci, ci)]\n\n def calculateSubgridError(self, q):\n pass\n\n def updateSubgridErrorHistory(self, initializationPhase=False):\n pass\n\n\nclass ShockCapturing(proteus.ShockCapturing.ShockCapturing_base):\n\n def __init__(\n self,\n coefficients,\n nd,\n shockCapturingFactor=0.25,\n lag=True,\n nStepsToDelay=None):\n proteus.ShockCapturing.ShockCapturing_base.__init__(\n self, coefficients, nd, shockCapturingFactor, lag)\n self.nStepsToDelay = nStepsToDelay\n self.nSteps = 0\n if self.lag:\n log(\"NCLS3P.ShockCapturing: lagging requested but must lag the first step; switching lagging off and delaying\")\n self.nStepsToDelay = 1\n self.lag = False\n\n def initializeElementQuadrature(self, mesh, t, cq):\n self.mesh = mesh\n self.numDiff = []\n self.numDiff_last = []\n for ci in range(self.nc):\n self.numDiff.append(cq[('numDiff', ci, ci)])\n self.numDiff_last.append(cq[('numDiff', ci, ci)])\n\n def updateShockCapturingHistory(self):\n self.nSteps += 1\n if self.lag:\n for ci in range(self.nc):\n self.numDiff_last[ci][:] = self.numDiff[ci]\n if self.lag == False and self.nStepsToDelay is not None and self.nSteps > self.nStepsToDelay:\n log(\"NCLS3P.ShockCapturing: switched to lagged shock capturing\")\n self.lag = True\n self.numDiff_last = []\n for ci in range(self.nc):\n self.numDiff_last.append(self.numDiff[ci].copy())\n log(\"NCLS3P: max numDiff %e\" %\n (globalMax(self.numDiff_last[0].max()),))\n\nclass NumericalFlux(\n proteus.NumericalFlux.HamiltonJacobi_DiagonalLesaintRaviart):\n\n def __init__(self, vt, getPointwiseBoundaryConditions,\n getAdvectiveFluxBoundaryConditions,\n getDiffusiveFluxBoundaryConditions):\n proteus.NumericalFlux.HamiltonJacobi_DiagonalLesaintRaviart.__init__(\n self,\n vt,\n getPointwiseBoundaryConditions,\n getAdvectiveFluxBoundaryConditions,\n getDiffusiveFluxBoundaryConditions)\n\n\nclass Coefficients(proteus.TransportCoefficients.TC_base):\n from proteus.ctransportCoefficients import ncLevelSetCoefficientsEvaluate\n def __init__(self,\n V_model=0,\n RD_model=None,\n ME_model=1,\n checkMass=True, epsFact=1.5,\n useMetrics=0.0, sc_uref=1.0, sc_beta=1.0,\n waterline_interval=-1,\n movingDomain=False,\n PURE_BDF=False,\n EXPLICIT_METHOD=False,\n outputQuantDOFs=False):\n \n self.EXPLICIT_METHOD=EXPLICIT_METHOD\n self.outputQuantDOFs=outputQuantDOFs\n self.PURE_BDF=PURE_BDF\n self.movingDomain = movingDomain\n self.useMetrics = useMetrics\n self.epsFact = 
epsFact\n self.variableNames = ['phi']\n nc = 1\n mass = {0: {0: 'linear'}}\n hamiltonian = {0: {0: 'linear'}}\n advection = {}\n diffusion = {}\n potential = {}\n reaction = {}\n TC_base.__init__(self,\n nc,\n mass,\n advection,\n diffusion,\n potential,\n reaction,\n hamiltonian,\n ['phi'],\n movingDomain=movingDomain)\n self.flowModelIndex = V_model\n self.modelIndex = ME_model\n self.RD_modelIndex = RD_model\n self.checkMass = checkMass\n self.sc_uref = sc_uref\n self.sc_beta = sc_beta\n self.waterline_interval = waterline_interval\n\n def attachModels(self, modelList):\n # the level set model\n self.model = modelList[self.modelIndex]\n\n # the velocity\n if self.flowModelIndex >= 0:\n self.flowModel = modelList[self.flowModelIndex]\n self.q_v = modelList[self.flowModelIndex].q[('velocity', 0)]\n self.ebqe_v = modelList[self.flowModelIndex].ebqe[('velocity', 0)]\n if ('velocity', 0) in modelList[self.flowModelIndex].ebq:\n self.ebq_v = modelList[\n self.flowModelIndex].ebq[\n ('velocity', 0)]\n else:\n self.ebq_v = None\n if ('u', 0) not in self.model.ebq and ('u', 0) in self.flowModel.ebq:\n self.model.ebq[('u', 0)] = np.zeros(\n self.flowModel.ebq[('u', 0)].shape, 'd')\n self.model.ebq[('grad(u)', 0)] = np.zeros(\n self.flowModel.ebq[('grad(u)', 0)].shape, 'd')\n if ('v', 1) in self.flowModel.ebq:\n self.model.u[0].getValuesTrace(\n self.flowModel.ebq[\n ('v', 1)], self.model.ebq[\n ('u', 0)])\n self.model.u[0].getGradientValuesTrace(\n self.flowModel.ebq[\n ('grad(v)', 1)], self.model.ebq[\n ('grad(u)', 0)])\n if self.RD_modelIndex is not None:\n self.rdModel = modelList[self.RD_modelIndex]\n self.rdModel_ebqe = self.rdModel.ebqe[('u',0)]\n else:\n self.rdModel = None\n self.rdModel_ebqe = np.copy(self.model.ebqe[('u',0)])\n \n def initializeElementQuadrature(self, t, cq):\n if self.flowModelIndex is None:\n self.q_v = np.zeros(cq[('grad(u)', 0)].shape, 'd')\n\n def initializeElementBoundaryQuadrature(self, t, cebq, cebq_global):\n if self.flowModelIndex is None:\n self.ebq_v = np.zeros(cebq[('grad(u)', 0)].shape, 'd')\n\n def initializeGlobalExteriorElementBoundaryQuadrature(self, t, cebqe):\n if self.flowModelIndex is None:\n self.ebqe_v = np.zeros(cebqe[('grad(u)', 0)].shape, 'd')\n\n def preStep(self, t, firstStep=False):\n # BOUNDARY CONDITION FROM re-distancing model\n if self.rdModel is None:\n self.rdModel_ebqe[:] = self.model.ebqe[('u',0)]\n \n # Restart flags for stages of taylor galerkin\n self.model.stage = 1\n self.model.auxTaylorGalerkinFlag = 1\n\n # SAVE OLD SOLUTION #\n self.model.u_dof_old[:] = self.model.u[0].dof\n\n # COMPUTE NEW VELOCITY (if given by user) #\n if self.model.hasVelocityFieldAsFunction:\n self.model.updateVelocityFieldAsFunction()\n\n # if self.checkMass:\n # self.m_pre = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,\n # self.model.mesh.elementDiametersArray,\n # self.model.q['dV'],\n # self.model.q[('m',0)],\n # self.model.mesh.nElements_owned)\n # log(\"Phase 0 mass before NCLS3P step = %12.5e\" % (self.m_pre,),level=2)\n # self.m_last = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,\n # self.model.mesh.elementDiametersArray,\n # self.model.q['dV'],\n # self.model.timeIntegration.m_last[0],\n # self.model.mesh.nElements_owned)\n # log(\"Phase 0 mass before NCLS3P step (m_last) = %12.5e\" % (self.m_last,),level=2)\n # #cek todo why is this here\n # if self.flowModelIndex >= 0 and self.flowModel.ebq.has_key(('v',1)):\n # self.model.u[0].getValuesTrace(self.flowModel.ebq[('v',1)],self.model.ebq[('u',0)])\n # 
self.model.u[0].getGradientValuesTrace(self.flowModel.ebq[('grad(v)',1)],self.model.ebq[('grad(u)',0)])\n copyInstructions = {}\n return copyInstructions\n\n def postStep(self, t, firstStep=False):\n self.model.q['dV_last'][:] = self.model.q['dV']\n # if self.checkMass:\n # self.m_post = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact,\n # self.model.mesh.elementDiametersArray,\n # self.model.q['dV'],\n # self.model.q[('u',0)],\n # self.model.mesh.nElements_owned)\n # log(\"Phase 0 mass after NCLS3P step = %12.5e\" % (self.m_post,),level=2)\n # #need a flux here not a velocity\n # self.fluxIntegral = Norms.fluxDomainBoundaryIntegralFromVector(self.flowModel.ebqe['dS'],\n # self.flowModel.ebqe[('velocity',0)],\n # self.flowModel.ebqe['n'],\n # self.model.mesh)\n # log(\"Flux integral = %12.5e\" % (self.fluxIntegral,),level=2)\n # log(\"Phase 0 mass conservation after NCLS3P step = %12.5e\" % (self.m_post - self.m_last + self.model.timeIntegration.dt*self.fluxIntegral,),level=2)\n # self.lsGlobalMass = self.m_post\n # self.fluxGlobal = self.fluxIntegral*self.model.timeIntegration.dt\n # self.totalFluxGlobal += self.fluxGlobal\n # self.lsGlobalMassArray.append(self.lsGlobalMass)\n # self.lsGlobalMassErrorArray.append(self.lsGlobalMass - self.lsGlobalMassArray[0] + self.totalFluxGlobal)\n # self.fluxArray.append(self.fluxIntegral)\n # self.timeArray.append(self.model.timeIntegration.t)\n # if self.flowModelIndex >= 0 and self.flowModel.ebq.has_key(('v',1)):\n # self.model.u[0].getValuesTrace(self.flowModel.ebq[('v',1)],self.model.ebq[('u',0)])\n # self.model.u[0].getGradientValuesTrace(self.flowModel.ebq[('grad(v)',1)],self.model.ebq[('grad(u)',0)])\n copyInstructions = {}\n return copyInstructions\n\n def updateToMovingDomain(self, t, c):\n # in a moving domain simulation the velocity coming in is already for\n # the moving domain\n pass\n\n def evaluate(self, t, c):\n v = None\n if c[('dH', 0, 0)].shape == self.q_v.shape:\n v = self.q_v\n elif c[('dH', 0, 0)].shape == self.ebqe_v.shape:\n v = self.ebqe_v\n elif self.ebq_v is not None and c[('dH', 0, 0)].shape == self.ebq_v.shape:\n v = self.ebq_v\n else:\n raise RuntimeError(\"don't have v for NC Level set of shape = \" + \\\n repr(c[('dH', 0, 0)].shape))\n if v is not None:\n self.ncLevelSetCoefficientsEvaluate(v,\n c[('u', 0)],\n c[('grad(u)', 0)],\n c[('m', 0)],\n c[('dm', 0, 0)],\n c[('H', 0)],\n c[('dH', 0, 0)])\n\n\nclass LevelModel(OneLevelTransport):\n nCalls = 0\n\n def __init__(self,\n uDict,\n phiDict,\n testSpaceDict,\n matType,\n dofBoundaryConditionsDict,\n dofBoundaryConditionsSetterDict,\n coefficients,\n elementQuadrature,\n elementBoundaryQuadrature,\n fluxBoundaryConditionsDict=None,\n advectiveFluxBoundaryConditionsSetterDict=None,\n diffusiveFluxBoundaryConditionsSetterDictDict=None,\n stressTraceBoundaryConditionsSetterDict=None,\n stabilization=None,\n shockCapturing=None,\n conservativeFluxDict=None,\n numericalFluxType=None,\n TimeIntegrationClass=None,\n massLumping=False,\n reactionLumping=False,\n options=None,\n name='defaultName',\n reuse_trial_and_test_quadrature=True,\n sd=True,\n movingDomain=False,\n bdyNullSpace=False):\n self.bdyNullSpace = bdyNullSpace\n #\n # set the objects describing the method and boundary conditions\n #\n self.movingDomain = movingDomain\n self.tLast_mesh = None\n #\n self.name = name\n self.sd = sd\n self.Hess = False\n self.lowmem = True\n self.timeTerm = True # allow turning off the time derivative\n # self.lowmem=False\n self.testIsTrial = True\n self.phiTrialIsTrial = 
True\n self.u = uDict\n self.ua = {} # analytical solutions\n self.phi = phiDict\n self.dphi = {}\n self.matType = matType\n # mwf try to reuse test and trial information across components if\n # spaces are the same\n self.reuse_test_trial_quadrature = reuse_trial_and_test_quadrature # True#False\n if self.reuse_test_trial_quadrature:\n for ci in range(1, coefficients.nc):\n assert self.u[ci].femSpace.__class__.__name__ == self.u[\n 0].femSpace.__class__.__name__, \"to reuse_test_trial_quad all femSpaces must be the same!\"\n self.u_dof_old = None\n\n # Simplicial Mesh\n # assume the same mesh for all components for now\n self.mesh = self.u[0].femSpace.mesh\n self.testSpace = testSpaceDict\n self.dirichletConditions = dofBoundaryConditionsDict\n # explicit Dirichlet conditions for now, no Dirichlet BC constraints\n self.dirichletNodeSetList = None\n self.coefficients = coefficients\n self.coefficients.initializeMesh(self.mesh)\n self.nc = self.coefficients.nc\n self.stabilization = stabilization\n self.shockCapturing = shockCapturing\n # no velocity post-processing for now\n self.conservativeFlux = conservativeFluxDict\n self.fluxBoundaryConditions = fluxBoundaryConditionsDict\n self.advectiveFluxBoundaryConditionsSetterDict = advectiveFluxBoundaryConditionsSetterDict\n self.diffusiveFluxBoundaryConditionsSetterDictDict = diffusiveFluxBoundaryConditionsSetterDictDict\n # determine whether the stabilization term is nonlinear\n self.stabilizationIsNonlinear = False\n # cek come back\n if self.stabilization is not None:\n for ci in range(self.nc):\n if ci in coefficients.mass:\n for flag in list(coefficients.mass[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.advection:\n for flag in list(coefficients.advection[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.diffusion:\n for diffusionDict in list(coefficients.diffusion[ci].values()):\n for flag in list(diffusionDict.values()):\n if flag != 'constant':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.potential:\n for flag in list(coefficients.potential[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.reaction:\n for flag in list(coefficients.reaction[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n if ci in coefficients.hamiltonian:\n for flag in list(coefficients.hamiltonian[ci].values()):\n if flag == 'nonlinear':\n self.stabilizationIsNonlinear = True\n # determine if we need element boundary storage\n self.elementBoundaryIntegrals = {}\n for ci in range(self.nc):\n self.elementBoundaryIntegrals[ci] = (\n (self.conservativeFlux is not None) or (\n numericalFluxType is not None) or (\n self.fluxBoundaryConditions[ci] == 'outFlow') or (\n self.fluxBoundaryConditions[ci] == 'mixedFlow') or (\n self.fluxBoundaryConditions[ci] == 'setFlow'))\n #\n # calculate some dimensions\n #\n # assume same space dim for all variables\n self.nSpace_global = self.u[0].femSpace.nSpace_global\n self.nDOF_trial_element = [\n u_j.femSpace.max_nDOF_element for u_j in list(self.u.values())]\n self.nDOF_phi_trial_element = [\n phi_k.femSpace.max_nDOF_element for phi_k in list(self.phi.values())]\n self.n_phi_ip_element = [\n phi_k.femSpace.referenceFiniteElement.interpolationConditions.nQuadraturePoints for phi_k in list(self.phi.values())]\n self.nDOF_test_element = [\n femSpace.max_nDOF_element for femSpace in list(self.testSpace.values())]\n 
self.nFreeDOF_global = [\n dc.nFreeDOF_global for dc in list(self.dirichletConditions.values())]\n self.nVDOF_element = sum(self.nDOF_trial_element)\n self.nFreeVDOF_global = sum(self.nFreeDOF_global)\n #\n NonlinearEquation.__init__(self, self.nFreeVDOF_global)\n #\n # build the quadrature point dictionaries from the input (this\n # is just for convenience so that the input doesn't have to be\n # complete)\n #\n elementQuadratureDict = {}\n elemQuadIsDict = isinstance(elementQuadrature, dict)\n if elemQuadIsDict: # set terms manually\n for I in self.coefficients.elementIntegralKeys:\n if I in elementQuadrature:\n elementQuadratureDict[I] = elementQuadrature[I]\n else:\n elementQuadratureDict[I] = elementQuadrature['default']\n else:\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[I] = elementQuadrature\n if self.stabilization is not None:\n for I in self.coefficients.elementIntegralKeys:\n if elemQuadIsDict:\n if I in elementQuadrature:\n elementQuadratureDict[\n ('stab',) + I[1:]] = elementQuadrature[I]\n else:\n elementQuadratureDict[\n ('stab',) + I[1:]] = elementQuadrature['default']\n else:\n elementQuadratureDict[\n ('stab',) + I[1:]] = elementQuadrature\n if self.shockCapturing is not None:\n for ci in self.shockCapturing.components:\n if elemQuadIsDict:\n if ('numDiff', ci, ci) in elementQuadrature:\n elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature[\n ('numDiff', ci, ci)]\n else:\n elementQuadratureDict[('numDiff', ci, ci)] = elementQuadrature[\n 'default']\n else:\n elementQuadratureDict[\n ('numDiff', ci, ci)] = elementQuadrature\n if massLumping:\n for ci in list(self.coefficients.mass.keys()):\n elementQuadratureDict[('m', ci)] = Quadrature.SimplexLobattoQuadrature(\n self.nSpace_global, 1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[\n ('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n if reactionLumping:\n for ci in list(self.coefficients.mass.keys()):\n elementQuadratureDict[('r', ci)] = Quadrature.SimplexLobattoQuadrature(\n self.nSpace_global, 1)\n for I in self.coefficients.elementIntegralKeys:\n elementQuadratureDict[\n ('stab',) + I[1:]] = Quadrature.SimplexLobattoQuadrature(self.nSpace_global, 1)\n elementBoundaryQuadratureDict = {}\n if isinstance(elementBoundaryQuadrature, dict): # set terms manually\n for I in self.coefficients.elementBoundaryIntegralKeys:\n if I in elementBoundaryQuadrature:\n elementBoundaryQuadratureDict[\n I] = elementBoundaryQuadrature[I]\n else:\n elementBoundaryQuadratureDict[\n I] = elementBoundaryQuadrature['default']\n else:\n for I in self.coefficients.elementBoundaryIntegralKeys:\n elementBoundaryQuadratureDict[I] = elementBoundaryQuadrature\n #\n # find the union of all element quadrature points and\n # build a quadrature rule for each integral that has a\n # weight at each point in the union\n # mwf include tag telling me which indices are which quadrature rule?\n (self.elementQuadraturePoints, self.elementQuadratureWeights,\n self.elementQuadratureRuleIndeces) = Quadrature.buildUnion(elementQuadratureDict)\n self.nQuadraturePoints_element = self.elementQuadraturePoints.shape[0]\n self.nQuadraturePoints_global = self.nQuadraturePoints_element * \\\n self.mesh.nElements_global\n #\n # Repeat the same thing for the element boundary quadrature\n #\n (self.elementBoundaryQuadraturePoints, self.elementBoundaryQuadratureWeights,\n self.elementBoundaryQuadratureRuleIndeces) = Quadrature.buildUnion(elementBoundaryQuadratureDict)\n 
self.nElementBoundaryQuadraturePoints_elementBoundary = self.elementBoundaryQuadraturePoints.shape[\n 0]\n self.nElementBoundaryQuadraturePoints_global = (\n self.mesh.nElements_global *\n self.mesh.nElementBoundaries_element *\n self.nElementBoundaryQuadraturePoints_elementBoundary)\n# if isinstance(self.u[0].femSpace,C0_AffineLinearOnSimplexWithNodalBasis):\n# print self.nQuadraturePoints_element\n# if self.nSpace_global == 3:\n# assert(self.nQuadraturePoints_element == 5)\n# elif self.nSpace_global == 2:\n# assert(self.nQuadraturePoints_element == 6)\n# elif self.nSpace_global == 1:\n# assert(self.nQuadraturePoints_element == 3)\n#\n# print self.nElementBoundaryQuadraturePoints_elementBoundary\n# if self.nSpace_global == 3:\n# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)\n# elif self.nSpace_global == 2:\n# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 4)\n# elif self.nSpace_global == 1:\n# assert(self.nElementBoundaryQuadraturePoints_elementBoundary == 1)\n #\n # simplified allocations for test==trial and also check if space is mixed or not\n #\n self.q = {}\n self.ebq = {}\n self.ebq_global = {}\n self.ebqe = {}\n self.phi_ip = {}\n # mesh\n self.q['x'] = np.zeros(\n (self.mesh.nElements_global, self.nQuadraturePoints_element, 3), 'd')\n self.ebqe['x'] = np.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n 3),\n 'd')\n self.q[('dV_u', 0)] = (old_div(1.0, self.mesh.nElements_global)) * \\\n np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('u', 0)] = np.zeros(\n (self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[\n ('grad(u)',\n 0)] = np.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global),\n 'd')\n self.q[('m_last', 0)] = np.zeros(\n (self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('mt', 0)] = np.zeros(\n (self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q['dV'] = np.zeros(\n (self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q['dV_last'] = -1000 * \\\n np.ones((self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n # np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m_tmp', 0)] = self.q[('u', 0)]\n # np.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n self.q[('m', 0)] = self.q[('u', 0)]\n # cek todo for NCLS3P we really don't need dH because it's just q_v\n # from the flow model\n self.q[('dH', 0, 0)] = np.zeros((self.mesh.nElements_global,\n self.nQuadraturePoints_element, self.nSpace_global), 'd')\n self.q[\n ('dH_sge',\n 0,\n 0)] = np.zeros(\n (self.mesh.nElements_global,\n self.nQuadraturePoints_element,\n self.nSpace_global),\n 'd')\n self.q[('cfl', 0)] = np.zeros(\n (self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('numDiff', 0, 0)] = np.zeros(\n (self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.ebqe[\n ('u',\n 0)] = np.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary),\n 'd')\n self.ebqe[\n ('grad(u)',\n 0)] = np.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),\n 'd')\n # mwf for running as standalone\n self.ebqe[\n ('dH',\n 0,\n 0)] = np.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n 
self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.nSpace_global),\n 'd')\n self.q[('dm', 0, 0)] = np.zeros(\n (self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.q[('H', 0)] = np.zeros(\n (self.mesh.nElements_global, self.nQuadraturePoints_element), 'd')\n self.points_elementBoundaryQuadrature = set()\n self.scalars_elementBoundaryQuadrature = set(\n [('u', ci) for ci in range(self.nc)])\n self.vectors_elementBoundaryQuadrature = set()\n self.tensors_elementBoundaryQuadrature = set()\n #\n # allocate residual and Jacobian storage\n #\n self.inflowBoundaryBC = {}\n self.inflowBoundaryBC_values = {}\n self.inflowFlux = {}\n for cj in range(self.nc):\n self.inflowBoundaryBC[cj] = np.zeros(\n (self.mesh.nExteriorElementBoundaries_global,), 'i')\n self.inflowBoundaryBC_values[cj] = np.zeros(\n (self.mesh.nExteriorElementBoundaries_global, self.nDOF_trial_element[cj]), 'd')\n self.inflowFlux[cj] = np.zeros(\n (self.mesh.nExteriorElementBoundaries_global,\n self.nElementBoundaryQuadraturePoints_elementBoundary),\n 'd')\n self.internalNodes = set(range(self.mesh.nNodes_global))\n # identify the internal nodes this is ought to be in mesh\n # \\todo move this to mesh\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n eN_global = self.mesh.elementBoundaryElementsArray[ebN, 0]\n ebN_element = self.mesh.elementBoundaryLocalElementBoundariesArray[\n ebN, 0]\n for i in range(self.mesh.nNodes_element):\n if i != ebN_element:\n I = self.mesh.elementNodesArray[eN_global, i]\n self.internalNodes -= set([I])\n self.nNodes_internal = len(self.internalNodes)\n self.internalNodesArray = np.zeros((self.nNodes_internal,), 'i')\n for nI, n in enumerate(self.internalNodes):\n self.internalNodesArray[nI] = n\n #\n del self.internalNodes\n self.internalNodes = None\n log(\"Updating local to global mappings\", 2)\n self.updateLocal2Global()\n log(\"Building time integration object\", 2)\n log(memory(\"inflowBC, internalNodes,updateLocal2Global\",\n \"OneLevelTransport\"), level=4)\n # mwf for interpolating subgrid error for gradients etc\n if self.stabilization and self.stabilization.usesGradientStabilization:\n self.timeIntegration = TimeIntegrationClass(\n self, integrateInterpolationPoints=True)\n else:\n self.timeIntegration = TimeIntegrationClass(self)\n\n if options is not None:\n self.timeIntegration.setFromOptions(options)\n log(memory(\"TimeIntegration\", \"OneLevelTransport\"), level=4)\n log(\"Calculating numerical quadrature formulas\", 2)\n self.calculateQuadrature()\n\n self.setupFieldStrides()\n\n comm = Comm.get()\n self.comm = comm\n if comm.size() > 1:\n assert numericalFluxType is not None and numericalFluxType.useWeakDirichletConditions, \"You must use a numerical flux to apply weak boundary conditions for parallel runs\"\n\n log(memory(\"stride+offset\", \"OneLevelTransport\"), level=4)\n if numericalFluxType is not None:\n if options is None or options.periodicDirichletConditions is None:\n self.numericalFlux = numericalFluxType(\n self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict)\n else:\n self.numericalFlux = numericalFluxType(\n self,\n dofBoundaryConditionsSetterDict,\n advectiveFluxBoundaryConditionsSetterDict,\n diffusiveFluxBoundaryConditionsSetterDictDict,\n options.periodicDirichletConditions)\n else:\n self.numericalFlux = None\n # set penalty terms\n # cek todo move into numerical flux 
initialization\n if 'penalty' in self.ebq_global:\n for ebN in range(self.mesh.nElementBoundaries_global):\n for k in range(\n self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebq_global['penalty'][ebN, k] = old_div(self.numericalFlux.penalty_constant, (\n self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power))\n # penalty term\n # cek move to Numerical flux initialization\n if 'penalty' in self.ebqe:\n for ebNE in range(self.mesh.nExteriorElementBoundaries_global):\n ebN = self.mesh.exteriorElementBoundariesArray[ebNE]\n for k in range(\n self.nElementBoundaryQuadraturePoints_elementBoundary):\n self.ebqe['penalty'][ebNE, k] = old_div(self.numericalFlux.penalty_constant, \\\n self.mesh.elementBoundaryDiametersArray[ebN]**self.numericalFlux.penalty_power)\n log(memory(\"numericalFlux\", \"OneLevelTransport\"), level=4)\n self.elementEffectiveDiametersArray = self.mesh.elementInnerDiametersArray\n # use post processing tools to get conservative fluxes, None by default\n from proteus import PostProcessingTools\n self.velocityPostProcessor = PostProcessingTools.VelocityPostProcessingChooser(\n self)\n log(memory(\"velocity postprocessor\", \"OneLevelTransport\"), level=4)\n # helper for writing out data storage\n from proteus import Archiver\n self.elementQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.elementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n self.exteriorElementBoundaryQuadratureDictionaryWriter = Archiver.XdmfWriter()\n # TODO get rid of this\n# for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():\n# self.ebqe[('advectiveFlux_bc_flag',ci)] = np.zeros(self.ebqe[('advectiveFlux_bc',ci)].shape,'i')\n# for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():\n# if self.coefficients.advection.has_key(ci):\n# self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)\n# self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1\n\n if hasattr(self.numericalFlux, 'setDirichletValues'):\n self.numericalFlux.setDirichletValues(self.ebqe)\n if not hasattr(self.numericalFlux, 'isDOFBoundary'):\n self.numericalFlux.isDOFBoundary = {\n 0: np.zeros(self.ebqe[('u', 0)].shape, 'i')}\n if not hasattr(self.numericalFlux, 'ebqe'):\n self.numericalFlux.ebqe = {\n ('u', 0): np.zeros(self.ebqe[('u', 0)].shape, 'd')}\n # TODO how to handle redistancing calls for\n # calculateCoefficients,calculateElementResidual etc\n self.globalResidualDummy = None\n compKernelFlag = 0\n self.ncls3p = cNCLS3P.newNCLS3P(\n self.nSpace_global,\n self.nQuadraturePoints_element,\n self.u[0].femSpace.elementMaps.localFunctionSpace.dim,\n self.u[0].femSpace.referenceFiniteElement.localFunctionSpace.dim,\n self.testSpace[0].referenceFiniteElement.localFunctionSpace.dim,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n compKernelFlag)\n\n self.forceStrongConditions = False\n if self.forceStrongConditions:\n self.dirichletConditionsForceDOF = DOFBoundaryConditions(\n self.u[0].femSpace,\n dofBoundaryConditionsSetterDict[0],\n weakDirichletConditions=False)\n\n if self.movingDomain:\n self.MOVING_DOMAIN = 1.0\n else:\n self.MOVING_DOMAIN = 0.0\n if self.mesh.nodeVelocityArray is None:\n self.mesh.nodeVelocityArray = np.zeros(\n self.mesh.nodeArray.shape, 'd')\n\n self.waterline_calls = 0\n self.waterline_prints = 0\n\n # mql. 
Allow the user to provide functions to define the velocity field\n self.hasVelocityFieldAsFunction = False\n if ('velocityField') in dir(options):\n self.velocityField = options.velocityField\n self.hasVelocityFieldAsFunction = True\n\n # interface locator\n self.cell_interface_locator = np.zeros(self.mesh.nElements_global,'d')\n self.interface_locator = np.zeros(self.u[0].dof.shape,'d')\n self.quantDOFs = np.zeros(self.u[0].dof.shape,'d')\n\n # For Taylor Galerkin methods\n self.stage = 1\n self.auxTaylorGalerkinFlag = 1\n self.uTilde_dof = np.zeros(self.u[0].dof.shape,'d')\n if self.coefficients.EXPLICIT_METHOD==True:\n self.useTwoStageNewton = True\n \n # Some asserts for NCLS with Taylor Galerkin\n if self.coefficients.EXPLICIT_METHOD==True:\n assert isinstance(self.timeIntegration,proteus.TimeIntegration.BackwardEuler_cfl), \"If EXPLICIT_METHOD=True, use BackwardEuler_cfl\"\n assert options.levelNonlinearSolver == proteus.NonlinearSolvers.TwoStageNewton, \"If EXPLICIT_METHOD=True, use levelNonlinearSolver=TwoStageNewton\"\n \n # mwf these are getting called by redistancing classes,\n def calculateCoefficients(self):\n pass\n\n def updateVelocityFieldAsFunction(self):\n X = {0: self.q[('x')][:, :, 0],\n 1: self.q[('x')][:, :, 1],\n 2: self.q[('x')][:, :, 2]}\n t = self.timeIntegration.t\n self.coefficients.q_v[..., 0] = self.velocityField[0](X, t)\n self.coefficients.q_v[..., 1] = self.velocityField[1](X, t)\n if (self.nSpace_global == 3):\n self.coefficients.q_v[..., 2] = self.velocityField[2](X, t)\n\n # BOUNDARY\n ebqe_X = {0: self.ebqe['x'][:, :, 0],\n 1: self.ebqe['x'][:, :, 1],\n 2: self.ebqe['x'][:, :, 2]}\n self.coefficients.ebqe_v[..., 0] = self.velocityField[0](ebqe_X, t)\n self.coefficients.ebqe_v[..., 1] = self.velocityField[1](ebqe_X, t)\n if (self.nSpace_global == 3):\n self.coefficients.ebqe_v[..., 2] = self.velocityField[2](ebqe_X, t)\n\n def calculateElementResidual(self):\n if self.globalResidualDummy is not None:\n self.getResidual(self.u[0].dof, self.globalResidualDummy)\n\n def getResidual(self, u, r):\n import pdb\n import copy\n \"\"\"\n Calculate the element residuals and add in to the global residual\n \"\"\"\n # mwf debug\n # pdb.set_trace()\n\n if self.u_dof_old is None:\n # Pass initial condition to u_dof_old\n self.u_dof_old = np.copy(self.u[0].dof)\n\n r.fill(0.0)\n self.cell_interface_locator.fill(0.0) \n self.interface_locator.fill(0.0) \n # Load the unknowns into the finite element dof\n self.timeIntegration.calculateCoefs()\n self.timeIntegration.calculateU(u)\n self.setUnknowns(self.timeIntegration.u)\n # cek can put in logic to skip of BC's don't depend on t or u\n # Dirichlet boundary conditions\n # if hasattr(self.numericalFlux,'setDirichletValues'):\n if (self.stage!=2):\n self.numericalFlux.setDirichletValues(self.ebqe)\n # flux boundary conditions, SHOULDN'T HAVE\n # cNCLS3P.calculateResidual(self.mesh.nElements_global,\n # try to use 1d,2d,3d specific modules\n\n if self.forceStrongConditions:\n for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):\n self.u[0].dof[dofN] = g(\n self.dirichletConditionsForceDOF.DOFBoundaryPointDict[dofN],\n self.timeIntegration.t)\n \n if (self.stage==2 and self.auxTaylorGalerkinFlag==1):\n self.uTilde_dof[:] = self.u[0].dof\n self.auxTaylorGalerkinFlag=0\n\n argsDict = cArgumentsDict.ArgumentsDict()\n argsDict[\"mesh_trial_ref\"] = self.u[0].femSpace.elementMaps.psi\n argsDict[\"mesh_grad_trial_ref\"] = self.u[0].femSpace.elementMaps.grad_psi\n argsDict[\"mesh_dof\"] = 
self.mesh.nodeArray\n argsDict[\"mesh_velocity_dof\"] = self.mesh.nodeVelocityArray\n argsDict[\"MOVING_DOMAIN\"] = self.MOVING_DOMAIN\n argsDict[\"mesh_l2g\"] = self.mesh.elementNodesArray\n argsDict[\"dV_ref\"] = self.elementQuadratureWeights[('u', 0)]\n argsDict[\"u_trial_ref\"] = self.u[0].femSpace.psi\n argsDict[\"u_grad_trial_ref\"] = self.u[0].femSpace.grad_psi\n argsDict[\"u_test_ref\"] = self.u[0].femSpace.psi\n argsDict[\"u_grad_test_ref\"] = self.u[0].femSpace.grad_psi\n argsDict[\"mesh_trial_trace_ref\"] = self.u[0].femSpace.elementMaps.psi_trace\n argsDict[\"mesh_grad_trial_trace_ref\"] = self.u[0].femSpace.elementMaps.grad_psi_trace\n argsDict[\"dS_ref\"] = self.elementBoundaryQuadratureWeights[('u', 0)]\n argsDict[\"u_trial_trace_ref\"] = self.u[0].femSpace.psi_trace\n argsDict[\"u_grad_trial_trace_ref\"] = self.u[0].femSpace.grad_psi_trace\n argsDict[\"u_test_trace_ref\"] = self.u[0].femSpace.psi_trace\n argsDict[\"u_grad_test_trace_ref\"] = self.u[0].femSpace.grad_psi_trace\n argsDict[\"normal_ref\"] = self.u[0].femSpace.elementMaps.boundaryNormals\n argsDict[\"boundaryJac_ref\"] = self.u[0].femSpace.elementMaps.boundaryJacobians\n argsDict[\"nElements_global\"] = self.mesh.nElements_global\n argsDict[\"useMetrics\"] = self.coefficients.useMetrics\n argsDict[\"alphaBDF\"] = self.timeIntegration.alpha_bdf\n argsDict[\"lag_shockCapturing\"] = self.shockCapturing.lag\n argsDict[\"shockCapturingDiffusion\"] = self.shockCapturing.shockCapturingFactor\n argsDict[\"sc_uref\"] = self.coefficients.sc_uref\n argsDict[\"sc_alpha\"] = self.coefficients.sc_beta\n argsDict[\"u_l2g\"] = self.u[0].femSpace.dofMap.l2g\n argsDict[\"elementDiameter\"] = self.mesh.elementDiametersArray\n argsDict[\"u_dof\"] = self.u[0].dof\n argsDict[\"u_dof_old\"] = self.u_dof_old\n argsDict[\"velocity\"] = self.coefficients.q_v\n argsDict[\"q_m\"] = self.timeIntegration.m_tmp[0]\n argsDict[\"q_u\"] = self.q[('u', 0)]\n argsDict[\"q_n\"] = self.q[('grad(u)', 0)]\n argsDict[\"q_dH\"] = self.q[('dH_sge', 0, 0)]\n argsDict[\"q_m_betaBDF\"] = self.timeIntegration.beta_bdf[0]\n argsDict[\"q_dV\"] = self.q['dV']\n argsDict[\"q_dV_last\"] = self.q['dV_last']\n argsDict[\"cfl\"] = self.q[('cfl', 0)]\n argsDict[\"q_numDiff_u\"] = self.shockCapturing.numDiff[0]\n argsDict[\"q_numDiff_u_last\"] = self.shockCapturing.numDiff_last[0]\n argsDict[\"offset_u\"] = self.offset[0]\n argsDict[\"stride_u\"] = self.stride[0]\n argsDict[\"globalResidual\"] = r\n argsDict[\"nExteriorElementBoundaries_global\"] = self.mesh.nExteriorElementBoundaries_global\n argsDict[\"exteriorElementBoundariesArray\"] = self.mesh.exteriorElementBoundariesArray\n argsDict[\"elementBoundaryElementsArray\"] = self.mesh.elementBoundaryElementsArray\n argsDict[\"elementBoundaryLocalElementBoundariesArray\"] = self.mesh.elementBoundaryLocalElementBoundariesArray\n argsDict[\"ebqe_velocity_ext\"] = self.coefficients.ebqe_v\n argsDict[\"isDOFBoundary_u\"] = self.numericalFlux.isDOFBoundary[0]\n argsDict[\"ebqe_rd_u_ext\"] = self.coefficients.rdModel_ebqe\n argsDict[\"ebqe_bc_u_ext\"] = self.numericalFlux.ebqe[('u', 0)]\n argsDict[\"ebqe_u\"] = self.ebqe[('u', 0)]\n argsDict[\"cell_interface_locator\"] = self.cell_interface_locator\n argsDict[\"interface_locator\"] = self.interface_locator\n argsDict[\"EXPLICIT_METHOD\"] = self.coefficients.EXPLICIT_METHOD\n argsDict[\"degree_polynomial\"] = float(self.u[0].femSpace.order)\n argsDict[\"stage\"] = self.stage\n argsDict[\"uTilde_dof\"] = self.uTilde_dof\n argsDict[\"dt\"] = self.timeIntegration.dt\n 
argsDict[\"PURE_BDF\"] = self.coefficients.PURE_BDF\n self.ncls3p.calculateResidual(argsDict)\n\n #if self.coefficients.EXPLICIT_METHOD:\n # self.taylorGalerkinStage = 2\n \n self.quantDOFs[:] = self.interface_locator\n \n if self.forceStrongConditions:\n for dofN, g in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.items()):\n r[dofN] = 0\n\n # print \"velocity in ncls\",self.coefficients.q_v,\n # print \"cfl\",self.q[('cfl',0)]\n if self.stabilization:\n self.stabilization.accumulateSubgridMassHistory(self.q)\n log(\"Global residual\", level=9, data=r)\n # mwf debug\n # pdb.set_trace()\n # mwf decide if this is reasonable for keeping solver statistics\n self.nonlinear_function_evaluations += 1\n if self.globalResidualDummy is None:\n self.globalResidualDummy = np.zeros(r.shape, 'd')\n\n def getJacobian(self, jacobian):\n #import superluWrappers\n #import np\n import pdb\n cfemIntegrals.zeroJacobian_CSR(self.nNonzerosInJacobian,\n jacobian)\n # mwf debug\n # pdb.set_trace()\n # cNCLS3P.calculateJacobian(self.mesh.nElements_global,\n\n (rowptr, colind, globalJacobian) = jacobian.getCSRrepresentation()\n argsDict = cArgumentsDict.ArgumentsDict()\n argsDict[\"mesh_trial_ref\"] = self.u[0].femSpace.elementMaps.psi\n argsDict[\"mesh_grad_trial_ref\"] = self.u[0].femSpace.elementMaps.grad_psi\n argsDict[\"mesh_dof\"] = self.mesh.nodeArray\n argsDict[\"mesh_velocity_dof\"] = self.mesh.nodeVelocityArray\n argsDict[\"MOVING_DOMAIN\"] = self.MOVING_DOMAIN\n argsDict[\"mesh_l2g\"] = self.mesh.elementNodesArray\n argsDict[\"dV_ref\"] = self.elementQuadratureWeights[('u', 0)]\n argsDict[\"u_trial_ref\"] = self.u[0].femSpace.psi\n argsDict[\"u_grad_trial_ref\"] = self.u[0].femSpace.grad_psi\n argsDict[\"u_test_ref\"] = self.u[0].femSpace.psi\n argsDict[\"u_grad_test_ref\"] = self.u[0].femSpace.grad_psi\n argsDict[\"mesh_trial_trace_ref\"] = self.u[0].femSpace.elementMaps.psi_trace\n argsDict[\"mesh_grad_trial_trace_ref\"] = self.u[0].femSpace.elementMaps.grad_psi_trace\n argsDict[\"dS_ref\"] = self.elementBoundaryQuadratureWeights[('u', 0)]\n argsDict[\"u_trial_trace_ref\"] = self.u[0].femSpace.psi_trace\n argsDict[\"u_grad_trial_trace_ref\"] = self.u[0].femSpace.grad_psi_trace\n argsDict[\"u_test_trace_ref\"] = self.u[0].femSpace.psi_trace\n argsDict[\"u_grad_test_trace_ref\"] = self.u[0].femSpace.grad_psi_trace\n argsDict[\"normal_ref\"] = self.u[0].femSpace.elementMaps.boundaryNormals\n argsDict[\"boundaryJac_ref\"] = self.u[0].femSpace.elementMaps.boundaryJacobians\n argsDict[\"nElements_global\"] = self.mesh.nElements_global\n argsDict[\"useMetrics\"] = self.coefficients.useMetrics\n argsDict[\"alphaBDF\"] = self.timeIntegration.alpha_bdf\n argsDict[\"lag_shockCapturing\"] = self.shockCapturing.lag\n argsDict[\"shockCapturingDiffusion\"] = self.shockCapturing.shockCapturingFactor\n argsDict[\"u_l2g\"] = self.u[0].femSpace.dofMap.l2g\n argsDict[\"elementDiameter\"] = self.mesh.elementDiametersArray\n argsDict[\"u_dof\"] = self.u[0].dof\n argsDict[\"velocity\"] = self.coefficients.q_v\n argsDict[\"q_m_betaBDF\"] = self.timeIntegration.beta_bdf[0]\n argsDict[\"cfl\"] = self.q[('cfl', 0)]\n argsDict[\"q_numDiff_u_last\"] = self.shockCapturing.numDiff_last[0]\n argsDict[\"csrRowIndeces_u_u\"] = self.csrRowIndeces[(0, 0)]\n argsDict[\"csrColumnOffsets_u_u\"] = self.csrColumnOffsets[(0, 0)]\n argsDict[\"globalJacobian\"] = globalJacobian\n argsDict[\"nExteriorElementBoundaries_global\"] = self.mesh.nExteriorElementBoundaries_global\n argsDict[\"exteriorElementBoundariesArray\"] = 
self.mesh.exteriorElementBoundariesArray\n argsDict[\"elementBoundaryElementsArray\"] = self.mesh.elementBoundaryElementsArray\n argsDict[\"elementBoundaryLocalElementBoundariesArray\"] = self.mesh.elementBoundaryLocalElementBoundariesArray\n argsDict[\"ebqe_velocity_ext\"] = self.coefficients.ebqe_v\n argsDict[\"isDOFBoundary_u\"] = self.numericalFlux.isDOFBoundary[0]\n argsDict[\"ebqe_rd_u_ext\"] = self.coefficients.rdModel_ebqe\n argsDict[\"ebqe_bc_u_ext\"] = self.numericalFlux.ebqe[('u', 0)]\n argsDict[\"csrColumnOffsets_eb_u_u\"] = self.csrColumnOffsets_eb[(0, 0)]\n argsDict[\"EXPLICIT_METHOD\"] = self.coefficients.EXPLICIT_METHOD\n argsDict[\"PURE_BDF\"] = self.coefficients.PURE_BDF\n self.ncls3p.calculateJacobian(argsDict)\n\n # Load the Dirichlet conditions directly into residual\n if self.forceStrongConditions:\n scaling = 1.0 # probably want to add some scaling to match non-dirichlet diagonals in linear system\n for dofN in list(self.dirichletConditionsForceDOF.DOFBoundaryConditionsDict.keys()):\n global_dofN = dofN\n for i in range(\n self.rowptr[global_dofN],\n self.rowptr[\n global_dofN + 1]):\n if (self.colind[i] == global_dofN):\n # print \"RBLES forcing residual cj = %s dofN= %s\n # global_dofN= %s was self.nzval[i]= %s now =%s \" %\n # (cj,dofN,global_dofN,self.nzval[i],scaling)\n self.nzval[i] = scaling\n else:\n self.nzval[i] = 0.0\n # print \"RBLES zeroing residual cj = %s dofN= %s\n # global_dofN= %s \" % (cj,dofN,global_dofN)\n\n log(\"Jacobian \", level=10, data=jacobian)\n # mwf decide if this is reasonable for solver statistics\n self.nonlinear_function_jacobian_evaluations += 1\n return jacobian\n\n def calculateElementQuadrature(self):\n \"\"\"\n Calculate the physical location and weights of the quadrature rules\n and the shape information at the quadrature points.\n\n This function should be called only when the mesh changes.\n \"\"\"\n self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,\n self.q['x'])\n self.u[0].femSpace.elementMaps.getBasisValuesRef(\n self.elementQuadraturePoints)\n self.u[0].femSpace.elementMaps.getBasisGradientValuesRef(\n self.elementQuadraturePoints)\n self.u[0].femSpace.getBasisValuesRef(self.elementQuadraturePoints)\n self.u[0].femSpace.getBasisGradientValuesRef(\n self.elementQuadraturePoints)\n self.coefficients.initializeElementQuadrature(\n self.timeIntegration.t, self.q)\n if self.stabilization is not None:\n self.stabilization.initializeElementQuadrature(\n self.mesh, self.timeIntegration.t, self.q)\n self.stabilization.initializeTimeIntegration(self.timeIntegration)\n if self.shockCapturing is not None:\n self.shockCapturing.initializeElementQuadrature(\n self.mesh, self.timeIntegration.t, self.q)\n\n def calculateElementBoundaryQuadrature(self):\n pass\n\n def calculateExteriorElementBoundaryQuadrature(self):\n \"\"\"\n Calculate the physical location and weights of the quadrature rules\n and the shape information at the quadrature points on global element boundaries.\n\n This function should be called only when the mesh changes.\n \"\"\"\n #\n # get physical locations of element boundary quadrature points\n #\n # assume all components live on the same mesh\n self.u[0].femSpace.elementMaps.getBasisValuesTraceRef(\n self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.elementMaps.getBasisGradientValuesTraceRef(\n self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.getBasisValuesTraceRef(\n self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.getBasisGradientValuesTraceRef(\n 
self.elementBoundaryQuadraturePoints)\n self.u[0].femSpace.elementMaps.getValuesGlobalExteriorTrace(\n self.elementBoundaryQuadraturePoints, self.ebqe['x'])\n self.fluxBoundaryConditionsObjectsDict = dict([(cj, FluxBoundaryConditions(self.mesh,\n self.nElementBoundaryQuadraturePoints_elementBoundary,\n self.ebqe[('x')],\n self.advectiveFluxBoundaryConditionsSetterDict[cj],\n self.diffusiveFluxBoundaryConditionsSetterDictDict[cj]))\n for cj in list(self.advectiveFluxBoundaryConditionsSetterDict.keys())])\n self.coefficients.initializeGlobalExteriorElementBoundaryQuadrature(\n self.timeIntegration.t, self.ebqe)\n\n def estimate_mt(self):\n pass\n\n def calculateSolutionAtQuadrature(self):\n pass\n\n def calculateAuxiliaryQuantitiesAfterStep(self):\n pass\n\n def computeWaterline(self, t):\n self.waterline_calls += 1\n if self.coefficients.waterline_interval > 0 and self.waterline_calls % self.coefficients.waterline_interval == 0:\n self.waterline_npoints = np.zeros((1,), 'i')\n self.waterline_data = np.zeros(\n (self.mesh.nExteriorElementBoundaries_global, self.nSpace_global), 'd')\n argsDict = cArgumentsDict.ArgumentsDict()\n argsDict[\"wlc\"] = self.waterline_npoints\n argsDict[\"waterline\"] = self.waterline_data\n argsDict[\"mesh_trial_ref\"] = self.u[0].femSpace.elementMaps.psi\n argsDict[\"mesh_grad_trial_ref\"] = self.u[0].femSpace.elementMaps.grad_psi\n argsDict[\"mesh_dof\"] = self.mesh.nodeArray\n argsDict[\"mesh_velocity_dof\"] = self.mesh.nodeVelocityArray\n argsDict[\"MOVING_DOMAIN\"] = self.MOVING_DOMAIN\n argsDict[\"mesh_l2g\"] = self.mesh.elementNodesArray\n argsDict[\"dV_ref\"] = self.elementQuadratureWeights[('u', 0)]\n argsDict[\"u_trial_ref\"] = self.u[0].femSpace.psi\n argsDict[\"u_grad_trial_ref\"] = self.u[0].femSpace.grad_psi\n argsDict[\"u_test_ref\"] = self.u[0].femSpace.psi\n argsDict[\"u_grad_test_ref\"] = self.u[0].femSpace.grad_psi\n argsDict[\"mesh_trial_trace_ref\"] = self.u[0].femSpace.elementMaps.psi_trace\n argsDict[\"mesh_grad_trial_trace_ref\"] = self.u[0].femSpace.elementMaps.grad_psi_trace\n argsDict[\"dS_ref\"] = self.elementBoundaryQuadratureWeights[('u', 0)]\n argsDict[\"u_trial_trace_ref\"] = self.u[0].femSpace.psi_trace\n argsDict[\"u_grad_trial_trace_ref\"] = self.u[0].femSpace.grad_psi_trace\n argsDict[\"u_test_trace_ref\"] = self.u[0].femSpace.psi_trace\n argsDict[\"u_grad_test_trace_ref\"] = self.u[0].femSpace.grad_psi_trace\n argsDict[\"normal_ref\"] = self.u[0].femSpace.elementMaps.boundaryNormals\n argsDict[\"boundaryJac_ref\"] = self.u[0].femSpace.elementMaps.boundaryJacobians\n argsDict[\"nElements_global\"] = self.mesh.nElements_global\n argsDict[\"useMetrics\"] = self.coefficients.useMetrics\n argsDict[\"alphaBDF\"] = self.timeIntegration.alpha_bdf\n argsDict[\"lag_shockCapturing\"] = self.shockCapturing.lag\n argsDict[\"shockCapturingDiffusion\"] = self.shockCapturing.shockCapturingFactor\n argsDict[\"sc_uref\"] = self.coefficients.sc_uref\n argsDict[\"sc_alpha\"] = self.coefficients.sc_beta\n argsDict[\"u_l2g\"] = self.u[0].femSpace.dofMap.l2g\n argsDict[\"elementDiameter\"] = self.mesh.elementDiametersArray\n argsDict[\"u_dof\"] = self.u[0].dof\n argsDict[\"u_dof_old\"] = self.u_dof_old\n argsDict[\"velocity\"] = self.coefficients.q_v\n argsDict[\"q_m\"] = self.timeIntegration.m_tmp[0]\n argsDict[\"q_u\"] = self.q[('u', 0)]\n argsDict[\"q_n\"] = self.q[('grad(u)', 0)]\n argsDict[\"q_dH\"] = self.q[('dH_sge', 0, 0)]\n argsDict[\"q_m_betaBDF\"] = self.timeIntegration.beta_bdf[0]\n argsDict[\"cfl\"] = self.q[('cfl', 0)]\n 
argsDict[\"q_numDiff_u\"] = self.shockCapturing.numDiff[0]\n argsDict[\"q_numDiff_u_last\"] = self.shockCapturing.numDiff_last[0]\n argsDict[\"offset_u\"] = self.offset[0]\n argsDict[\"stride_u\"] = self.stride[0]\n argsDict[\"nExteriorElementBoundaries_global\"] = self.mesh.nExteriorElementBoundaries_global\n argsDict[\"exteriorElementBoundariesArray\"] = self.mesh.exteriorElementBoundariesArray\n argsDict[\"elementBoundaryElementsArray\"] = self.mesh.elementBoundaryElementsArray\n argsDict[\"elementBoundaryLocalElementBoundariesArray\"] = self.mesh.elementBoundaryLocalElementBoundariesArray\n argsDict[\"elementBoundaryMaterialTypes\"] = self.mesh.elementBoundaryMaterialTypes\n argsDict[\"ebqe_velocity_ext\"] = self.coefficients.ebqe_v\n argsDict[\"isDOFBoundary_u\"] = self.numericalFlux.isDOFBoundary[0]\n argsDict[\"ebqe_bc_u_ext\"] = self.numericalFlux.ebqe[('u', 0)]\n argsDict[\"ebqe_u\"] = self.ebqe[('u', 0)]\n self.ncls3p.calculateWaterline(argsDict)\n from proteus import Comm\n comm = Comm.get()\n filename = os.path.join(self.coefficients.opts.dataDir,\n \"waterline.\" + str(comm.rank()) + \".\" + str(self.waterline_prints))\n np.save(\n filename, self.waterline_data[\n 0:self.waterline_npoints[0]])\n self.waterline_prints += 1\n\n def updateAfterMeshMotion(self):\n pass\n"
] |
[
[
"numpy.savetxt",
"numpy.linalg.svd"
],
[
"numpy.cosh",
"numpy.piecewise",
"numpy.tanh",
"numpy.sqrt",
"numpy.abs",
"numpy.select",
"numpy.maximum"
],
[
"numpy.copy",
"numpy.ones",
"numpy.save",
"numpy.zeros"
]
] |
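For reference, a minimal standalone sketch (assuming only NumPy; not part of the dataset row above) of the solitary-wave profile used in the SWFlow reef entry: the constants h0, alpha, xs and g are copied from that code, and the printed values follow directly from its sech^2 formula.

import numpy as np

# constants copied from the reef SWFlow entry above
g, h0, alpha, xs = 9.81, 0.78, 0.4, 5.0
r = np.sqrt(3.0 * alpha / (4.0 * h0**2 * (h0 + alpha)))   # decay-rate parameter of the wave
c = np.sqrt(g * (h0 + alpha))                             # solitary-wave propagation speed

def solitary_wave(x, t):
    # sech^2 free-surface perturbation of amplitude alpha travelling at speed c
    return alpha / np.cosh(r * (x - xs - c * t))**2

print(solitary_wave(xs, 0.0))         # 0.4 -- the crest equals alpha
print(solitary_wave(xs + 10.0, 0.0))  # ~4e-06 -- decays rapidly away from the crest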
asellappen/bokeh
|
[
"e003b82b18c8ee7fb36f23c5f877e5e16b792827"
] |
[
"tests/unit/bokeh/core/test_properties.py"
] |
[
"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nfrom bokeh._testing.util.api import verify_all\nfrom bokeh.core.has_props import HasProps\nfrom bokeh.core.properties import (\n Dict,\n Enum,\n Float,\n Instance,\n Int,\n List,\n NumberSpec,\n Override,\n String,\n)\nfrom bokeh.models import Plot\n\n# Module under test\nimport bokeh.core.properties as bcp # isort:skip\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\nALL = (\n 'Alpha',\n 'AlphaSpec',\n 'Angle',\n 'AngleSpec',\n 'Any',\n 'AnyRef',\n 'Array',\n 'Auto',\n 'Base64String',\n 'Bool',\n 'Byte',\n 'Color',\n 'ColorHex',\n 'ColorSpec',\n 'ColumnData',\n 'Complex',\n 'DashPattern',\n 'DataDistanceSpec',\n 'DataSpec',\n 'Date',\n 'Datetime',\n 'Dict',\n 'DistanceSpec',\n 'Either',\n 'Enum',\n 'Float',\n 'FontSize',\n 'FontSizeSpec',\n 'HatchPatternSpec',\n 'HatchPatternType',\n 'Image',\n 'Include',\n 'Instance',\n 'Int',\n 'Interval',\n 'JSON',\n 'List',\n 'MarkerSpec',\n 'MarkerType',\n 'MinMaxBounds',\n 'NonNegativeInt',\n 'NumberSpec',\n 'Override',\n 'PandasDataFrame',\n 'PandasGroupBy',\n 'Percent',\n 'PositiveInt',\n 'PropertyUnitsSpec',\n 'RGB',\n 'Regex',\n 'RelativeDelta',\n 'RestrictedDict',\n 'ScreenDistanceSpec',\n 'Seq',\n 'Size',\n 'String',\n 'StringSpec',\n 'Struct',\n 'TimeDelta',\n 'Tuple',\n 'UnitsSpec',\n 'expr',\n 'field',\n 'validate',\n 'value',\n 'without_property_validation'\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#----------------------------------------------------------------------------\n\n# TODO (bev) These tests should be moved to better places\n\n\nclass Basictest:\n def test_simple_class(self) -> None:\n class Foo(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1, 2, 3])\n zz = Dict(String, Int)\n s = String(None)\n\n f = Foo()\n assert f.x == 12\n assert f.y == \"hello\"\n assert np.array_equal(np.array([1, 2, 3]), f.z)\n assert f.s is None\n\n\n assert {\"x\", \"y\", \"z\", \"zz\", \"s\"} == f.properties()\n with_defaults = f.properties_with_values(include_defaults=True)\n assert dict(x=12, y=\"hello\", z=[1,2,3], zz={}, s=None) == with_defaults\n without_defaults = f.properties_with_values(include_defaults=False)\n assert dict() == without_defaults\n\n f.x = 18\n assert f.x == 18\n\n f.y = \"bar\"\n assert f.y == \"bar\"\n\n without_defaults = f.properties_with_values(include_defaults=False)\n assert dict(x=18, y=\"bar\") == without_defaults\n\n f.z[0] = 100\n\n without_defaults = f.properties_with_values(include_defaults=False)\n assert dict(x=18, y=\"bar\", z=[100,2,3]) == without_defaults\n\n f.zz = {'a': 10}\n\n without_defaults = f.properties_with_values(include_defaults=False)\n 
assert dict(x=18, y=\"bar\", z=[100,2,3], zz={'a': 10}) == without_defaults\n\n def test_enum(self) -> None:\n class Foo(HasProps):\n x = Enum(\"blue\", \"red\", \"green\") # the first item is the default\n y = Enum(\"small\", \"medium\", \"large\", default=\"large\")\n\n f = Foo()\n assert f.x == \"blue\"\n assert f.y == \"large\"\n\n f.x = \"red\"\n assert f.x == \"red\"\n\n with pytest.raises(ValueError):\n f.x = \"yellow\"\n\n f.y = \"small\"\n assert f.y == \"small\"\n\n with pytest.raises(ValueError):\n f.y = \"yellow\"\n\n def test_inheritance(self) -> None:\n class Base(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n class Child(Base):\n z = Float(3.14)\n\n c = Child()\n assert frozenset(['x', 'y', 'z']) == frozenset(c.properties())\n assert c.y == \"hello\"\n\n def test_set(self) -> None:\n class Foo(HasProps):\n x = Int(12)\n y = Enum(\"red\", \"blue\", \"green\")\n z = String(\"blah\")\n\n f = Foo()\n assert f.x == 12\n assert f.y == \"red\"\n assert f.z == \"blah\"\n f.update(**dict(x=20, y=\"green\", z=\"hello\"))\n assert f.x == 20\n assert f.y == \"green\"\n assert f.z == \"hello\"\n with pytest.raises(ValueError):\n f.update(y=\"orange\")\n\n def test_no_parens(self) -> None:\n class Foo(HasProps):\n x = Int\n y = Int()\n f = Foo()\n assert f.x == f.y\n f.x = 13\n assert f.x == 13\n\n def test_accurate_properties_sets(self) -> None:\n class Base(HasProps):\n num = Int(12)\n container = List(String)\n child = Instance(HasProps)\n\n class Mixin(HasProps):\n mixin_num = Int(12)\n mixin_container = List(String)\n mixin_child = Instance(HasProps)\n\n class Sub(Base, Mixin):\n sub_num = Int(12)\n sub_container = List(String)\n sub_child = Instance(HasProps)\n\n b = Base()\n assert {\"child\"} == b.properties_with_refs()\n assert {\"container\"} == b.properties_containers()\n assert {\"num\", \"container\", \"child\"} == b.properties()\n assert {\"num\", \"container\", \"child\"} == b.properties(with_bases=True)\n assert {\"num\", \"container\", \"child\"} == b.properties(with_bases=False)\n\n m = Mixin()\n assert m.properties_with_refs() == {\"mixin_child\"}\n assert m.properties_containers() == {\"mixin_container\"}\n assert m.properties() == {\"mixin_num\", \"mixin_container\", \"mixin_child\"}\n assert m.properties(with_bases=True) == {\"mixin_num\", \"mixin_container\", \"mixin_child\"}\n assert m.properties(with_bases=False) == {\"mixin_num\", \"mixin_container\", \"mixin_child\"}\n\n s = Sub()\n assert s.properties_with_refs() == {\"child\", \"sub_child\", \"mixin_child\"}\n assert s.properties_containers() == {\"container\", \"sub_container\", \"mixin_container\"}\n assert s.properties() == \\\n {\"num\", \"container\", \"child\", \"mixin_num\", \"mixin_container\", \"mixin_child\", \"sub_num\", \"sub_container\", \"sub_child\"}\n assert s.properties(with_bases=True) == \\\n {\"num\", \"container\", \"child\", \"mixin_num\", \"mixin_container\", \"mixin_child\", \"sub_num\", \"sub_container\", \"sub_child\"}\n assert s.properties(with_bases=False) == {\"sub_num\", \"sub_container\", \"sub_child\"}\n\n # verify caching\n assert s.properties_with_refs() is s.properties_with_refs()\n assert s.properties_containers() is s.properties_containers()\n assert s.properties() is s.properties()\n assert s.properties(with_bases=True) is s.properties(with_bases=True)\n # this one isn't cached because we store it as a list __properties__ and wrap it\n # in a new set every time\n #assert s.properties(with_bases=False) is s.properties(with_bases=False)\n\n def 
test_accurate_dataspecs(self) -> None:\n class Base(HasProps):\n num = NumberSpec(12)\n not_a_dataspec = Float(10)\n\n class Mixin(HasProps):\n mixin_num = NumberSpec(14)\n\n class Sub(Base, Mixin):\n sub_num = NumberSpec(16)\n\n base = Base()\n mixin = Mixin()\n sub = Sub()\n\n assert {\"num\"} == base.dataspecs()\n assert {\"mixin_num\"} == mixin.dataspecs()\n assert {\"num\", \"mixin_num\", \"sub_num\"} == sub.dataspecs()\n\n assert dict(num=base.lookup(\"num\")) == base.dataspecs_with_props()\n assert dict(mixin_num=mixin.lookup(\"mixin_num\")) == mixin.dataspecs_with_props()\n assert dict(num=sub.lookup(\"num\"), mixin_num=sub.lookup(\"mixin_num\"), sub_num=sub.lookup(\"sub_num\")) == sub.dataspecs_with_props()\n\n def test_not_serialized(self) -> None:\n class NotSerialized(HasProps):\n x = Int(12, serialized=False)\n y = String(\"hello\")\n\n o = NotSerialized()\n assert o.x == 12\n assert o.y == 'hello'\n\n # non-serialized props are still in the list of props\n assert 'x' in o.properties()\n assert 'y' in o.properties()\n\n # but they aren't in the dict of props with values, since their\n # values are not important (already included in other values,\n # as with the _units properties)\n assert 'x' not in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n o.x = 42\n o.y = 'world'\n\n assert 'x' not in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' in o.properties_with_values(include_defaults=False)\n\n def test_readonly(self) -> None:\n class Readonly(HasProps):\n x = Int(12, readonly=True) # with default\n y = Int(readonly=True) # without default\n z = String(\"hello\")\n\n o = Readonly()\n assert o.x == 12\n assert o.y == None\n assert o.z == 'hello'\n\n # readonly props are still in the list of props\n assert 'x' in o.properties()\n assert 'y' in o.properties()\n assert 'z' in o.properties()\n\n # but they aren't in the dict of props with values\n assert 'x' not in o.properties_with_values(include_defaults=True)\n assert 'y' not in o.properties_with_values(include_defaults=True)\n assert 'z' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n assert 'z' not in o.properties_with_values(include_defaults=False)\n\n with pytest.raises(RuntimeError):\n o.x = 7\n with pytest.raises(RuntimeError):\n o.y = 7\n o.z = \"xyz\"\n\n assert o.x == 12\n assert o.y == None\n assert o.z == 'xyz'\n\n def test_include_defaults(self) -> None:\n class IncludeDefaultsTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsTest()\n assert o.x == 12\n assert o.y == 'hello'\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n o.x = 42\n o.y = 'world'\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' in o.properties_with_values(include_defaults=False)\n assert 'y' in 
o.properties_with_values(include_defaults=False)\n\n def test_include_defaults_with_kwargs(self) -> None:\n class IncludeDefaultsKwargsTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsKwargsTest(x=14, y=\"world\")\n assert o.x == 14\n assert o.y == 'world'\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' in o.properties_with_values(include_defaults=False)\n assert 'y' in o.properties_with_values(include_defaults=False)\n\n def test_include_defaults_set_to_same(self) -> None:\n class IncludeDefaultsSetToSameTest(HasProps):\n x = Int(12)\n y = String(\"hello\")\n\n o = IncludeDefaultsSetToSameTest()\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n # this should no-op\n o.x = 12\n o.y = \"hello\"\n\n assert 'x' in o.properties_with_values(include_defaults=True)\n assert 'y' in o.properties_with_values(include_defaults=True)\n assert 'x' not in o.properties_with_values(include_defaults=False)\n assert 'y' not in o.properties_with_values(include_defaults=False)\n\n def test_override_defaults(self) -> None:\n class FooBase(HasProps):\n x = Int(12)\n\n class FooSub(FooBase):\n x = Override(default=14)\n\n def func_default():\n return 16\n\n class FooSubSub(FooBase):\n x = Override(default=func_default)\n\n f_base = FooBase()\n f_sub = FooSub()\n f_sub_sub = FooSubSub()\n\n assert f_base.x == 12\n assert f_sub.x == 14\n assert f_sub_sub.x == 16\n\n assert 12 == f_base.properties_with_values(include_defaults=True)['x']\n assert 14 == f_sub.properties_with_values(include_defaults=True)['x']\n assert 16 == f_sub_sub.properties_with_values(include_defaults=True)['x']\n\n assert 'x' not in f_base.properties_with_values(include_defaults=False)\n assert 'x' not in f_sub.properties_with_values(include_defaults=False)\n assert 'x' not in f_sub_sub.properties_with_values(include_defaults=False)\n\n # def test_kwargs_init(self) -> None:\n # class Foo(HasProps):\n # x = String\n # y = Int\n # z = Float\n # f = Foo(x = \"hello\", y = 14)\n # assert f.x == \"hello\"\n # assert f.y == 14\n\n # with pytest.raises(TypeError):\n # # This should raise a TypeError: object.__init__() takes no parameters\n # g = Foo(z = 3.14, q = \"blah\")\n\nclass Foo(HasProps):\n pass\n\nclass Bar(HasProps):\n pass\n\nclass Baz(HasProps):\n pass\n\ndef test_HasProps_equals() -> None:\n class Foo(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1,2,3])\n\n class FooUnrelated(HasProps):\n x = Int(12)\n y = String(\"hello\")\n z = List(Int, [1,2,3])\n\n v = Foo().equals(Foo())\n assert v is True\n\n v = Foo(x=1).equals(Foo(x=1))\n assert v is True\n\n v = Foo(x=1).equals(Foo(x=2))\n assert v is False\n\n v = Foo(x=1).equals(1)\n assert v is False\n\n v = Foo().equals(FooUnrelated())\n assert v is False\n\ndef test_HasProps_clone() -> None:\n p1 = Plot(plot_width=1000)\n c1 = p1.properties_with_values(include_defaults=False)\n p2 = p1._clone()\n c2 = p2.properties_with_values(include_defaults=False)\n assert c1 == c2\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private 
API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\nTest___all__ = verify_all(bcp, ALL)\n"
] |
[
[
"numpy.array"
]
] |
edward-io/captum
|
[
"8f959950baaad00f2f9a3404d583b9f9292e35c7"
] |
[
"captum/_utils/models/linear_model/train.py"
] |
[
"import time\nfrom typing import Any, Callable, Dict, List, Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nfrom captum._utils.models.linear_model.model import LinearModel\n\n\ndef l2_loss(x1, x2, weights=None):\n if weights is None:\n return torch.mean((x1 - x2) ** 2) / 2.0\n else:\n return torch.sum((weights / weights.norm(p=1)) * ((x1 - x2) ** 2)) / 2.0\n\n\ndef sgd_train_linear_model(\n model: LinearModel,\n dataloader: DataLoader,\n construct_kwargs: Dict[str, Any],\n max_epoch: int = 100,\n reduce_lr: bool = True,\n initial_lr: float = 0.01,\n alpha: float = 1.0,\n loss_fn: Callable = l2_loss,\n reg_term: Optional[int] = 1,\n patience: int = 10,\n threshold: float = 1e-4,\n running_loss_window: Optional[int] = None,\n device: Optional[str] = None,\n init_scheme: str = \"zeros\",\n debug: bool = False,\n) -> Dict[str, float]:\n r\"\"\"\n Trains a linear model with SGD. This will continue to iterate your\n dataloader until we converged to a solution or alternatively until we have\n exhausted `max_epoch`.\n\n Convergence is defined by the loss not changing by `threshold` amount for\n `patience` number of iterations.\n\n Args:\n model\n The model to train\n dataloader\n The data to train it with. We will assume the dataloader produces\n either pairs or triples of the form (x, y) or (x, y, w). Where x and\n y are typical pairs for supervised learning and w is a weight\n vector.\n\n We will call `model._construct_model_params` with construct_kwargs\n and the input features set to `x.shape[1]` (`x.shape[0]` corresponds\n to the batch size). We assume that `len(x.shape) == 2`, i.e. the\n tensor is flat. The number of output features will be set to\n y.shape[1] or 1 (if `len(y.shape) == 1`); we require `len(y.shape)\n <= 2`.\n max_epoch\n The maximum number of epochs to exhaust\n reduce_lr\n Whether or not to reduce the learning rate as iterations progress.\n Halves the learning rate when the training loss does not move. This\n uses torch.optim.lr_scheduler.ReduceLROnPlateau and uses the\n parameters `patience` and `threshold`\n initial_lr\n The initial learning rate to use.\n alpha\n A constant for the regularization term.\n loss_fn\n The loss to optimise for. This must accept three parameters:\n x1 (predicted), x2 (labels) and a weight vector\n reg_term\n Regularization is defined by the `reg_term` norm of the weights.\n Please use `None` if you do not wish to use regularization.\n patience\n Defines the number of iterations in a row the loss must remain\n within `threshold` in order to be classified as converged.\n threshold\n Threshold for convergence detection.\n running_loss_window\n Used to report the training loss once we have finished training and\n to determine when we have converged (along with reducing the\n learning rate).\n\n The reported training loss will take the last `running_loss_window`\n iterations and average them.\n\n If `None` we will approximate this to be the number of examples in\n an epoch.\n init_scheme\n Initialization to use prior to training the linear model.\n device\n The device to send the model and data to. 
If None then no `.to` call\n will be used.\n debug\n Whether to print the loss, learning rate per iteration\n\n Returns\n This will return the final training loss (averaged with\n `running_loss_window`)\n \"\"\"\n\n loss_window: List[torch.Tensor] = []\n min_avg_loss = None\n convergence_counter = 0\n converged = False\n\n def get_point(datapoint):\n if len(datapoint) == 2:\n x, y = datapoint\n w = None\n else:\n x, y, w = datapoint\n\n if device is not None:\n x = x.to(device)\n y = y.to(device)\n if w is not None:\n w = w.to(device)\n\n return x, y, w\n\n # get a point and construct the model\n data_iter = iter(dataloader)\n x, y, w = get_point(next(data_iter))\n\n model._construct_model_params(\n in_features=x.shape[1],\n out_features=y.shape[1] if len(y.shape) == 2 else 1,\n **construct_kwargs,\n )\n model.train()\n\n assert model.linear is not None\n\n if init_scheme is not None:\n assert init_scheme in [\"xavier\", \"zeros\"]\n\n with torch.no_grad():\n if init_scheme == \"xavier\":\n torch.nn.init.xavier_uniform_(model.linear.weight)\n else:\n model.linear.weight.zero_()\n\n if model.linear.bias is not None:\n model.linear.bias.zero_()\n\n optim = torch.optim.SGD(model.parameters(), lr=initial_lr)\n if reduce_lr:\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optim, factor=0.5, patience=patience, threshold=threshold\n )\n\n t1 = time.time()\n epoch = 0\n i = 0\n while epoch < max_epoch:\n while True: # for x, y, w in dataloader\n if running_loss_window is None:\n running_loss_window = x.shape[0] * len(dataloader)\n\n y = y.view(x.shape[0], -1)\n if w is not None:\n w = w.view(x.shape[0], -1)\n\n i += 1\n\n out = model(x)\n\n loss = loss_fn(y, out, w)\n if reg_term is not None:\n reg = torch.norm(model.linear.weight, p=reg_term)\n loss += reg.sum() * alpha\n\n if len(loss_window) >= running_loss_window:\n loss_window = loss_window[1:]\n loss_window.append(loss.clone().detach())\n assert len(loss_window) <= running_loss_window\n\n average_loss = torch.mean(torch.stack(loss_window))\n if min_avg_loss is not None:\n # if we haven't improved by at least `threshold`\n if average_loss > min_avg_loss or torch.isclose(\n min_avg_loss, average_loss, atol=threshold\n ):\n convergence_counter += 1\n if convergence_counter >= patience:\n converged = True\n break\n else:\n convergence_counter = 0\n if min_avg_loss is None or min_avg_loss >= average_loss:\n min_avg_loss = average_loss.clone()\n\n if debug:\n print(\n f\"lr={optim.param_groups[0]['lr']}, Loss={loss},\"\n + \"Aloss={average_loss}, min_avg_loss={min_avg_loss}\"\n )\n\n loss.backward()\n\n optim.step()\n model.zero_grad()\n if scheduler:\n scheduler.step(average_loss)\n\n temp = next(data_iter, None)\n if temp is None:\n break\n x, y, w = get_point(temp)\n\n if converged:\n break\n\n epoch += 1\n data_iter = iter(dataloader)\n x, y, w = get_point(next(data_iter))\n\n t2 = time.time()\n return {\n \"train_time\": t2 - t1,\n \"train_loss\": torch.mean(torch.stack(loss_window)).item(),\n \"train_iter\": i,\n \"train_epoch\": epoch,\n }\n\n\nclass NormLayer(nn.Module):\n def __init__(self, mean, std, n=None, eps=1e-8) -> None:\n super().__init__()\n self.mean = mean\n self.std = std\n self.eps = eps\n\n def forward(self, x):\n return (x - self.mean) / (self.std + self.eps)\n\n\ndef sklearn_train_linear_model(\n model: LinearModel,\n dataloader: DataLoader,\n construct_kwargs: Dict[str, Any],\n sklearn_trainer: str = \"Lasso\",\n norm_input: bool = False,\n **fit_kwargs,\n):\n r\"\"\"\n Alternative method to train with 
sklearn. This does introduce some slight\n overhead as we convert the tensors to numpy and then convert the resulting\n trained model to a `LinearModel` object. However, this conversion\n should be negligible.\n\n Please note that this assumes:\n\n 0. You have sklearn and numpy installed\n 1. The dataset can fit into memory\n\n Args\n model\n The model to train.\n dataloader\n The data to use. This will be exhausted and converted to numpy\n arrays. Therefore please do not feed an infinite dataloader.\n norm_input\n Whether or not to normalize the input\n sklearn_trainer\n The sklearn model to use to train the model. Please refer to\n sklearn.linear_model for a list of modules to use.\n construct_kwargs\n Additional arguments provided to the `sklearn_trainer` constructor\n fit_kwargs\n Other arguments to send to `sklearn_trainer`'s `.fit` method\n \"\"\"\n from functools import reduce\n\n try:\n import numpy as np\n except ImportError:\n raise ValueError(\"numpy is not available. Please install numpy.\")\n\n try:\n import sklearn\n import sklearn.linear_model\n import sklearn.svm\n except ImportError:\n raise ValueError(\"sklearn is not available. Please install sklearn >= 0.23\")\n\n assert (\n sklearn.__version__ >= \"0.23.0\"\n ), \"Must have sklearn version 0.23.0 or higher to use \"\n \"sample_weight in Lasso regression.\"\n\n num_batches = 0\n xs, ys, ws = [], [], []\n for data in dataloader:\n if len(data) == 3:\n x, y, w = data\n else:\n assert len(data) == 2\n x, y = data\n w = None\n\n xs.append(x.cpu().numpy())\n ys.append(y.cpu().numpy())\n if w is not None:\n ws.append(w.cpu().numpy())\n num_batches += 1\n\n x = np.concatenate(xs, axis=0)\n y = np.concatenate(ys, axis=0)\n if len(ws) > 0:\n w = np.concatenate(ws, axis=0)\n else:\n w = None\n\n if norm_input:\n mean, std = x.mean(0), x.std(0)\n x -= mean\n x /= std\n\n t1 = time.time()\n sklearn_model = reduce(\n lambda val, el: getattr(val, el), [sklearn] + sklearn_trainer.split(\".\")\n )(**construct_kwargs)\n sklearn_model.fit(x, y, sample_weight=w, **fit_kwargs)\n t2 = time.time()\n\n # Convert weights to pytorch\n num_outputs = 1 if len(y.shape) == 1 else y.shape[1]\n weight_values = torch.FloatTensor(sklearn_model.coef_) # type: ignore\n bias_values = torch.FloatTensor([sklearn_model.intercept_]) # type: ignore\n model._construct_model_params(\n norm_type=None,\n weight_values=weight_values.view(num_outputs, -1),\n bias_value=bias_values.squeeze().unsqueeze(0),\n )\n\n if norm_input:\n model.norm = NormLayer(mean, std)\n\n return {\"train_time\": t2 - t1}\n"
] |
[
[
"numpy.concatenate",
"torch.stack",
"torch.norm",
"torch.FloatTensor",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.isclose",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.mean"
]
] |
ricardoprins/strawberryfields
|
[
"739ad44950624434375a24693a060e8a5e74c951"
] |
[
"strawberryfields/circuitspecs/xunitary.py"
] |
[
"# Copyright 2019-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Circuit class specification for the X class of circuits.\"\"\"\n\nimport copy\n\nimport numpy as np\nfrom thewalrus.symplectic import expand\n\nfrom strawberryfields.program_utils import CircuitError, Command, group_operations\n\nimport strawberryfields.ops as ops\n\nfrom .circuit_specs import CircuitSpecs, Ranges\nfrom .gbs import GBSSpecs\nfrom .gaussian_unitary import GaussianUnitary\n\n\nclass Xunitary(CircuitSpecs):\n \"\"\"Circuit specifications for the X class of circuits.\"\"\"\n\n short_name = \"Xunitary\"\n modes = None\n remote = True\n local = True\n interactive = False\n allowed_sq_ranges = Ranges([0], [1.0], variable_name=\"r\")\n sq_amplitude = 1.0\n\n primitives = {\n \"S2gate\",\n \"Sgate\",\n \"MeasureFock\",\n \"Rgate\",\n \"BSgate\",\n \"MZgate\",\n \"Interferometer\",\n }\n\n decompositions = {\n \"BipartiteGraphEmbed\": {\"mesh\": \"rectangular_symmetric\", \"drop_identity\": False,},\n }\n\n def compile(self, seq, registers):\n # the number of modes in the provided program\n n_modes = len(registers)\n\n # Number of modes must be even\n if n_modes % 2 != 0:\n raise CircuitError(\"The X series only supports programs with an even number of modes.\")\n half_n_modes = n_modes // 2\n # Call the GBS compiler to do basic measurement validation.\n # The GBS compiler also merges multiple measurement commands\n # into a single MeasureFock command at the end of the circuit.\n seq = GBSSpecs().compile(seq, registers)\n\n # ensure that all modes are measured\n if len(seq[-1].reg) != n_modes:\n raise CircuitError(\"All modes must be measured.\")\n\n # Check circuit begins with two-mode squeezers\n # --------------------------------------------\n A, B, C = group_operations(seq, lambda x: isinstance(x, ops.S2gate))\n # If there are no two-mode squeezers add squeezers at the beginning with squeezing param equal to zero.\n if B == []:\n initS2 = [\n Command(ops.S2gate(0, 0), [registers[i], registers[i + half_n_modes]])\n for i in range(half_n_modes)\n ]\n seq = initS2 + seq\n A, B, C = group_operations(seq, lambda x: isinstance(x, ops.S2gate))\n\n if A != []:\n raise CircuitError(\"There can be no operations before the S2gates.\")\n\n regrefs = set()\n\n if B:\n # get set of circuit registers as a tuple for each S2gate\n regrefs = {(cmd.reg[0].ind, cmd.reg[1].ind) for cmd in B}\n\n # the set of allowed mode-tuples the S2gates must have\n allowed_modes = set(zip(range(0, half_n_modes), range(half_n_modes, n_modes)))\n\n if not regrefs.issubset(allowed_modes):\n raise CircuitError(\"S2gates do not appear on the correct modes.\")\n\n # determine which modes do not have input S2gates specified\n missing = allowed_modes - regrefs\n\n for i, j in missing:\n # insert S2gates with 0 squeezing\n B.insert(0, Command(ops.S2gate(0, 0), [registers[i], registers[j]]))\n\n sqs = [cmd.op.p[0] for cmd in B]\n\n # ensure provided S2gates all have the allowed squeezing values\n if not all(s in 
self.allowed_sq_ranges for s in sqs):\n wrong_sq_values = [np.round(s, 4) for s in sqs if s not in self.allowed_sq_ranges]\n raise CircuitError(\n \"Incorrect squeezing value(s) r={}. Allowed squeezing \"\n \"value(s) are {}.\".format(wrong_sq_values, self.allowed_sq_ranges)\n )\n # This could in principle be changed\n phases = [cmd.op.p[1] for cmd in B]\n if not np.allclose(phases, 0):\n raise CircuitError(\n \"Incorrect phase value(s) phi={}. Allowed squeezing \"\n \"value(s) are 0.0.\".format(phases)\n )\n\n meas_seq = [C[-1]]\n seq = GaussianUnitary().compile(C[:-1], registers)\n\n # extract the compiled symplectic matrix\n if seq == []:\n S = np.identity(2 * n_modes)\n used_modes = list(range(n_modes))\n else:\n S = seq[0].op.p[0]\n # determine the modes that are acted on by the symplectic transformation\n used_modes = [x.ind for x in seq[0].reg]\n\n if not np.allclose(S @ S.T, np.identity(len(S))):\n raise CircuitError(\n \"The operations after squeezing do not correspond to an interferometer.\"\n )\n\n if len(used_modes) != n_modes:\n # The symplectic transformation acts on a subset of\n # the programs registers. We must expand the symplectic\n # matrix to one that acts on all registers.\n # simply extract the computed symplectic matrix\n S = expand(seq[0].op.p[0], used_modes, n_modes)\n\n U = S[:n_modes, :n_modes] - 1j * S[:n_modes, n_modes:]\n U11 = U[:half_n_modes, :half_n_modes]\n U12 = U[:half_n_modes, half_n_modes:]\n U21 = U[half_n_modes:, :half_n_modes]\n U22 = U[half_n_modes:, half_n_modes:]\n if not np.allclose(U12, 0) or not np.allclose(U21, 0):\n # Not a bipartite graph\n raise CircuitError(\n \"The applied unitary cannot mix between the modes {}-{} and modes {}-{}.\".format(\n 0, half_n_modes - 1, half_n_modes, n_modes - 1\n )\n )\n\n if not np.allclose(U11, U22):\n # Not a symmetric bipartite graph\n raise CircuitError(\n \"The applied unitary on modes {}-{} must be identical to the applied unitary on modes {}-{}.\".format(\n 0, half_n_modes - 1, half_n_modes, n_modes - 1\n )\n )\n U1 = ops.Interferometer(U11, mesh=\"rectangular_symmetric\", drop_identity=False)._decompose(\n registers[:half_n_modes]\n )\n U2 = copy.deepcopy(U1)\n\n for Ui in U2:\n Ui.reg = [registers[r.ind + half_n_modes] for r in Ui.reg]\n\n return B + U1 + U2 + meas_seq\n"
] |
[
[
"numpy.identity",
"numpy.allclose",
"numpy.round"
]
] |
benoitmartin88/pytorchtrainer
|
[
"7d73acd0802e00c3589d28bce6c42a489dcd46ea"
] |
[
"pytorchtrainer/metric/mean_absolute_error.py"
] |
[
"import torch\n\nfrom . import Metric\n\n\nclass MeanAbsoluteError(Metric):\n def __init__(self):\n super().__init__(\"mae\", default_value=float('inf'))\n self._absolute_error_sum = 0.\n self._total = 0\n\n def step(self, y: torch.Tensor, y_pred: torch.Tensor):\n absolute_errors = torch.abs(y - y_pred)\n self._absolute_error_sum += torch.sum(absolute_errors).item()\n self._total += y.size(dim=0) # dim 0 should be batch size\n return torch.sum(absolute_errors)\n\n def compute(self):\n if self._total == 0:\n raise ZeroDivisionError(\"Mean absolute error is not computable.\")\n return self._absolute_error_sum / self._total\n\n def reset(self):\n self._absolute_error_sum = 0.\n self._total = 0\n"
] |
[
[
"torch.abs",
"torch.sum"
]
] |
janaSunrise/Fashion-MNIST-Tensorflow
|
[
"a31d23e8b06ab8b70235d692b458e5a7c14aa2f1"
] |
[
"src/train.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow import keras\n\nprint(f\"Using tensorflow version: {tf.__version__}\")\n\n# Get the datasets\ndata = keras.datasets.fashion_mnist\n\n(train_images, train_labels), (test_images, test_labels) = data.load_data()\n\n# The class names for each of fashion category\nclass_names = [\n 'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'\n]\n\n# Clean the data of the train, and test sets\ntrain_images = train_images / 255.0\n\ntest_images = test_images / 255.0\n\n# Visualize the clothing category.\nplt.figure(figsize=(10,10))\nfor i in range(25):\n plt.subplot(5,5,i+1)\n plt.xticks([])\n plt.yticks([])\n plt.grid(False)\n plt.imshow(train_images[i], cmap=plt.cm.binary)\n plt.xlabel(class_names[train_labels[i]])\nplt.show()\n\n# Create the keras model.\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, activation='relu'),\n keras.layers.Dense(10, activation=\"softmax\")\n])\n\n# Compile the model\nmodel.compile(optimizer='adam', loss=\"sparse_categorical_crossentropy\", metrics=['accuracy'])\n\n# Fit and train the model.\nmodel.fit(train_images, train_labels, epochs=15)\n\n# Test the model\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n\nprint(f\"Test Accuracy: {test_acc}\\nTest loss: {test_loss}\")\n\n# Save the model\n# model.save(\"model.h5\")\n"
] |
[
[
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplot"
]
] |
HochulHwang/gc_test
|
[
"c42e9ec3392bc02eef5e3943ec3bf79456e91bf9"
] |
[
"src/inference/train_val.py"
] |
[
"from utils.meter import *\nimport time\nimport torch\nimport ipdb\nimport sys\nimport torch.nn as nn\n# from utils.others import *\nimport matplotlib\nfrom utils.loss import *\n\n\ndef make_variable_all_input(dict_input, cuda=False):\n dict_input_var = {}\n for k, v in dict_input.items():\n var = torch.autograd.Variable(v)\n dict_input_var[k] = var.cuda() if cuda else var\n return dict_input_var\n\n\ndef forward_backward(model, options, input_var, criterion, optimizer=None, cuda=False):\n # compute output\n output, pose, attention_points = model(input_var) # (B, 60) - (B, T, 100)\n\n # pose L2 loss\n loss_pose = pose_l2_loss(input_var['skeleton'], pose)\n\n # activity loss\n loss_activity = criterion(output, input_var['target'])\n\n # attraction loss\n if options['glimpse_clouds']:\n loss_attraction = loss_regularizer_glimpses(input_var['skeleton'], attention_points)\n else:\n loss_attraction = init_loss()\n\n # Full loss\n # ipdb.set_trace()\n # print('pose: ', loss_pose, 'activity: ', loss_activity, 'attraction: ', loss_attraction)\n loss = 0.1 * loss_pose + loss_activity + loss_attraction\n\n # backward if needed\n if optimizer is not None:\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n return output, loss\n\n\ndef update_metric_loss(input, output, metric, loss, losses):\n # loss\n losses.update(loss.detach(), input['clip'].size(0))\n\n # metrics\n target = input['target']\n\n target = target.cpu()\n preds = output.view(-1, output.size(-1)).data.cpu()\n\n list_idx_correct_preds = metric.add(preds, target)\n\n metric_val, metric_avg, _ = metric.value()\n\n return metric_val, metric_avg, list_idx_correct_preds\n\n\ndef train(train_loader, model, criterion, optimizer, metric, epoch, options, cuda=False, print_freq=1):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n metric.reset()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n print(\"\")\n for i, input in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n # Make Variables\n input_var = make_variable_all_input(input, cuda=cuda)\n\n # compute output\n output, loss = forward_backward(model,\n options,\n input_var,\n criterion,\n optimizer,\n cuda)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % print_freq == 0:\n # measure accuracy and record loss\n metric_val, metric_avg, *_ = update_metric_loss(input, output, metric, loss, losses)\n\n time_done = get_time_to_print(batch_time.avg * (i + 1))\n time_remaining = get_time_to_print(batch_time.avg * len(train_loader))\n\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) [{done} => {remaining}]\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Metric {metric_val:.3f} ({metric_avg:.3f})'.format(\n epoch, i + 1, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, metric_val=metric_val,\n metric_avg=metric_avg,\n done=time_done, remaining=time_remaining)\n )\n sys.stdout.flush()\n\n return losses.avg, metric_avg\n\n\ndef val(val_loader, model, criterion, optimizer, metric, epoch, options, cuda=False, print_freq=1, NB_CROPS=1):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n metric.reset()\n\n # switch to train mode\n model.eval()\n\n end = time.time()\n print(\"\")\n with torch.no_grad():\n for i, input in enumerate(val_loader):\n # measure 
data loading time\n data_time.update(time.time() - end)\n\n # Make Variables\n input_var = make_variable_all_input(input, cuda=cuda)\n\n output, loss = None, None\n for j in range(NB_CROPS):\n input_var_j = {'clip': input_var['clip'][:, j],\n 'skeleton': input_var['skeleton'][:, j],\n 'target': input_var['target']\n }\n # compute output\n output_j, loss_j = forward_backward(model,\n options,\n input_var_j,\n criterion,\n None,\n cuda)\n # Append\n output = output_j if output is None else output + output_j\n loss = loss_j if loss is None else loss + loss_j\n\n # Div\n output /= NB_CROPS\n loss /= NB_CROPS\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % print_freq == 0:\n # measure accuracy and record loss\n metric_val, metric_avg, *_ = update_metric_loss(input, output, metric, loss, losses)\n\n time_done = get_time_to_print(batch_time.avg * (i + 1))\n time_remaining = get_time_to_print(batch_time.avg * len(val_loader))\n\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) [{done} => {remaining}]\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Metric {metric_val:.3f} ({metric_avg:.3f})'.format(\n epoch, i + 1, len(val_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, metric_val=metric_val,\n metric_avg=metric_avg,\n done=time_done, remaining=time_remaining)\n )\n sys.stdout.flush()\n\n return losses.avg, metric_avg\n"
] |
[
[
"torch.autograd.Variable",
"torch.no_grad"
]
] |
zhaolotelli/FedLearn
|
[
"b5ddb26acbee3218b11894fb7ca7ce24677c0b50"
] |
[
"flearn/utils/sub.py"
] |
[
"import copy\r\nimport numpy as np\r\n\r\ndef boot(client_data, sub_rate):\r\n n = len(client_data)\r\n rand_ind = np.random.choice(n, np.int(np.floor(n * sub_rate)))\r\n sub_data = copy.deepcopy(client_data)\r\n X = client_data.X\r\n y = client_data.y\r\n sub_data.X = X[rand_ind]\r\n sub_data.y = y[rand_ind]\r\n return sub_data\r\n\r\ndef boot_agg(params, sub_params, sub_rate):\r\n final_params = [np.zeros(len(p)) for p in params]\r\n for i, (param, sub_param) in enumerate(zip(params, sub_params)):\r\n final_params[i] = (param - sub_rate * sub_param) / (1 - sub_rate)\r\n return final_params"
] |
[
[
"numpy.floor"
]
] |
karamach/gtsam
|
[
"5cbb9dfd6c5bc6a38edd230fd0b9d9c7e5006b0b"
] |
[
"cython/gtsam/tests/test_SimpleCamera.py"
] |
[
"import math\nimport numpy as np\nimport unittest\n\nfrom gtsam import Pose2, Point3, Rot3, Pose3, Cal3_S2, SimpleCamera\n\nK = Cal3_S2(625, 625, 0, 0, 0)\n\nclass TestSimpleCamera(unittest.TestCase):\n\n def test_constructor(self):\n pose1 = Pose3(Rot3(np.diag([1, -1, -1])), Point3(0, 0, 0.5))\n camera = SimpleCamera(pose1, K)\n self.assertTrue(camera.calibration().equals(K, 1e-9))\n self.assertTrue(camera.pose().equals(pose1, 1e-9))\n\n def test_level2(self):\n # Create a level camera, looking in Y-direction\n pose2 = Pose2(0.4,0.3,math.pi/2.0)\n camera = SimpleCamera.Level(K, pose2, 0.1)\n\n # expected\n x = Point3(1,0,0)\n y = Point3(0,0,-1)\n z = Point3(0,1,0)\n wRc = Rot3(x,y,z)\n expected = Pose3(wRc,Point3(0.4,0.3,0.1))\n self.assertTrue(camera.pose().equals(expected, 1e-9))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.diag"
]
] |
spaicer/explanation-generator-module
|
[
"9c7c9322ad43a73e452d9063cb3ae6c0a9309237"
] |
[
"src/core/aeas.py"
] |
[
"# credit https://github.com/ronniemi/explainAnomaliesUsingSHAP\n\nimport numpy as np\nimport pandas as pd\n\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Dense\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras.callbacks import EarlyStopping\n\nimport shap\nimport warnings\nimport logging\n\nwarnings.filterwarnings(\"ignore\")\nlogger = logging.getLogger('shap')\nlogger.disabled = True\n\n\nclass AEAS:\n '''\n This class implements method described in 'Explaining Anomalies Detected by Autoencoders Using SHAP' to explain\n anomalies revealed by an unsupervised Autoencoder model using SHAP.\n '''\n\n autoencoder = None\n num_anomalies_to_explain = None\n reconstruction_error_percent = None\n shap_values_selection = None\n counter = None\n\n def __init__(self, num_anomalies_to_explain=10, reconstruction_error_percent=0.5, shap_values_selection='mean'):\n \"\"\"\n Args:\n num_anomalies_to_explain (int): number of top ranked anomalies (ranked by anomaly score that is the mse) to\n explain.\n reconstruction_error_percent (float): Number between 0 to 1- see explanation to this parameter in\n 'Explaining Anomalies Detected by Autoencoders Using SHAP' under\n ReconstructionErrorPercent.\n shap_values_selection (str): One of the possible methods to choose explaining features by their SHAP values.\n Can be: 'mean', 'median', 'constant'. See explanation to this parameter in\n 'Explaining Anomalies Detected by Autoencoders Using SHAP' under\n SHAPvaluesSelection.\n \"\"\"\n\n self.num_anomalies_to_explain = num_anomalies_to_explain\n self.reconstruction_error_percent = reconstruction_error_percent\n self.shap_values_selection = shap_values_selection\n\n def train_model(self, x_train, nb_epoch=1000, batch_size=64):\n \"\"\"\n Train 6-layer Autoencoder model on the given x_train data.\n Args:\n x_train (data frame): The data to train the Autoencoder model on\n nb_epoch (int): Number of epoch the model will perform\n batch_size (int): Size of each batch of data enter to the model\n Returns:\n model: Trained autoencoder\n \"\"\"\n \n input_dim = x_train.shape[1]\n\n input_layer = Input(shape=(input_dim,))\n\n encoder = Dense(int(input_dim / 2), activation=\"relu\", activity_regularizer=regularizers.l1(10e-7))(\n input_layer)\n\n encoder = Dense(int(input_dim / 4), activation=\"relu\", kernel_regularizer=regularizers.l2(10e-7))(encoder)\n\n decoder = Dense(int(input_dim / 2), activation='relu', kernel_regularizer=regularizers.l2(10e-7))(encoder)\n\n decoder = Dense(input_dim, activation='sigmoid', kernel_regularizer=regularizers.l2(10e-7))(decoder)\n\n self.autoencoder = Model(inputs=input_layer, outputs=decoder)\n\n self.autoencoder.summary()\n\n self.autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['mse'])\n\n earlystopper = EarlyStopping(monitor='val_loss', patience=5, verbose=1)\n self.autoencoder.fit(x_train, x_train, epochs=nb_epoch, batch_size=batch_size, shuffle=True,\n validation_split=0.1, verbose=2, callbacks=[earlystopper])\n\n return self.autoencoder\n\n def get_top_anomaly_to_explain(self, x_explain):\n \"\"\"\n Sort all records in x_explain by their MSE calculated according to their prediction by the trained Autoencoder\n and return the top num_anomalies_to_explain (its value given by the user at class initialization) records.\n Args:\n x_explain (data frame): Set of records we want to explain the most anomalous ones from it.\n Returns:\n list: List of index of the top num_anomalies_to_explain records with 
highest MSE that will be explained.\n \"\"\"\n \n predictions = self.autoencoder.predict(x_explain)\n square_errors = np.power(x_explain - predictions, 2)\n mse_series = pd.Series(np.mean(square_errors, axis=1))\n\n most_anomal_trx = mse_series.sort_values(ascending=False)\n columns = [\"id\", \"mse_all_columns\"]\n columns.extend([\"squared_error_\" + x for x in list(x_explain.columns)])\n items = []\n for x in most_anomal_trx.iteritems():\n item = [x[0], x[1]]\n item.extend(square_errors.loc[x[0]])\n items.append(item)\n\n df_anomalies = pd.DataFrame(items, columns=columns)\n df_anomalies.set_index('id', inplace=True)\n\n top_anomalies_to_explain = df_anomalies.head(self.num_anomalies_to_explain).index\n return top_anomalies_to_explain\n\n def get_num_features_with_highest_reconstruction_error(self, total_squared_error, errors_df):\n \"\"\"\n Calculate the number of features whose reconstruction errors sum to reconstruction_error_percent of the\n total_squared_error of the records that selected to be explained at the moment. This is the number of the\n top reconstructed errors features that going to be explained and eventually this features together with their\n explanation will build up the features explanation set of this record.\n Args:\n total_squared_error (int): MSE of the records selected to be explained\n errors_df (data frame): The reconstruction error of each feature- this is the first output output of\n get_errors_df_per_record function\n Returns:\n int: Number of features whose reconstruction errors sum to reconstruction_error_percent of the\n total_squared_error of the records that selected to be explained at the moment\n \"\"\"\n\n error = 0\n for num_of_features, index in enumerate(errors_df.index):\n error += errors_df.loc[index, 'err']\n if error >= self.reconstruction_error_percent * total_squared_error:\n break\n return num_of_features + 1\n\n def get_background_set(self, x_train, background_size=200):\n \"\"\"\n Get the first background_size records from x_train data and return it. Used for SHAP explanation process.\n Args:\n x_train (data frame): the data we will get the background set from\n background_size (int): The number of records to select from x_train. Default value is 200.\n Returns:\n data frame: Records from x_train that will be the background set of the explanation of the record that we\n explain at that moment using SHAP.\n \"\"\"\n\n background_set = x_train.head(background_size)\n return background_set\n\n def get_errors_df_per_record(self, record):\n \"\"\"\n Create data frame of the reconstruction errors of each features of the given record. Eventually we get data\n frame so each row contain the index of feature, its name, and its reconstruction error based on the record\n prediction provided by the trained autoencoder. 
This data frame is sorted by the reconstruction error of the\n features\n Args:\n record (pandas series): The record we explain at the moment; values of all its features.\n Returns:\n data frame: Data frame of all features reconstruction error sorted by the reconstruction error.\n \"\"\"\n\n prediction = self.autoencoder.predict(np.array([[record]])[0])[0]\n square_errors = np.power(record - prediction, 2)\n errors_df = pd.DataFrame({'col_name': square_errors.index, 'err': square_errors}).reset_index(drop=True)\n total_mse = np.mean(square_errors)\n errors_df.sort_values(by='err', ascending=False, inplace=True)\n return errors_df, total_mse\n\n def get_highest_shap_values(self, shap_values_df):\n \"\"\"\n Choosing explaining features based on their SHAP values by shap_values_selection method (mean, median, constant)\n i.e. remove all features with SHAP values that do not meet the method requirements as described in 'Explaining\n Anomalies Detected by Autoencoders Using SHAP' under SHAPvaluesSelection.\n Args:\n shap_values_df (data frame): Data frame with all existing features and their SHAP values.\n Returns:\n data frame: Data frame that contain for each feature we explain (features with high reconstruction error)\n its explaining features that selected by the shap_values_selection method and their SHAP values.\n \"\"\"\n\n all_explaining_features_df = pd.DataFrame()\n\n for i in range(shap_values_df.shape[0]):\n shap_values = shap_values_df.iloc[i]\n\n if self.shap_values_selection == 'mean':\n treshold_val = np.mean(shap_values)\n\n elif self.shap_values_selection == 'median':\n treshold_val = np.median(shap_values)\n\n elif self.shap_values_selection == 'constant':\n num_explaining_features = 5\n explaining_features = shap_values_df[i:i + 1].stack().nlargest(num_explaining_features)\n all_explaining_features_df = pd.concat([all_explaining_features_df, explaining_features], axis=0)\n continue\n\n else:\n raise ValueError('unknown SHAP value selection method')\n\n num_explaining_features = 0\n for j in range(len(shap_values)):\n if shap_values[j] > treshold_val:\n num_explaining_features += 1\n explaining_features = shap_values_df[i:i + 1].stack().nlargest(num_explaining_features)\n all_explaining_features_df = pd.concat([all_explaining_features_df, explaining_features], axis=0)\n return all_explaining_features_df\n\n def func_predict_feature(self, record):\n \"\"\"\n Predict the value of specific feature (with 'counter' index) using the trained autoencoder\n Args:\n record (pandas series): The record we explain at the moment; values of all its features.\n Returns:\n list: List the size of the number of features, contain the value of the predicted features with 'counter'\n index (the feature we explain at the moment)\n \"\"\"\n\n record_prediction = self.autoencoder.predict(record)[:, self.counter]\n return record_prediction\n\n def explain_unsupervised_data(self, x_train, x_explain, autoencoder=None, return_shap_values=False):\n \"\"\"\n First, if Autoencoder model not provided ('autoencoder' is None) train Autoencoder model on given x_train data.\n Then, for each record in 'top_records_to_explain' selected from given 'x_explain' as described in\n 'get_top_anomaly_to_explain' function, we use SHAP to explain the features with the highest reconstruction\n error based on the output of 'get_num_features_with_highest_reconstruction_error' function described above.\n Then, after we got the SHAP value of each feature in the explanation of the high reconstructed error feature,\n we select the 
explaining features using 'highest_contributing_features' function described above. Eventually,\n when we got the explaining features for each one of the features with highest reconstruction error, we build the\n explaining features set so the feature with the highest reconstruction error and its explaining features enter\n first to the explaining features set, then the next feature with highest reconstruction error and its explaining\n features enter to the explaining features set only if they don't already exist in the explaining features set\n and so on (full explanation + example exist in 'Explaining Anomalies Detected by Autoencoders Using SHAP')\n Args:\n x_train (data frame): The data to train the autoencoder model on and to select the background set from (for\n SHAP explanation process)\n x_explain (data frame): The data from which the top 'num_anomalies_to_explain' records are selected by their\n MSE to be explained.\n autoencoder (model): Trained Autoencoder model that will be used to explain x_explain data. If None (model \n not provided) then we will build and train from scratch a Autoencoder model as described \n in train_model function.\n return_shap_values (bool): If False, the resulting explnation featues set for each record will include only \n the names of the explaining features. If True, in addition to explaining feature name,\n the explnation featues set will include the SHAP value of each feature in the explnation\n featues set so the explnation featues set will be composed of tupels of (str, float)\n when str will be the name of the explaining feature and float will be its SHAP value.\n Note that for the explained features (features with high reconstraction error), if they \n did not appear in previuse feature explanation (explnation of feature with higher \n recustraction error), they will not have any SHAP values. 
Therefore they get unique\n value of -1.\n \n Returns:\n dict: Return all_sets_explaining_features dictionary that contain the explanation for\n 'top_records_to_explain' records so that the keys are int; the records indexes and the values are\n lists; the explanation features sets.\n \"\"\"\n \n self.autoencoder = autoencoder\n if self.autoencoder is None:\n self.train_model(x_train)\n \n top_records_to_explain = self.get_top_anomaly_to_explain(x_explain)\n all_sets_explaining_features = {}\n\n for record_idx in top_records_to_explain:\n\n record_to_explain = x_explain.loc[record_idx]\n\n df_err, total_mse = self.get_errors_df_per_record(record_to_explain)\n num_of_features = self.get_num_features_with_highest_reconstruction_error(total_mse * df_err.shape[0],\n df_err)\n\n df_top_err = df_err.head(num_of_features)\n all_sets_explaining_features[record_idx] = []\n shap_values_all_features = [[] for num in range(num_of_features)]\n\n backgroungd_set = self.get_background_set(x_train, 200).values\n for i in range(num_of_features):\n self.counter = df_top_err.index[i]\n explainer = shap.KernelExplainer(self.func_predict_feature, backgroungd_set)\n shap_values = explainer.shap_values(record_to_explain, nsamples='auto')\n shap_values_all_features[i] = shap_values\n\n shap_values_all_features = np.fabs(shap_values_all_features)\n\n shap_values_all_features = pd.DataFrame(data=shap_values_all_features, columns=x_train.columns)\n highest_contributing_features = self.get_highest_shap_values(shap_values_all_features)\n \n for idx_explained_feature in range(num_of_features):\n set_explaining_features =[]\n for idx, row in highest_contributing_features.iterrows():\n if idx[0] == idx_explained_feature:\n set_explaining_features.append((idx[1], row[0]))\n explained_feature_index = df_top_err.index[idx_explained_feature]\n set_explaining_features.append((x_train.columns[explained_feature_index], -1))\n\n all_sets_explaining_features[record_idx].append(set_explaining_features)\n\n final_set_features = []\n final_set_items = []\n for item in sum(all_sets_explaining_features[record_idx], []):\n if item[0] not in final_set_features:\n final_set_features.append(item[0])\n final_set_items.append(item)\n \n if return_shap_values:\n all_sets_explaining_features[record_idx] = final_set_items\n else:\n all_sets_explaining_features[record_idx] = final_set_features\n\n return all_sets_explaining_features"
] |
[
[
"numpy.array",
"tensorflow.keras.regularizers.l1",
"tensorflow.keras.layers.Input",
"numpy.median",
"pandas.DataFrame",
"tensorflow.keras.models.Model",
"numpy.mean",
"numpy.fabs",
"numpy.power",
"tensorflow.keras.regularizers.l2",
"pandas.concat",
"tensorflow.keras.callbacks.EarlyStopping"
]
] |
cgvvxx/Font_Style_Transfer
|
[
"1ff1e9f7bd1fef8f7736da90f917ad11b8fce964"
] |
[
"data_modules/crawling.py"
] |
[
"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport pandas as pd\nimport warnings\nimport zipfile\nimport shutil\nimport numpy\nimport time\nimport glob\nimport re\nimport os\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n\n# Crawling code\ndef crawling():\n # Execute webdriver\n wd = webdriver.Chrome(ChromeDriverManager().install()) # webdriver path\n wd.get('https://fonts.google.com/?category=Serif,Sans+Serif,Display&subset=latin')\n wd.maximize_window()\n\n font_list = []\n for i in range(150):\n if len(font_list) < 1000:\n last_font = []\n win_height = 925 * i # height of chrome window\n scroll = \"window.scrollTo(1920, \" + str(win_height) + \");\"\n wd.execute_script(scroll)\n html = wd.page_source\n font_name = re.findall(\"(-6\\\"\\>+[0-9a-zA-Z\\s]+\\<)\", html)\n for font in font_name:\n style = font[4:-1]\n if style not in font_list:\n font_list.append(style)\n print(style)\n else:\n print(\"overlap!\")\n pass\n\n for font in font_list:\n if ' SC' in str(font):\n font_list.remove(font)\n print(\"remove_small_caps:\", font)\n else:\n pass\n\n for font in font_list:\n if 'Barcode' in str(font):\n font_list.remove(font)\n print(\"remove_barcode\", font)\n else:\n pass\n\n print(len(font_list))\n time.sleep(1.5)\n\n else:\n break\n\n wd.close()\n\n if 'Kumar One' in font_list:\n font_list.remove('Kumar One')\n if 'Kumar One Outline' in font_list:\n font_list.remove('Kumar One Outline')\n\n # font_list = font_list[:100] # 원하는 폰트 수\n font_df = pd.DataFrame(font_list)\n font_df.columns = {\"font_name\"}\n\n return font_df.to_csv(\"./font_list.csv\", encoding='utf-8', mode='w', index=False)\n\n\ndef remove_overlap():\n df = pd.read_csv(\"./font_list.csv\")\n font_list = df['font_name'].to_list()\n\n overlap_list = []\n for i in font_list:\n font_list_compared = df['font_name'].to_list()\n font_list_compared.remove(i)\n for j in font_list_compared:\n if str(i) in str(j):\n overlap_list.append(j)\n else:\n pass\n\n font_list = [x for x in font_list if x not in overlap_list]\n print(len(font_list))\n font_df = pd.DataFrame(font_list)\n font_df.columns = {\"font_name\"}\n\n return font_df.to_csv(\"./font_list.csv\", encoding='utf-8', mode='w', index=False)\n\n\n# Download ttf files\ndef download_ttfs():\n # load font list\n font_list_df = pd.read_csv('./font_list.csv')\n font_list = list(font_list_df.font_name)\n style_name = [i.replace(\" \", \"+\") for i in font_list]\n style_name.sort()\n\n # execute webdriver\n os.makedirs(\"./ttf_zips\", exist_ok=True)\n options = webdriver.ChromeOptions()\n options.add_experimental_option(\"prefs\", {\n \"download.default_directory\": os.path.abspath('./ttf_zips')})\n wd = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)\n wd.get(\"https://fonts.google.com/\")\n wd.maximize_window()\n\n # ttf file crawling by zip file\n for style in style_name:\n wd.get(\"https://fonts.google.com/specimen/\" + style)\n WebDriverWait(wd, 60).until(EC.presence_of_element_located((By.CSS_SELECTOR,\n \"#main-content > gf-sticky-header > div > div > \"\n \"button > span.mat-button-wrapper > span\")))\n button = wd.find_element_by_css_selector(\n \"#main-content > gf-sticky-header > div > div > button > span.mat-button-wrapper > span\")\n wd.implicitly_wait(60) # setting waiting time for downloading bigsize files\n 
button.click()\n\n\n# Unzip\ndef unzip():\n # load font list\n zip_list = os.listdir(\"./ttf_zips\")\n print(zip_list)\n os.makedirs(\"./ttfs\", exist_ok=True)\n\n # decompressing zip files\n for zip in zip_list:\n ttf_zip = zipfile.ZipFile(\"./ttf_zips/\" + zip)\n for file in ttf_zip.namelist():\n ttf_zip.extract(file, \"./ttfs/\")\n ttf_zip.close()\n\n\n# Collect list of all ttf files\ndef read_all_file(path):\n output = os.listdir(path)\n file_list = []\n\n for i in output:\n if os.path.isdir(path + \"/\" + i):\n file_list.extend(read_all_file(path + \"/\" + i))\n elif os.path.isfile(path + \"/\" + i):\n file_list.append(path + \"/\" + i)\n\n return file_list\n\n\n# Copy files in new path\ndef copy_all_file(file_list, new_path):\n for src_path in file_list:\n file = src_path.split(\"/\")[-1]\n shutil.copyfile(src_path, new_path + \"/\" + file)\n\n\n# Execute arranging\ndef arrange():\n if os.path.isdir(\"./ttfs/static\"):\n file_list = read_all_file(\"./ttfs/static\")\n copy_all_file(file_list, \"./ttfs\")\n shutil.rmtree(\"./ttfs/static\")\n os.chdir(\"./ttfs\")\n otfs = glob.glob(\"*.otf\")\n txts = glob.glob(\"*.txt\")\n for_remove = otfs + txts\n for file in for_remove:\n if not os.path.exists(\"./\" + file):\n pass\n else:\n os.remove(\"./\" + file)\n else:\n pass\n\n\ndef get_regular():\n ttf_list = os.listdir('./ttfs')\n for ttf in ttf_list:\n if 'Regular' not in str(ttf):\n os.remove(\"./ttfs/\" + ttf)\n elif 'Semi' in str(ttf):\n os.remove(\"./ttfs/\" + ttf)\n else:\n continue\n\n return print(\"finished filtering\")\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
interesaaat/onnxconverter-common
|
[
"5f670d71184abb1f4601193f552bdda6c1f59555"
] |
[
"onnxconverter_common/onnx_ops.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n###############################################################################\n\n# This file contains some high-level APIs for applying operations on variables specified by names. We should try our\n# best to use those functions because they can produce ONNX operators according to the ONNX version specified in the\n# `container` argument. Notice that those function behaviors are defined in a way very similar to ONNX-1.2.\n\nimport numpy as np\nfrom onnx import onnx_pb as onnx_proto\nfrom onnx.mapping import NP_TYPE_TO_TENSOR_TYPE\n\n\ndef _create_name_or_use_existing_one(scope, op_type, name):\n if name is None:\n return scope.get_unique_operator_name(op_type)\n else:\n return name\n\n\ndef _apply_unary_operation(scope, op_type, input_name, output_name, container, operator_name, **attrs):\n name = _create_name_or_use_existing_one(scope, op_type, operator_name)\n\n attrs['name'] = name\n if container.target_opset < 6:\n attrs['consumed_inputs'] = [0]\n op_version = 1\n else:\n op_version = 6\n\n container.add_node(op_type, input_name, output_name, op_version=op_version, **attrs)\n\n\ndef _apply_basic_numerical_operation(scope, op_type, input_names, output_name, container, operator_name,\n axis, broadcast):\n name = _create_name_or_use_existing_one(scope, op_type, operator_name)\n\n attrs = {}\n if container.target_opset < 7:\n # Before ONNX-1.2 (opset 7), broadcasting behavior is Caffe2-like.\n if axis is not None:\n attrs['axis'] = axis\n if broadcast is not None:\n attrs['broadcast'] = broadcast\n\n if container.target_opset < 6:\n attrs['consumed_inputs'] = [0, 0]\n op_version = 1\n else:\n op_version = 6\n else:\n # Since ONNX-1.2 (opset 7), broadcasting behavior is Numpy-like, so we don't need to specify any attributes\n op_version = 7\n\n container.add_node(op_type, input_names, output_name, op_version=op_version, name=name, **attrs)\n\n\ndef _apply_pointwise_operation(scope, op_type, input_names, output_name, container, operator_name):\n name = _create_name_or_use_existing_one(scope, op_type, operator_name)\n attrs = {}\n\n if container.target_opset < 6:\n attrs['consumed_inputs'] = [0] * len(input_names)\n op_version = 1\n elif container.target_opset < 8:\n op_version = 6\n else:\n if container.target_opset < 12 or op_type == 'Mean':\n op_version = 8\n else:\n op_version = 12\n\n container.add_node(op_type, input_names, output_name, op_version=op_version, name=name, **attrs)\n\n\ndef apply_abs(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Abs', input_name, output_name, container, operator_name=operator_name)\n\n\ndef apply_add(scope, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):\n _apply_basic_numerical_operation(scope, 'Add', input_names, output_name, container, operator_name=operator_name,\n axis=axis, broadcast=broadcast)\n\n\ndef apply_argmax(scope, input_name, output_name, container, operator_name=None, axis=0, keepdims=1,\n select_last_index=0):\n name = _create_name_or_use_existing_one(scope, 'ArgMax', operator_name)\n attrs = {'axis': axis, 'keepdims': keepdims}\n if container.target_opset < 11:\n op_version = 1\n elif container.target_opset < 12:\n op_version = 11\n else:\n op_version = 12\n attrs['select_last_index'] = select_last_index\n container.add_node('ArgMax', input_name, output_name, op_version=op_version, name=name, 
**attrs)\n\n\ndef apply_argmin(scope, input_name, output_name, container, operator_name=None, axis=0, keepdims=1,\n select_last_index=0):\n name = _create_name_or_use_existing_one(scope, 'ArgMin', operator_name)\n attrs = {'axis': axis, 'keepdims': keepdims}\n if container.target_opset < 11:\n op_version = 1\n elif container.target_opset < 12:\n op_version = 11\n else:\n op_version = 12\n attrs['select_last_index'] = select_last_index\n container.add_node('ArgMin', input_name, output_name, op_version=op_version, name=name, **attrs)\n\n\ndef apply_affine(scope, input_name, output_name, container, operator_name=None, alpha=1., beta=0.):\n if container.target_opset < 9:\n op_type = 'Affine'\n name = _create_name_or_use_existing_one(scope, 'Affine', operator_name)\n attrs = {'name': name, 'alpha': alpha, 'beta': beta}\n container.add_node(op_type, input_name, output_name, **attrs)\n else:\n name = _create_name_or_use_existing_one(scope, 'Affine', operator_name)\n # Define a and b.\n aName = scope.get_unique_variable_name(name + '_alpha')\n container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, [1], [alpha])\n bName = scope.get_unique_variable_name(name + '_beta')\n container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, [1], [beta])\n\n # Compute Z = a * X, where X is the original input.\n zName = scope.get_unique_variable_name(name + '_scaled')\n apply_mul(scope, [aName, input_name], zName, container)\n\n # Compute Y = Z + b, where Y is the final output.\n apply_add(scope, [zName, bName], output_name, container)\n\n\ndef apply_batch_norm(scope, input_names, output_names, container, operator_name=None,\n epsilon=None, is_test=None, momentum=None, spatial=None):\n name = _create_name_or_use_existing_one(scope, 'BatchNormalization', operator_name)\n attrs = {'name': name, 'epsilon': epsilon, 'momentum': momentum}\n\n if container.target_opset < 9:\n attrs['spatial'] = spatial\n if container.target_opset < 7:\n attrs['is_test'] = is_test\n\n if container.target_opset < 6:\n attrs['consumed_inputs'] = [0] * len(input_names)\n if len(input_names) > 3:\n attrs['consumed_inputs'][3] = 1\n if len(input_names) > 4:\n attrs['consumed_inputs'][4] = 2\n op_version = 1\n elif container.target_opset < 7:\n op_version = 6\n elif container.target_opset < 9:\n op_version = 7\n else:\n op_version = 9\n\n container.add_node('BatchNormalization', input_names, output_names, op_version=op_version, **attrs)\n\n\ndef apply_cast(scope, input_name, output_name, container, operator_name=None, to=None):\n '''\n :param to: enum defined in ONNX TensorProto.DataType, for example, TensorProto.FLOAT and TensorProto.INT64.\n '''\n name = _create_name_or_use_existing_one(scope, 'Cast', operator_name)\n attrs = {'name': name}\n\n d = onnx_proto.TensorProto.DataType.DESCRIPTOR\n allowed_type_name_and_type_enum_pairs = {v.number: k for k, v in d.values_by_name.items()}\n if to not in allowed_type_name_and_type_enum_pairs:\n raise ValueError('Attribute \"to\" must be one of %s' % allowed_type_name_and_type_enum_pairs.keys())\n\n if container.target_opset < 9:\n if to in [onnx_proto.TensorProto.STRING, onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:\n raise ValueError('Attribute \"to\" cannot correspond to a String or Complex TensorProto type.')\n\n if container.target_opset < 6:\n # Convert enum to string, for example, TensorProto.INT64 to 'INT64'\n attrs['to'] = allowed_type_name_and_type_enum_pairs[to]\n op_version = 1\n else:\n # Enum, for example, TensorProto.INT64\n attrs['to'] = to\n 
op_version = 6\n else:\n # Enum value, for example, TensorProto.INT64\n # String casting is supported in opset 9\n if to in [onnx_proto.TensorProto.COMPLEX64, onnx_proto.TensorProto.COMPLEX128]:\n raise ValueError('Attribute \"to\" cannot correspond to a Complex TensorProto type.')\n attrs['to'] = to\n op_version = 9\n\n container.add_node('Cast', input_name, output_name, op_version=op_version, **attrs)\n\n\ndef apply_clip(scope, input_name, output_name, container, operator_name=None, max=None, min=None):\n name = _create_name_or_use_existing_one(scope, 'Clip', operator_name)\n attrs = {'name': name}\n\n if container.target_opset < 11:\n if max is not None:\n attrs['max'] = float(max)\n if min is not None:\n attrs['min'] = float(min)\n\n if container.target_opset < 6:\n attrs['consumed_inputs'] = [0]\n op_version = 1\n else:\n op_version = 6\n\n container.add_node('Clip', input_name, output_name, op_version=op_version, **attrs)\n else:\n if container.target_opset < 12:\n op_version = 11\n else:\n op_version = 12\n if min is None and max is not None:\n raise RuntimeError(\"Operator 'Clip': min must be specified if max is.\")\n inputs = [input_name]\n\n if min is not None:\n if isinstance(min, (np.ndarray, float, int)):\n # add initializer\n if isinstance(min, np.ndarray):\n if len(min.shape) == 0:\n min = [min]\n elif min.shape == (1,):\n min = list(min[0]) if hasattr(min[0], '__iter__') else list(min)\n else:\n raise RuntimeError(\"min must be an array of one element.\")\n else:\n min = [min]\n\n # container in sklearn-onnx stores the computation type in\n # container.dtype.\n min_name = scope.get_unique_variable_name('clip_min')\n if op_version < 12:\n min = np.array(min, dtype=getattr(container, 'dtype', np.float32))\n container.add_initializer(min_name, getattr(container, 'proto_dtype',\n onnx_proto.TensorProto.FLOAT), [], [min[0]])\n else:\n min = np.array(min)\n container.add_initializer(min_name, NP_TYPE_TO_TENSOR_TYPE[min.dtype], [], [min[0]])\n min = min_name\n if isinstance(min, str):\n inputs.append(min)\n else:\n raise RuntimeError(\"Parameter 'min' must be a string or a float.\")\n\n if max is not None:\n if min is None:\n raise RuntimeError(\"Parameter 'min' must be specified if 'max' is.\")\n if isinstance(max, (np.ndarray, float, int)):\n # add initializer\n if isinstance(max, np.ndarray):\n if len(max.shape) == 0:\n max = [max]\n elif max.shape == (1,):\n max = list(max[0]) if hasattr(max[0], '__iter__') else list(max)\n else:\n raise RuntimeError(\"max must be an array of one element.\")\n else:\n max = [max]\n\n max_name = scope.get_unique_variable_name('clip_max')\n if op_version < 12:\n max = np.array(max, dtype=getattr(container, 'dtype', np.float32))\n container.add_initializer(max_name, getattr(container, 'proto_dtype',\n onnx_proto.TensorProto.FLOAT), [], [max[0]])\n else:\n max = np.array(max)\n container.add_initializer(max_name, NP_TYPE_TO_TENSOR_TYPE[max.dtype], [], [max[0]])\n max = max_name\n if isinstance(max, str):\n inputs.append(max)\n else:\n raise RuntimeError(\"Parameter 'max' must be a string or a float.\")\n\n container.add_node('Clip', inputs, output_name, op_version=op_version,\n **attrs)\n\n\ndef apply_concat(scope, input_names, output_name, container, operator_name=None, axis=0):\n name = _create_name_or_use_existing_one(scope, 'Concat', operator_name)\n\n if container.target_opset < 4:\n op_version = 1\n elif container.target_opset < 11:\n op_version = 4\n else:\n op_version = 11\n\n container.add_node('Concat', input_names, output_name, 
op_version=op_version, name=name, axis=axis)\n\n\ndef apply_constant(scope, output_name, container, operator_name=None, value=None):\n name = _create_name_or_use_existing_one(scope, 'Constant', operator_name)\n\n if value is None:\n raise ValueError('Attribute \"value\" is a required argument.')\n\n if container.target_opset < 9:\n op_version = 1\n elif container.target_opset < 11:\n op_version = 9\n elif container.target_opset < 12:\n op_version = 11\n else:\n op_version = 12\n\n if op_version < 12:\n attrs = {'name': name, 'value': value}\n else:\n if isinstance(value, float):\n attrs = {'name': name, 'value_float': value}\n elif isinstance(value, int):\n attrs = {'name': name, 'value_int': value}\n elif isinstance(value, str):\n attrs = {'name': name, 'value_string': value}\n else:\n attrs = {'name': name, 'value': value}\n\n container.add_node('Constant', [], output_name, op_version=op_version, **attrs)\n\n\ndef apply_constant2(scope, input_names, output_name, container, operator_name=None, value=None):\n assert len(input_names) == 0 # only a placeholder to standardize the argument list.\n return apply_constant(scope, output_name, container, operator_name, value)\n\n\ndef apply_constant_of_shape(scope, input_names, output_name, container, operator_name=None, value=None):\n name = _create_name_or_use_existing_one(scope, 'ConstantOfShape', operator_name)\n container.add_node('ConstantOfShape', input_names, output_name, name=name, op_version=9, value=value)\n\n\ndef apply_conv(scope, input_names, output_name, container, operator_name=None, **attrs):\n name = _create_name_or_use_existing_one(scope, 'Conv', operator_name)\n\n if container.target_opset < 11:\n op_version = 1\n else:\n op_version = 11\n\n container.add_node('Conv', input_names, output_name, name=name, op_version=op_version, **attrs)\n\n\ndef apply_crop_height_width(scope, input_name, output_name, container, operator_name=None,\n top_border=0, bottom_border=0, left_border=0, right_border=0):\n name = scope.get_unique_operator_name('CropHeightWidth')\n if container.target_opset < 9:\n # If operator set < 9, we can use the experimental Crop in ONNX.\n attrs = {'name': name, 'border': [left_border, top_border, right_border, bottom_border]}\n container.add_node('Crop', input_name, output_name, **attrs)\n else:\n # The experimental Crop in ONNX is removed after operator set 9, so we\n # switch to ONNX DynamicSlice operator.\n\n # CoreML only crops H- and W-axes.\n axes = [2, 3]\n axes_name = scope.get_unique_variable_name(name + '_axes')\n container.add_initializer(axes_name, onnx_proto.TensorProto.INT64,\n [len(axes)], axes)\n\n # Number of cropped pixels is the starting index of the remained region.\n starts = [top_border, left_border]\n starts_name = scope.get_unique_variable_name(name + '_starts')\n container.add_initializer(starts_name, onnx_proto.TensorProto.INT64,\n [len(starts)], starts)\n\n # First we assume no cropping is needed at the end of those axes.\n # We will change this right below depending on Crop's configuration.\n ends = [np.iinfo(np.int64).max] * 2\n\n # Crop n pixel means the end index (exclusive) is -n. 
Note that indexing\n # system is zero-based.\n if bottom_border > 0:\n ends[0] = -bottom_border\n if right_border > 0:\n ends[1] = -right_border\n\n # Add the adjusted ends.\n ends_name = scope.get_unique_variable_name(name + '_ends')\n container.add_initializer(ends_name, onnx_proto.TensorProto.INT64,\n [len(ends)], ends)\n\n # Collect all input names as a list because DynamicSlice has multiple inputs.\n input_list = [input_name, starts_name, ends_name, axes_name]\n container.add_node('DynamicSlice', input_list, output_name, op_version=9)\n\n\ndef apply_div(scope, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):\n _apply_basic_numerical_operation(scope, 'Div', input_names, output_name, container, operator_name=operator_name,\n axis=axis, broadcast=broadcast)\n\n\ndef apply_elu(scope, input_name, output_name, container, operator_name=None, alpha=1.0):\n _apply_unary_operation(scope, 'Elu', input_name, output_name, container, operator_name, alpha=alpha)\n\n\ndef apply_equal(scope, input_names, output_name, container, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'equal', operator_name)\n if container.target_opset < 7:\n op_version = 1\n elif container.target_opset < 9:\n op_version = 7\n else:\n op_version = 9\n container.add_node('Equal', input_names, output_name, name=name, op_version=op_version)\n\n\ndef apply_exp(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Exp', input_name, output_name, container, operator_name=operator_name)\n\n\ndef apply_floor(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Floor', input_name, output_name, container, operator_name=operator_name)\n\n\ndef apply_flatten(scope, input_name, output_name, container, operator_name=None, axis=1):\n name = _create_name_or_use_existing_one(scope, 'Flatten', operator_name)\n if container.target_opset < 9:\n op_version = 1\n elif container.target_opset < 11:\n op_version = 9\n else:\n op_version = 11\n container.add_node('Flatten', input_name, output_name, name=name, op_version=op_version, axis=axis)\n\n\ndef apply_gather(scope, input_names, output_name, container, operator_name=None, axis=0):\n name = _create_name_or_use_existing_one(scope, 'Gather', operator_name)\n if container.target_opset < 11:\n op_version = 1\n else:\n op_version = 11\n\n container.add_node('Gather', input_names, output_name, name=name, op_version=op_version, axis=axis)\n\n\ndef apply_gemm(scope, input_name, output_name, container, operator_name=None, alpha=1.0, beta=1.0,\n transA=0, transB=0):\n \"\"\"\n Applies operator `gemm <https://github.com/onnx/onnx/blob/master/docs/Operators.md#gemm>`.\n \"\"\"\n name = _create_name_or_use_existing_one(scope, 'Gemm', operator_name)\n attrs = {'alpha': alpha, 'beta': beta, 'transA': transA, 'transB': transB}\n if container.target_opset < 5:\n attrs['op_version'] = 1\n attrs['broadcast'] = 1\n elif container.target_opset < 7:\n attrs['op_version'] = 6\n attrs['broadcast'] = 1\n elif container.target_opset < 11:\n attrs['op_version'] = 7\n else:\n attrs['op_version'] = 11\n\n container.add_node('Gemm', input_name, output_name, name=name, **attrs)\n\n\ndef apply_greater(scope, input_names, output_name, container, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'Greater', operator_name)\n if container.target_opset < 7:\n op_version = 1\n elif container.target_opset < 9:\n op_version = 7\n else:\n op_version = 9\n\n 
container.add_node('Greater', input_names, output_name, name=name, op_version=op_version)\n\n\ndef _convert_compare_equal(scope, input_names, output_name, container, operator_name, tf_op_string, onnx_op_string_rev,\n onnx_op_string):\n if container.target_opset < 7:\n raise ValueError(tf_op_string + \" op is not supported for opset < 7\")\n elif container.target_opset < 9:\n op_version = 7\n elif container.target_opset < 12:\n op_version = 9\n else:\n op_version = 12\n name = _create_name_or_use_existing_one(scope, tf_op_string, operator_name)\n if op_version < 9:\n compare_input_0 = scope.get_unique_variable_name(name + '_input_0_cast')\n container.add_node('Cast', [input_names[0]], compare_input_0, name=name + '_input_0_cast', to=1)\n compare_input_1 = scope.get_unique_variable_name(name + '_input_1_cast')\n container.add_node('Cast', [input_names[1]], compare_input_1, name=name + '_input_1_cast', to=1)\n less_out = scope.get_unique_variable_name(name + '_less_out')\n container.add_node(onnx_op_string_rev, [compare_input_0, compare_input_1], less_out,\n name=name + '_' + onnx_op_string_rev.lower(),\n op_version=op_version)\n container.add_node('Not', less_out, output_name, name=name + '_not')\n elif op_version < 12:\n compare_node = scope.get_unique_variable_name(name + '_compare_node')\n container.add_node(onnx_op_string_rev, input_names, compare_node,\n name=name + '_' + onnx_op_string_rev.lower(),\n op_version=op_version)\n container.add_node('Not', [compare_node], output_name, name=name)\n else:\n container.add_node(onnx_op_string, input_names, output_name,\n name=name + '_' + onnx_op_string_rev.lower(), op_version=op_version)\n\n\ndef apply_greater_or_equal(scope, input_names, output_name, container, operator_name=None):\n _convert_compare_equal(scope, input_names, output_name, container, operator_name, 'GreaterEqual', 'Less',\n 'GreaterOrEqual')\n\n\ndef apply_less_or_equal(scope, input_names, output_name, container, operator_name=None):\n _convert_compare_equal(scope, input_names, output_name, container, operator_name, 'LessEqual', 'Greater',\n 'LessOrEqual')\n\n\ndef apply_gru(scope, input_names, output_names, container, operator_name=None, output_seq=0, reset_after=0, **attrs):\n name = _create_name_or_use_existing_one(scope, 'GRU', operator_name)\n if container.target_opset < 3:\n op_version = 1\n attrs['output_sequence'] = 1 if output_seq else 0\n else:\n attrs['linear_before_reset'] = 1 if reset_after else 0\n if container.target_opset <= 5:\n attrs['output_sequence'] = 1 if output_seq else 0\n op_version = 3\n else:\n op_version = 7\n\n container.add_node('GRU', input_names, output_names, name=name, op_version=op_version, **attrs)\n\n\ndef apply_hard_sigmoid(scope, input_name, output_name, container, operator_name=None, alpha=None, beta=None):\n _apply_unary_operation(scope, 'HardSigmoid', input_name, output_name, container, operator_name,\n alpha=alpha, beta=beta)\n\n\ndef apply_identity(scope, input_name, output_name, container, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'Identity', operator_name)\n container.add_node('Identity', input_name, output_name, name=name)\n\n\ndef apply_instance_norm(scope, input_names, output_name, container, operator_name=None, epsilon=1e-5):\n name = _create_name_or_use_existing_one(scope, 'InstanceNormalization', operator_name)\n attrs = {'name': name, 'epsilon': epsilon}\n\n if container.target_opset < 2:\n attrs['consumed_inputs'] = [0] * len(input_names)\n op_version = 1\n else:\n op_version = 6\n\n 
container.add_node('InstanceNormalization', input_names, output_name, op_version=op_version, **attrs)\n\n\ndef apply_inverse(scope, input_name, output_name, container, operator_name=None):\n if container.target_opset < 12:\n raise ValueError(\"tf op MatrixInverse is not supported for opset < 12\")\n else:\n op_version = 12\n name = _create_name_or_use_existing_one(scope, 'Inverse', operator_name)\n container.add_node('Inverse', input_name, output_name, name=name, op_version=op_version)\n\n\ndef apply_leaky_relu(scope, input_name, output_name, container, operator_name=None, alpha=0.01):\n _apply_unary_operation(scope, 'LeakyRelu', input_name, output_name, container, operator_name, alpha=alpha)\n\n\ndef apply_less(scope, input_names, output_name, container, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'Less', operator_name)\n if container.target_opset < 7:\n op_version = 1\n elif container.target_opset < 9:\n op_version = 7\n else:\n op_version = 9\n\n container.add_node('Less', input_names, output_name, name=name, op_version=op_version)\n\n\ndef apply_log(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Log', input_name, output_name, container, operator_name=operator_name)\n\n\ndef apply_lstm(scope, input_names, output_names, container, operator_name=None, output_seq=0, **attrs):\n name = _create_name_or_use_existing_one(scope, 'LSTM', operator_name)\n if container.target_opset <= 6:\n attrs['output_sequence'] = 1 if output_seq else 0\n op_version = 1\n else:\n op_version = 7\n container.add_node('LSTM', input_names, output_names, name=name, op_version=op_version, **attrs)\n\n\ndef apply_matmul(scope, input_names, output_name, container, operator_name=None):\n op_type = 'MatMul'\n name = _create_name_or_use_existing_one(scope, op_type, operator_name)\n if container.target_opset <= 9:\n op_version = 1\n else:\n op_version = 9\n container.add_node(op_type, input_names, output_name, op_version=op_version, name=name)\n\n\ndef apply_max(scope, input_names, output_name, container, operator_name=None):\n _apply_pointwise_operation(scope, 'Max', input_names, output_name, container, operator_name)\n\n\ndef apply_mean(scope, input_names, output_name, container, operator_name=None):\n _apply_pointwise_operation(scope, 'Mean', input_names, output_name, container, operator_name)\n\n\ndef apply_min(scope, input_names, output_name, container, operator_name=None):\n _apply_pointwise_operation(scope, 'Min', input_names, output_name, container, operator_name)\n\n\ndef apply_mul(scope, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):\n _apply_basic_numerical_operation(scope, 'Mul', input_names, output_name, container, operator_name=operator_name,\n axis=axis, broadcast=broadcast)\n\n\ndef apply_neg(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Neg', input_name, output_name, container, operator_name)\n\n\ndef apply_normalization(scope, input_name, output_name, container, operator_name=None, axis=1, p=2):\n name = _create_name_or_use_existing_one(scope, 'LpNormalization', operator_name)\n container.add_node('LpNormalization', input_name, output_name, name=name, p=p, axis=axis)\n\n\ndef apply_not_op(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Not', input_name, output_name, container, operator_name)\n\n\ndef apply_pad(scope, input_name, output_name, container, operator_name=None, mode=None, pads=None, 
value=None,\n onnx_type=onnx_proto.TensorProto.FLOAT):\n name = _create_name_or_use_existing_one(scope, 'Pad', operator_name)\n attrs = {'name': name}\n inputs = input_name if isinstance(input_name, list) else [input_name]\n\n if mode is not None:\n attrs['mode'] = mode\n\n if container.target_opset < 11:\n if isinstance(pads, str):\n raise ValueError(\"Dynamic pad is not supported for opset < 11.\")\n if value is not None:\n attrs['value'] = value\n if container.target_opset < 2:\n attrs['paddings'] = pads\n op_version = 1\n else:\n attrs['pads'] = pads\n op_version = 2\n else:\n op_version = 11\n if isinstance(pads, str):\n inputs.append(pads)\n else:\n pads_name = scope.get_unique_variable_name(name + '_pads')\n container.add_initializer(pads_name, onnx_proto.TensorProto.INT64, [len(pads)], pads)\n inputs.append(pads_name)\n if value is not None:\n value_name = scope.get_unique_variable_name(name + '_value')\n container.add_initializer(value_name, onnx_type, [], [value])\n inputs.append(value_name)\n\n container.add_node('Pad', inputs, output_name, op_version=op_version, **attrs)\n\n\ndef apply_parametric_softplus(scope, input_name, output_name, container, operator_name=None, alpha=None, beta=None):\n if alpha is None:\n alpha = [1.0]\n if beta is None:\n beta = [0.]\n\n name = _create_name_or_use_existing_one(scope, 'ParametricSoftplus', operator_name)\n if container.target_opset < 9:\n if len(alpha) != 1 or len(beta) != 1:\n raise ValueError('alpha and beta must be 1-element lists')\n op_type = 'ParametricSoftplus'\n attrs = {'name': name, 'alpha': alpha[0], 'beta': beta[0]}\n container.add_node(op_type, input_name, output_name, **attrs)\n else:\n # Define three scalars: a, b, 1.\n aName = scope.get_unique_variable_name(name + '_alpha')\n aShape = [len(alpha)] if len(alpha) == 1 else [len(alpha), 1, 1]\n container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, aShape, alpha)\n bShape = [len(beta)] if len(beta) == 1 else [len(beta), 1, 1]\n bName = scope.get_unique_variable_name(name + '_beta')\n container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, bShape, beta)\n oneName = scope.get_unique_variable_name(name + '_one')\n container.add_initializer(oneName, onnx_proto.TensorProto.FLOAT, [1], [1.])\n\n # c = b * x\n cName = scope.get_unique_variable_name(name + '_c')\n apply_mul(scope, [input_name, bName], cName, container)\n\n # d = exp(c)\n dName = scope.get_unique_variable_name(name + '_d')\n apply_exp(scope, cName, dName, container)\n\n # e = 1 + d\n eName = scope.get_unique_variable_name(name + '_e')\n apply_add(scope, [dName, oneName], eName, container)\n\n # f = log(e)\n fName = scope.get_unique_variable_name(name + '_f')\n apply_log(scope, eName, fName, container)\n\n # g = a * f\n apply_mul(scope, [fName, aName], output_name, container)\n\n\ndef apply_pow(scope, input_names, output_name, container, operator_name=None, axis=None, broadcast=None):\n name = _create_name_or_use_existing_one(scope, 'Pow', operator_name)\n\n attrs = {'name': name}\n if container.target_opset < 7:\n # Before ONNX-1.2, broadcasting behavior is Caffe2-like.\n if axis is not None:\n attrs['axis'] = axis\n if broadcast is not None:\n attrs['broadcast'] = broadcast\n op_version = 1\n elif container.target_opset < 12:\n # Since ONNX-1.2, broadcasting behavior is Numpy-like, so we don't need to specify any attributes\n op_version = 7\n else:\n op_version = 12\n\n container.add_node('Pow', input_names, output_name, op_version=op_version, **attrs)\n\n\ndef apply_prelu(scope, input_name, 
output_name, container, operator_name=None, slope=None):\n name = _create_name_or_use_existing_one(scope, 'PRelu', operator_name)\n slope_tensor_name = scope.get_unique_variable_name('slope')\n s_shape = slope.shape\n if container.target_opset < 7:\n s_shape = [len(slope.flatten())]\n container.add_initializer(slope_tensor_name, onnx_proto.TensorProto.FLOAT, s_shape, slope.flatten())\n\n if container.target_opset < 6:\n container.add_node('PRelu', [input_name, slope_tensor_name], output_name, op_version=1, name=name,\n consumed_inputs=[0, 0])\n else:\n if container.target_opset < 7:\n op_version = 6\n elif container.target_opset < 9:\n op_version = 7\n else:\n # opset 9 supports unidirectional broadcasting\n op_version = 9\n\n container.add_node('PRelu', [input_name, slope_tensor_name], output_name, op_version=op_version, name=name)\n\n\ndef apply_range(scope, input_name, output_name, container, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'Range', operator_name)\n container.add_node('Range', input_name, output_name, op_version=11, name=name)\n\n\ndef apply_reciprocal(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Reciprocal', input_name, output_name, container, operator_name=operator_name)\n\n\ndef apply_relu(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Relu', input_name, output_name, container, operator_name)\n\n\ndef apply_relu_6(scope, input_name, output_name, container, operator_name=None, zero_value=0.0):\n name_relu = _create_name_or_use_existing_one(scope, 'relu', operator_name)\n name_relu_op = _create_name_or_use_existing_one(scope, 'relu6', operator_name)\n apply_relu(scope, input_name, name_relu, container, name_relu_op+'_relu')\n apply_clip(scope, name_relu, output_name, container, name_relu_op + '_clip', zero_value+6, zero_value)\n\n\ndef apply_reshape(scope, input_name, output_name, container, operator_name=None, desired_shape=None):\n if not isinstance(desired_shape, str) and len(list(i for i in desired_shape if i is not None and i < 0)) > 1:\n raise ValueError('There can only be one -1 in the targeted shape of a Reshape but got %s' % desired_shape)\n\n name = _create_name_or_use_existing_one(scope, 'Reshape', operator_name)\n\n if container.target_opset < 5:\n container.add_node('Reshape', input_name, output_name, op_version=1, name=name, shape=desired_shape,\n consumed_inputs=[0])\n else:\n if isinstance(desired_shape, str):\n desired_shape_name = desired_shape\n else:\n desired_shape_name = scope.get_unique_variable_name('shape_tensor')\n container.add_initializer(desired_shape_name, onnx_proto.TensorProto.INT64, [len(desired_shape)],\n desired_shape)\n\n # Create ONNX Reshape operator\n if isinstance(input_name, list):\n input_name.append(desired_shape_name)\n else:\n input_name = [input_name, desired_shape_name]\n container.add_node('Reshape', input_name, output_name, op_version=5, name=name)\n\n\ndef apply_resize(scope, input_name, output_name, container, operator_name=None, mode='nearest',\n coordinate_transformation_mode='asymmetric', scales=None):\n '''\n :param mode: \"nearest\" or \"linear\"\n :param scales: a float tensor for scaling (upsampling or downsampling) all input dimensions\n '''\n name = _create_name_or_use_existing_one(scope, 'Resize', operator_name)\n attrs = {'name': name}\n attrs['mode'] = mode.lower()\n\n inputs = [input_name]\n\n if container.target_opset < 11:\n op_version = 10\n else:\n op_version = 11\n roi_tensor_name 
= scope.get_unique_variable_name(name + '_roi')\n roi = [0.0] * len(scales) + [1.0] * len(scales)\n container.add_initializer(roi_tensor_name, onnx_proto.TensorProto.FLOAT, [2 * len(scales)], roi)\n inputs.append(roi_tensor_name)\n attrs['coordinate_transformation_mode'] = coordinate_transformation_mode\n if attrs['mode'] == 'nearest':\n attrs['nearest_mode'] = 'floor'\n\n scales_tensor_name = scope.get_unique_variable_name(name + '_scales')\n container.add_initializer(scales_tensor_name, onnx_proto.TensorProto.FLOAT, [len(scales)], scales)\n inputs.append(scales_tensor_name)\n container.add_node('Resize', inputs, output_name, op_version=op_version, **attrs)\n\n\ndef apply_rnn(scope, input_names, output_names, container, operator_name=None, output_seq=0, **attrs):\n name = _create_name_or_use_existing_one(scope, 'RNN', operator_name)\n if container.target_opset <= 6:\n attrs['output_sequence'] = 1 if output_seq else 0\n op_version = 1\n else:\n op_version = 7\n container.add_node('RNN', input_names, output_names, name=name, op_version=op_version, **attrs)\n\n\ndef apply_shape(scope, input_name, output_name, container, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'Shape', operator_name)\n container.add_node('Shape', input_name, output_name, name=name, op_version=1)\n\n\ndef apply_sigmoid(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Sigmoid', input_name, output_name, container, operator_name)\n\n\ndef apply_softsign(scope, input_name, output_name, container, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'Softsign', operator_name)\n container.add_node('Softsign', input_name, output_name, name=name, op_version=1)\n\n\n# See alpha and gamma at https://github.com/keras-team/keras/blob/master/keras/activations.py#L80-L81\ndef apply_selu(scope, input_name, output_name, container, operator_name=None, alpha=1.673263, gamma=1.050701):\n _apply_unary_operation(scope, 'Selu', input_name, output_name, container, operator_name, alpha=alpha, gamma=gamma)\n\n\ndef apply_softmax(scope, input_name, output_name, container, operator_name=None, axis=1):\n name = _create_name_or_use_existing_one(scope, 'Softmax', operator_name)\n container.add_node('Softmax', input_name, output_name, name=name, axis=axis)\n\n\ndef apply_scaled_tanh(scope, input_name, output_name, container, operator_name=None, alpha=None, beta=None):\n if alpha is None:\n alpha = [1.0]\n if beta is None:\n beta = [1.0]\n if len(alpha) != 1 or len(beta) != 1:\n raise ValueError('alpha and beta must be 1-element lists')\n\n name = _create_name_or_use_existing_one(scope, 'ScaledTanh', operator_name)\n if container.target_opset < 9:\n attrs = {'name': name, 'alpha': alpha[0], 'beta': beta[0]}\n container.add_node('ScaledTanh', input_name, output_name, **attrs)\n else:\n # Define scalar a, initialize with parameter alpha.\n aName = scope.get_unique_variable_name(name + '_alpha')\n aShape = [len(alpha)] if len(alpha) == 1 else [len(alpha), 1, 1]\n container.add_initializer(aName, onnx_proto.TensorProto.FLOAT, aShape, alpha)\n\n # Define scalar b, initialize with parameter beta.\n bShape = [len(beta)] if len(beta) == 1 else [len(beta), 1, 1]\n bName = scope.get_unique_variable_name(name + '_beta')\n container.add_initializer(bName, onnx_proto.TensorProto.FLOAT, bShape, beta)\n\n # c = b * x\n cName = scope.get_unique_variable_name(name + '_c')\n apply_mul(scope, [input_name, bName], cName, container)\n\n # d = tanh(c)\n dName = 
scope.get_unique_variable_name(name + '_d')\n apply_tanh(scope, cName, dName, container)\n\n # output = a * d\n apply_mul(scope, [aName, dName], output_name, container)\n\n\ndef apply_slice(scope, input_name, output_name, container, starts, ends,\n axes=None, steps=None, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'Slice', operator_name)\n\n if container.target_opset < 10:\n if axes is None:\n container.add_node('Slice', input_name, output_name, name=name,\n starts=starts, ends=ends, op_version=1)\n else:\n container.add_node('Slice', input_name, output_name, name=name,\n starts=starts, ends=ends, axes=axes, op_version=1)\n else:\n if container.target_opset == 10:\n op_version = 10\n else:\n op_version = 11\n inputs = input_name if isinstance(input_name, list) else [input_name]\n if isinstance(starts, str):\n starts_name = starts\n else:\n starts_name = scope.get_unique_variable_name('starts')\n container.add_initializer(starts_name, onnx_proto.TensorProto.INT64,\n [len(starts)], starts)\n\n if isinstance(ends, str):\n ends_name = ends\n else:\n ends_name = scope.get_unique_variable_name('ends')\n container.add_initializer(ends_name, onnx_proto.TensorProto.INT64,\n [len(ends)], ends)\n\n inputs.append(starts_name)\n inputs.append(ends_name)\n if axes:\n if isinstance(axes, str):\n axes_name = axes\n else:\n axes_name = scope.get_unique_variable_name('axes')\n container.add_initializer(axes_name, onnx_proto.TensorProto.INT64,\n [len(axes)], axes)\n inputs.append(axes_name)\n if steps:\n if not axes:\n inputs.append('')\n if isinstance(steps, str):\n steps_name = steps\n else:\n steps_name = scope.get_unique_variable_name('steps')\n container.add_initializer(steps_name, onnx_proto.TensorProto.INT64,\n [len(steps)], steps)\n inputs.append(steps_name)\n container.add_node('Slice', inputs, output_name, name=name,\n op_version=op_version)\n\n\ndef apply_slice2(scope, input_name, output_name, container,\n operator_name=None, starts=None, ends=None, axes=None, steps=None):\n assert starts is not None, 'the starts in slice op cannot be None'\n assert ends is not None, 'the ends in slice op cannot be None'\n return apply_slice(scope, input_name, output_name, container, starts, ends,\n axes, steps, operator_name)\n\n\ndef apply_split(scope, input_name, output_names, container, operator_name=None, split=None, axis=0):\n name = _create_name_or_use_existing_one(scope, 'Split', operator_name)\n if container.target_opset <= 1:\n op_version = 1\n elif container.target_opset < 11:\n op_version = 2\n else:\n op_version = 11\n\n attrs = {'name': name}\n if split is not None:\n attrs['split'] = split\n if axis is not None:\n attrs['axis'] = axis\n\n container.add_node('Split', input_name, output_names, op_version=op_version, **attrs)\n\n\ndef apply_sqrt(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Sqrt', input_name, output_name, container, operator_name=operator_name)\n\n\ndef _apply_squeeze_unsqueeze(scope, input_name, output_name, container, squeeze_str, operator_name=None, axes=None,\n rank=0):\n name = _create_name_or_use_existing_one(scope, squeeze_str, operator_name)\n if container.target_opset < 11:\n op_version = 1\n axes = [axis if axis >= 0 else axis + rank + 1 for axis in axes]\n else:\n op_version = 11\n container.add_node(squeeze_str, input_name, output_name, name=name, op_version=op_version, axes=axes)\n\n\ndef apply_squeeze(scope, input_name, output_name, container, operator_name=None, axes=None, rank=0):\n if axes is 
None:\n axes = [0]\n _apply_squeeze_unsqueeze(scope, input_name, output_name, container, 'Squeeze', operator_name, axes, rank)\n\n\ndef apply_sub(scope, input_names, output_name, container, operator_name=None, axis=None, broadcast=0):\n _apply_basic_numerical_operation(scope, 'Sub', input_names, output_name, container, operator_name=operator_name,\n axis=axis, broadcast=broadcast)\n\n\ndef apply_sum(scope, input_names, output_name, container, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'Sum', operator_name)\n if container.target_opset < 6:\n op_version = 1\n else:\n op_version = 6\n container.add_node('Sum', input_names, output_name, op_version=op_version, name=name)\n\n\ndef apply_tanh(scope, input_name, output_name, container, operator_name=None):\n _apply_unary_operation(scope, 'Tanh', input_name, output_name, container, operator_name)\n\n\ndef apply_thresholded_relu(scope, input_name, output_name, container, operator_name=None, alpha=None):\n if alpha is None:\n alpha = [1.0]\n\n name = _create_name_or_use_existing_one(scope, 'ThresholdedRelu', operator_name)\n attrs = {'name': name, 'alpha': alpha[0]}\n if container.target_opset < 10:\n # ThresholdedRelu graduated from an experimental op to a full op in opset 10\n # onnxruntime maintains support in the ONNX domain for ThresholdedRelu as a contrib op\n attrs['op_domain'] = \"ai.onnx\"\n op_version = 1\n else:\n op_version = 10\n container.add_node('ThresholdedRelu', input_name, output_name, op_version=op_version, **attrs)\n\n\ndef apply_tile(scope, input_name, output_name, container, operator_name=None, repeats=None):\n name = _create_name_or_use_existing_one(scope, 'Tile', operator_name)\n\n if repeats is None or (not isinstance(repeats, str) and all(repeat_count == 1 for repeat_count in repeats)):\n container.add_node('Identity', input_name, output_name, name=name)\n return\n\n if container.target_opset < 6:\n intermediate_input_name = input_name\n intermediate_output_name = None\n if isinstance(repeats, str):\n raise ValueError('repeats cannot be string type before opset 6')\n\n for axis, repeat_count in enumerate(repeats):\n if repeat_count == 1:\n continue\n\n # Create the 2nd input of Tile\n tile_tensor_name = scope.get_unique_variable_name(name + '_tile')\n container.add_initializer(tile_tensor_name, onnx_proto.TensorProto.FLOAT, [1], [float(repeat_count)])\n\n # Create the 3rd input of Tile\n axis_tensor_name = scope.get_unique_variable_name(name + '_axis')\n container.add_initializer(axis_tensor_name, onnx_proto.TensorProto.FLOAT, [1], [float(axis)])\n\n # Create tile for duplicating along one axis. 
After ONNX-1.2, we can duplicate along multiple axes, so we\n # don't have to iterate through all axes.\n intermediate_output_name = scope.get_unique_variable_name(name + '_input')\n container.add_node('Tile', [intermediate_input_name, tile_tensor_name, axis_tensor_name],\n intermediate_output_name, name=name)\n\n # Use the output produced by this round as the input in the next iteration\n intermediate_input_name = intermediate_output_name\n\n # Create a new name for next Tile\n name = scope.get_unique_operator_name('Tile')\n\n # Use the last Tile name for the name of an Identity\n container.add_node('Identity', intermediate_output_name, output_name, op_version=1, name=name)\n else:\n # ONNX-1.2 has a new Tile and we use it here\n if isinstance(repeats, str):\n container.add_node('Tile', input_name + [repeats], output_name, op_version=6, name=name)\n else:\n repeat_tensor_name = scope.get_unique_variable_name(name + '_repeats')\n container.add_initializer(repeat_tensor_name, onnx_proto.TensorProto.INT64, [len(repeats)], repeats)\n container.add_node('Tile', [input_name, repeat_tensor_name], output_name, op_version=6, name=name)\n\n\ndef apply_topk(scope, input_name, output_names, container, k, operator_name=None):\n name = _create_name_or_use_existing_one(scope, 'TopK', operator_name)\n\n if container.target_opset < 10:\n if isinstance(k, str):\n raise ValueError('topk k cannot be string type before opset 10')\n container.add_node('TopK', input_name, output_names, name=name, k=k, op_version=1)\n else:\n if container.target_opset == 10:\n op_version = 10\n else:\n op_version = 11\n\n if isinstance(k, str):\n k_value_name = k\n else:\n k_value_name = scope.get_unique_variable_name('k_value')\n container.add_initializer(k_value_name, onnx_proto.TensorProto.INT64, [1], [k])\n container.add_node('TopK', input_name + [k_value_name], output_names, name=name, op_version=op_version)\n\n\ndef apply_transpose(scope, input_name, output_name, container, operator_name=None, perm=None):\n name = _create_name_or_use_existing_one(scope, 'Transpose', operator_name)\n container.add_node('Transpose', input_name, output_name, name=name, perm=perm)\n\n\ndef apply_upsample(scope, input_name, output_name, container, operator_name=None, mode='nearest',\n coordinate_transformation_mode='asymmetric', scales=None):\n '''\n :param mode: nearest or linear\n :param scales: an integer list of scaling-up rate of all input dimensions\n '''\n if container.target_opset < 10:\n name = _create_name_or_use_existing_one(scope, 'Upsample', operator_name)\n inputs = [input_name]\n attrs = {'name': name}\n if container.target_opset < 7:\n if len(scales) != 4:\n raise ValueError('Need to specify a 4-element list the the scales of N-, C-, H-, and W-axes')\n attrs['height_scale'] = float(scales[2])\n attrs['width_scale'] = float(scales[3])\n attrs['mode'] = mode.upper()\n op_version = 1\n else:\n attrs['mode'] = mode.lower()\n if container.target_opset < 9:\n attrs['scales'] = list(map(float, scales))\n op_version = 7\n else:\n # scales moved from attribute to input in opset 9\n scales_tensor_name = scope.get_unique_variable_name(name + '_scales')\n container.add_initializer(scales_tensor_name, onnx_proto.TensorProto.FLOAT, [len(scales)], scales)\n inputs = [input_name, scales_tensor_name]\n op_version = 9\n\n container.add_node('Upsample', inputs, output_name, op_version=op_version, **attrs)\n else:\n # Upsample op is deprecated in ONNX opset 10\n # We implement Upsample through Resize instead\n apply_resize(scope, input_name, 
output_name, container, operator_name, mode, coordinate_transformation_mode,\n scales)\n\n\ndef apply_unsqueeze(scope, input_name, output_name, container, operator_name=None, axes=None, rank=0):\n if axes is None:\n axes = [0]\n _apply_squeeze_unsqueeze(scope, input_name, output_name, container, 'Unsqueeze', operator_name, axes, rank)\n"
] |
[
[
"numpy.array",
"numpy.iinfo"
]
] |
protivinsky/python-utils
|
[
"145fc8e6385df745c7b73fa0dfbb17abf6f58f82"
] |
[
"libs/plots.py"
] |
[
"import os\r\nfrom yattag import Doc, indent\r\nfrom libs.utils import create_stamped_temp, slugify\r\nimport matplotlib.pyplot as plt\r\n\r\n# NOTE - Does not work out of the box, needs a fix:\r\n#\r\n# Annoyingly, the js loading of subpages violates Cross-Origin Requests policy in all browsers\r\n# when files are served locally via file:///. Works fine for http protocol though.\r\n# It is possible to use iframes rather than js loader, but it's ugly and has other issues (multiple nested scrollbars).\r\n#\r\n# Workarounds:\r\n# - Firefox:\r\n# - go to about:config -> search for privacy.file_unique_origin and toggle\r\n# - then set up Firefox as the default for opening .htm files (that's the reason why I do not use .html)\r\n# - Chrome\r\n# - can be started with \"--allow-file-access-from-files\", then it should just work\r\n# - it would be possible to start the appropriate process in .show, but I have not tried\r\n# - one workaround is enough for me\r\n# - https://stackoverflow.com/a/18137280\r\n# - Edge:\r\n# - until recently, it was the only browser not enforcing the CORS policy for local files, so it just\r\n# worked. The new version of Edge enforces the same, do not know how to get around there.\r\n# - or it is possible to use local webserver and serve the files via it\r\n# - CORS policy is respected with http\r\n# - python webserver works fine, just serving the directory: python -m http.server 8000\r\n# - however seems more hassle than just changing firefox config...\r\n\r\n\r\nclass Chart:\r\n\r\n def __init__(self, figs, cols=3, title=None, format='png'):\r\n if not isinstance(figs, list):\r\n figs = [figs]\r\n self.figs = [f if isinstance(f, plt.Figure) else f.get_figure() for f in figs]\r\n self.cols = cols\r\n self.format = format\r\n self.title = title or self.figs[0].axes[0].title._text\r\n\r\n def save(self, path, inner=False):\r\n os.makedirs(path, exist_ok=True)\r\n n = len(self.figs)\r\n for i in range(n):\r\n self.figs[i].savefig(f'{path}/fig_{i+1:03d}.{self.format}')\r\n plt.close('all')\r\n\r\n doc, tag, text = Doc().tagtext()\r\n\r\n doc.asis('<!DOCTYPE html>')\r\n with tag('html'):\r\n with tag('head'):\r\n with tag('title'):\r\n text(self.title or 'Chart')\r\n with tag('body'):\r\n with tag('h1'):\r\n text(self.title or 'Chart')\r\n num_rows = (n + self.cols - 1) // self.cols\r\n for r in range(num_rows):\r\n with tag('div'):\r\n for c in range(min(self.cols, n - self.cols * r)):\r\n doc.stag('img', src=f'fig_{self.cols * r + c + 1:03d}.{self.format}')\r\n\r\n file = open('{}/page.htm'.format(path), 'w', encoding='utf-8')\r\n file.write(indent(doc.getvalue()))\r\n file.close()\r\n\r\n def show(self):\r\n path = create_stamped_temp('reports')\r\n self.save(path)\r\n os.startfile('{}/page.htm'.format(path))\r\n\r\n\r\n# I am not using it at the end, not sure if it works correctly.\r\nclass Text:\r\n\r\n def __init__(self, texts, width=750, title=None):\r\n if not isinstance(texts, list):\r\n texts = [texts]\r\n self.texts = texts\r\n self.width = width\r\n self.title = title\r\n\r\n def save(self, path, inner=False):\r\n os.makedirs(path, exist_ok=True)\r\n\r\n doc, tag, text = Doc().tagtext()\r\n\r\n doc.asis('<!DOCTYPE html>')\r\n with tag('html'):\r\n with tag('head'):\r\n with tag('title'):\r\n text(self.title or 'Text')\r\n with tag('body'):\r\n with tag('h1'):\r\n text(self.title or 'Text')\r\n with tag('div'):\r\n for t in self.texts:\r\n with tag('div', style='width: {}px; float: left'.format(self.width)):\r\n with tag('pre'):\r\n text(t)\r\n\r\n file = 
open('{}/page.htm'.format(path), 'w', encoding='utf-8')\r\n file.write(indent(doc.getvalue()))\r\n file.close()\r\n\r\n def show(self):\r\n path = create_stamped_temp('reports')\r\n self.save(path)\r\n os.startfile('{}/page.htm'.format(path))\r\n\r\n\r\nclass Selector:\r\n\r\n def __init__(self, charts, title=None):\r\n if not isinstance(charts, list):\r\n charts = [charts]\r\n self.charts = [ch if isinstance(ch, (Text, Chart, Selector)) else Chart(ch) for ch in charts]\r\n self.title = title or 'Selector'\r\n\r\n def save(self, path):\r\n os.makedirs(path, exist_ok=True)\r\n n = len(self.charts)\r\n for i in range(n):\r\n ch = self.charts[i]\r\n if ch.title is None:\r\n ch.title = '{}_{:02d}'.format('Chart' if isinstance(ch, Chart) else ('Text' if isinstance(ch, Text)\r\n else 'Selector'), i)\r\n ch.save('{}/{}'.format(path, slugify(ch.title)))\r\n\r\n doc, tag, text, line = Doc().ttl()\r\n\r\n doc.asis('<!DOCTYPE html>')\r\n with tag('html'):\r\n with tag('head'):\r\n with tag('title'):\r\n text(self.title or 'Selector')\r\n with tag('script'):\r\n doc.asis(\"\"\"\r\n function loader(target, file) {\r\n var element = document.getElementById(target);\r\n var xmlhttp = new XMLHttpRequest();\r\n xmlhttp.onreadystatechange = function(){\r\n if(xmlhttp.status == 200 && xmlhttp.readyState == 4){ \r\n var txt = xmlhttp.responseText;\r\n var next_file = \"\"\r\n var matches = txt.match(/<script>loader\\\\('.*', '(.*)'\\\\)<\\\\/script>/);\r\n if (matches) {\r\n next_file = matches[1];\r\n }; \r\n txt = txt.replace(/^[\\s\\S]*<body>/, \"\").replace(/<\\/body>[\\s\\S]*$/, \"\");\r\n txt = txt.replace(/src=\\\\\"fig_/g, \"src=\\\\\"\" + file + \"/fig_\");\r\n txt = txt.replace(/loader\\\\('/g, \"loader('\" + file.replace(\"/\", \"-\") + \"-\");\r\n txt = txt.replace(/div id=\\\\\"/, \"div id=\\\\\"\" + file.replace(\"/\", \"-\") + \"-\");\r\n txt = txt.replace(/content', '/g, \"content', '\" + file + \"/\");\r\n element.innerHTML = txt;\r\n if (next_file) {\r\n loader(file.replace(\"/\", \"-\") + \"-content\", file.replace(\"/\", \"-\") + \"/\" + next_file);\r\n }; \r\n };\r\n };\r\n xmlhttp.open(\"GET\", file + \"/page.htm\", true);\r\n xmlhttp.send();\r\n }\r\n \"\"\")\r\n with tag('body'):\r\n with tag('h1'):\r\n text(self.title or 'Selector')\r\n with tag('div'):\r\n for ch in self.charts:\r\n #line('a', ch.title, href='{}/page.html'.format(slugify(ch.title)), target='iframe')\r\n line('button', ch.title, type='button',\r\n onclick='loader(\\'content\\', \\'{}\\')'.format(slugify(ch.title)))\r\n with tag('div', id='content'):\r\n text('')\r\n with tag('script'):\r\n doc.asis('loader(\\'content\\', \\'{}\\')'.format(slugify(self.charts[0].title)))\r\n\r\n file = open('{}/page.htm'.format(path), 'w', encoding='utf-8')\r\n file.write(indent(doc.getvalue()))\r\n file.close()\r\n\r\n def show(self):\r\n path = create_stamped_temp('reports')\r\n self.save(path)\r\n os.startfile('{}/page.htm'.format(path))\r\n\r\n\r\n"
] |
[
[
"matplotlib.pyplot.close"
]
] |
xizhihuang/one_model_one_week
|
[
"a84fecd93a0fd60e1bbb923d78fe47d453938126"
] |
[
"linear_regression/regression.py"
] |
[
"# -*- coding:utf-8 -*-\nimport numpy as np\nnp.random.seed(1337)\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport matplotlib.pyplot as plt\n\nX = np.linspace(-1, 1, 500)\nnp.random.shuffle(X)\nY = 0.5 * X + 0.2 + np.random.normal(0, 0.05, (500,))\n\nX_train, Y_train = X[:400], Y[:400]\nX_val, Y_val = X[400:], Y[400:]\n\nplt.scatter(X, Y)\nplt.show()\n\nmodel = Sequential()\nmodel.add(Dense(output_dim=1, input_dim=1))\n\nmodel.compile(loss='mse', optimizer='sgd', metrics=['accuracy'])\n\nprint('begin to train...')\nfor step in range(500):\n cost = model.train_on_batch(X_train, Y_train)\n if step % 100 == 0:\n print('train cost:', cost)\n\ncost = model.evaluate(X_val, Y_val, batch_size=100)\nprint('test cost', cost)\nW, b = model.layers[0].get_weights()\nprint('weight=', W, 'b=', b)\n\n\nY_pred = model.predict(X_val)\nplt.scatter(X_val, Y_val)\nplt.plot(X_val, Y_pred)\nplt.show()\n\n"
] |
[
[
"numpy.random.normal",
"numpy.random.seed",
"matplotlib.pyplot.plot",
"numpy.random.shuffle",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"numpy.linspace"
]
] |
Evanc123/chainer
|
[
"929af7189b1271683200aa9b0ba6da2dd3dee110",
"929af7189b1271683200aa9b0ba6da2dd3dee110"
] |
[
"tests/chainer_tests/functions_tests/activation_tests/test_softmax.py",
"tests/chainer_tests/initializer_tests/test_normal.py"
] |
[
"import unittest\n\nimport numpy\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import functions\nfrom chainer import gradient_check\nfrom chainer import testing\nfrom chainer.testing import attr\n\n\n@testing.parameterize(*testing.product({\n 'shape_axis':\n [{'shape': None, 'axis': 1}, ] +\n testing.product({'shape': ((2, 3),), 'axis': (0, 1)}) +\n testing.product({'shape': ((2, 3, 4),), 'axis': (0, 2)}) +\n testing.product({'shape': ((2, 3, 2, 3),), 'axis': (1, 3)}),\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n}))\n@testing.fix_random()\nclass TestSoftmax(unittest.TestCase):\n\n def setUp(self):\n self.shape = self.shape_axis['shape']\n self.axis = self.shape_axis['axis']\n if self.shape is None:\n # For checking numerical stability\n value = -5 if self.dtype == numpy.float16 else -1000\n self.x = numpy.array([[value, 1]], dtype=self.dtype)\n else:\n self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)\n self.gy = numpy.random.uniform(-1, 1, self.x.shape).astype(self.dtype)\n self.ggx = numpy.random.uniform(-1, 1, self.x.shape).astype(self.dtype)\n\n self.check_forward_options = {}\n self.check_backward_options = {}\n self.check_double_backward_options = {}\n if self.dtype == numpy.float16:\n self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}\n self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}\n self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}\n\n def check_forward(self, x_data, use_cudnn='always'):\n x = chainer.Variable(x_data)\n with chainer.using_config('use_cudnn', use_cudnn):\n y = functions.softmax(x, axis=self.axis)\n self.assertEqual(y.data.dtype, self.dtype)\n\n y_expect = numpy.exp(self.x)\n y_roll = numpy.rollaxis(y_expect, self.axis, y_expect.ndim)\n for i in numpy.ndindex(y_roll.shape[:-1]):\n y_roll[i] /= y_roll[i].sum()\n\n testing.assert_allclose(\n y_expect, y.data, **self.check_forward_options)\n\n def test_forward_cpu(self):\n self.check_forward(self.x)\n\n @attr.gpu\n def test_forward_gpu(self):\n self.check_forward(cuda.to_gpu(self.x))\n\n @attr.gpu\n def test_forward_gpu_non_contiguous(self):\n self.check_forward(\n cuda.cupy.asfortranarray(cuda.to_gpu(self.x)))\n\n @attr.gpu\n def test_forward_gpu_no_cudnn(self):\n self.check_forward(cuda.to_gpu(self.x), 'never')\n\n def check_backward(self, x_data, gy_data, use_cudnn='always'):\n def f(x):\n return functions.softmax(x, axis=self.axis)\n\n with chainer.using_config('use_cudnn', use_cudnn):\n gradient_check.check_backward(\n f, x_data, gy_data, dtype=numpy.float64,\n **self.check_backward_options)\n\n def test_backward_cpu(self):\n self.check_backward(self.x, self.gy)\n\n @attr.gpu\n def test_backward_gpu(self):\n self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))\n\n @attr.gpu\n def test_backward_gpu_non_contiguous(self):\n self.check_backward(\n cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),\n cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)))\n\n @attr.gpu\n def test_backward_gpu_no_cudnn(self):\n self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')\n\n def check_double_backward(self, x_data, gy_data, ggx_data,\n use_cudnn='always'):\n def f(x):\n return functions.softmax(x, axis=self.axis)\n\n with chainer.using_config('use_cudnn', use_cudnn):\n gradient_check.check_double_backward(\n f, (x_data,), (gy_data,), (ggx_data), dtype=numpy.float64,\n **self.check_double_backward_options)\n\n def test_double_backward_cpu(self):\n self.check_double_backward(self.x, self.gy, self.ggx)\n\n @attr.gpu\n def 
test_double_backward_gpu(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))\n\n @attr.gpu\n def test_double_backward_gpu_no_cudnn(self):\n self.check_double_backward(\n cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),\n 'never')\n\n\n@testing.parameterize(*testing.product({\n 'axis': [0],\n 'use_cudnn': ['always', 'auto', 'never'],\n 'dtype': [numpy.float16, numpy.float32, numpy.float64],\n}))\n@attr.cudnn\nclass TestSoftmaxCudnnCall(unittest.TestCase):\n\n def setUp(self):\n self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)\n self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)\n with chainer.using_config('use_cudnn', self.use_cudnn):\n self.expect = chainer.should_use_cudnn('>=auto')\n\n def forward(self):\n x = chainer.Variable(self.x)\n return functions.softmax(x, axis=self.axis)\n\n def test_call_cudnn_forward(self):\n with chainer.using_config('use_cudnn', self.use_cudnn):\n with testing.patch('cupy.cuda.cudnn.softmaxForward') as func:\n self.forward()\n self.assertEqual(func.called, self.expect)\n\n def test_call_cudnn_backward(self):\n with chainer.using_config('use_cudnn', self.use_cudnn):\n y = self.forward()\n y.grad = self.gy\n with testing.patch('cupy.cuda.cudnn.softmaxBackward') as func:\n y.backward()\n self.assertEqual(func.called, self.expect)\n\n\ntesting.run_module(__name__, __file__)\n",
"import unittest\n\nfrom chainer.backends import cuda\nfrom chainer import initializers\nfrom chainer import testing\nfrom chainer.testing import attr\nimport numpy\n\n\n@testing.parameterize(*(\n testing.product_dict(\n [\n {'target': initializers.Normal, 'fan_option': None},\n {'target': initializers.LeCunNormal, 'fan_option': None},\n {'target': initializers.GlorotNormal, 'fan_option': None},\n {'target': initializers.HeNormal, 'fan_option': 'fan_in'},\n {'target': initializers.HeNormal, 'fan_option': 'fan_out'}\n ],\n testing.product(\n {'shape': [(2, 3), (2, 3, 4)],\n 'dtype': [numpy.float16, numpy.float32, numpy.float64]\n }\n )\n )\n))\nclass NormalBase(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def check_initializer(self, w):\n if self.fan_option is None:\n initializer = self.target(scale=0.1)\n else:\n initializer = self.target(scale=0.1, fan_option=self.fan_option)\n initializer(w)\n self.assertTupleEqual(w.shape, self.shape)\n self.assertEqual(w.dtype, self.dtype)\n\n def test_initializer_cpu(self):\n w = numpy.empty(self.shape, dtype=self.dtype)\n self.check_initializer(w)\n\n @attr.gpu\n def test_initializer_gpu(self):\n w = cuda.cupy.empty(self.shape, dtype=self.dtype)\n self.check_initializer(w)\n\n def check_shaped_initializer(self, xp):\n initializer = self.target(scale=0.1, dtype=self.dtype)\n w = initializers.generate_array(initializer, self.shape, xp)\n self.assertIs(cuda.get_array_module(w), xp)\n self.assertTupleEqual(w.shape, self.shape)\n self.assertEqual(w.dtype, self.dtype)\n\n def test_shaped_initializer_cpu(self):\n self.check_shaped_initializer(numpy)\n\n @attr.gpu\n def test_shaped_initializer_gpu(self):\n self.check_shaped_initializer(cuda.cupy)\n\n\ntesting.run_module(__name__, __file__)\n"
] |
[
[
"numpy.ndindex",
"numpy.array",
"numpy.rollaxis",
"numpy.exp",
"numpy.random.uniform"
],
[
"numpy.empty"
]
] |
SteveDoyle2/pynastran
|
[
"14798312ac0419857ce030ee367f924b4924f9fd"
] |
[
"pyNastran/op2/tables/ogs_grid_point_stresses/ogs_surface_stresses.py"
] |
[
"import warnings\nfrom typing import List\nimport numpy as np\n\nfrom pyNastran.op2.result_objects.op2_objects import ScalarObject, get_times_dtype\nfrom pyNastran.f06.f06_formatting import (\n write_floats_10e, _eigenvalue_header)\nfrom pyNastran.op2.writer.utils import fix_table3_types\nfrom pyNastran.op2.op2_interface.write_utils import set_table3_field\n\n\nclass GridPointSurfaceArray(ScalarObject):\n \"\"\"\n ' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E 5\\n',\n '0 SURFACE X-AXIS X NORMAL(Z-AXIS) Z REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID 0\\n',\n ' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \\n',\n ' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\\n']\n '0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'\n ' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'\n ' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0'\n\n \"\"\"\n def __init__(self, data_code, is_sort1, isubcase, dt):\n ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)\n self.ntotal = 0\n self.ntimes = 0\n self.nelements = 0\n self.itotal = 0\n self.ielement = 0\n self.data = None\n self.itime = None\n self.node_element = None\n self.location = None\n self._times = None\n\n def _reset_indices(self) -> None:\n self.itotal = 0\n self.ielement = 0\n\n @property\n def is_real(self) -> bool:\n return True\n @property\n def is_complex(self) -> bool:\n return False\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the GridPointStressesArray\"\"\"\n if self.is_built:\n return\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node_element = np.zeros((self.ntotal, 2), dtype=idtype)\n #oxx, oyy, txy, angle, major, minor, ovm\n self.data = np.zeros((self.ntimes, self.nelements, 8), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n\n self._times = np.zeros(self.ntimes, dtype=dtype)\n\n def _write_table_3(self, op2_file, op2_ascii, new_result, itable, itime): #, itable=-3, itime=0):\n import inspect\n from struct import pack\n frame = inspect.currentframe()\n call_frame = inspect.getouterframes(frame, 2)\n op2_ascii.write('%s.write_table_3: %s\\n' % (self.__class__.__name__, call_frame[1][3]))\n\n #if itable == -3:\n #print('*writing itable=%s' % itable)\n if new_result and itable != -3:\n header = [\n 4, 146, 4,\n ]\n else:\n header = [\n 4, itable, 4,\n 4, 1, 4,\n 4, 0, 4,\n 4, 146, 4,\n ]\n op2_file.write(pack(b'%ii' % len(header), *header))\n op2_ascii.write('table_3_header = %s\\n' % header)\n #op2_file.write(pack('12i', *header))\n #else:\n #print('***writing itable=%s' % itable)\n #op2_file.write(pack('3i', *[\n ##4, itable, 4,\n ##4, 1, 4,\n ##4, 0, 4,\n #4, 146, 4,\n #]))\n approach_code = self.approach_code\n table_code = self.table_code\n isubcase = self.isubcase\n #[\n #'aCode', 'tCode', 'element_type', 'isubcase',\n #'???', '???', '???', 'load_set'\n #'format_code', 'num_wide', 's_code', '???',\n #'???', '???', '???', '???',\n #'???', '???', '???', '???',\n #'???', '???', '???', '???',\n #'???', 'Title', 'subtitle', 'label']\n #random_code = 
self.random_code\n ogs = self.ogs\n if ogs is None:\n #print(''.join(self.get_stats()))\n warnings.warn('ogs=0...')\n ogs = 0\n\n format_code = self.format_code\n s_code = self.sCode\n num_wide = self.num_wide\n acoustic_flag = 0\n thermal = 0\n title = b'%-128s' % self.title.encode('ascii')\n subtitle = b'%-128s' % self.subtitle.encode('ascii')\n label = b'%-128s' % self.label.encode('ascii')\n ftable3 = b'50i 128s 128s 128s'\n unused_oCode = 0\n\n ftable3 = b'i' * 50 + b'128s 128s 128s'\n field6 = 0\n field7 = 0\n if self.analysis_code == 1:\n field5 = self.lsdvmns[itime]\n if np.isnan(field5): # poor sort2 -> sort1\n raise RuntimeError('field5 in a static case is nan...; do you have SORT2?')\n #field5 = 1\n\n elif self.analysis_code == 2:\n field5 = self.modes[itime]\n field6 = self.eigns[itime]\n field7 = self.cycles[itime]\n assert isinstance(field6, float), type(field6)\n assert isinstance(field7, float), type(field7)\n ftable3 = set_table3_field(ftable3, 6, b'f') # field 6\n ftable3 = set_table3_field(ftable3, 7, b'f') # field 7\n\n #elif self.analysis_code == 3:\n #field5 = self.freqs[itime]\n elif self.analysis_code == 5:\n field5 = self.freqs[itime]\n ftable3 = set_table3_field(ftable3, 5, b'f') # field 5\n elif self.analysis_code == 6:\n field5 = self.dts[itime]\n ftable3 = set_table3_field(ftable3, 5, b'f') # field 5\n elif self.analysis_code == 7: # pre-buckling\n field5 = self.lsdvmns[itime] # load set number\n elif self.analysis_code == 8: # post-buckling\n field5 = self.lsdvmns[itime] # load set number\n #if hasattr(self, 'eigns'):\n if hasattr(self, 'eigens'):\n field6 = self.eigns[itime]\n elif hasattr(self, 'eigrs'):\n field6 = self.eigrs[itime]\n else: # pragma: no cover\n print(self.get_stats())\n raise NotImplementedError('cant find eigns or eigrs on analysis_code=8')\n ftable3 = set_table3_field(ftable3, 6, b'f') # field 6\n elif self.analysis_code == 9: # complex eigenvalues\n field5 = self.modes[itime]\n if hasattr(self, 'eigns'):\n field6 = self.eigns[itime]\n elif hasattr(self, 'eigrs'):\n field6 = self.eigrs[itime]\n else: # pragma: no cover\n print(self.get_stats())\n raise NotImplementedError('cant find eigns or eigrs on analysis_code=9')\n\n ftable3 = set_table3_field(ftable3, 6, b'f') # field 6\n field7 = self.eigis[itime]\n ftable3 = set_table3_field(ftable3, 7, b'f') # field 7\n elif self.analysis_code == 10: # nonlinear statics\n field5 = self.lftsfqs[itime]\n ftable3 = set_table3_field(ftable3, 5, b'f') # field 5; load step\n elif self.analysis_code == 11: # old geometric nonlinear statics\n field5 = self.lsdvmns[itime] # load set number\n else:\n raise NotImplementedError(self.analysis_code)\n\n\n #self.ogs = self.add_data_parameter(data, 'ogs_id', b'i', 3, False)\n #self.refid = self.add_data_parameter(data, 'refid', b'i', 8, False)\n #self.format_code = self.add_data_parameter(data, 'format_code', b'i', 9, False)\n #self.num_wide = self.add_data_parameter(data, 'num_wide', b'i', 10, False)\n #self.sCode = self.add_data_parameter(data, 'sCode', b'i', 11, False)\n #self.oCoord = self.add_data_parameter(data, 'oCoord', b'i', 12, False)\n #self.axis = self.add_data_parameter(data, 'axis', b'i', 13, False)\n #self.normal = self.add_data_parameter(data, 'normal', b'i', 14, False)\n\n table3 = [\n approach_code, table_code, ogs, isubcase, field5,\n field6, field7, self.refid, format_code, num_wide,\n s_code, self.oCoord, self.axis, self.normal, 0,\n 0, 0, 0, 0, 0,\n 0, 0, thermal, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 
0, 0,\n title, subtitle, label,\n ]\n assert table3[22] == thermal\n\n table3 = fix_table3_types(table3, size=4)\n data = [584] + table3 + [584]\n fmt = b'i' + ftable3 + b'i'\n #print(fmt)\n #print(data)\n #f.write(pack(fascii, '%s header 3c' % self.table_name, fmt, data))\n op2_ascii.write('%s header 3c = %s\\n' % (self.table_name, data))\n op2_file.write(pack(fmt, *data))\n\n #def build_dataframe(self):\n #\"\"\"creates a pandas dataframe\"\"\"\n #import pandas as pd\n #headers = self.get_headers()\n #element_node = [self.element_node[:, 0], self.element_node[:, 1]]\n #if self.nonlinear_factor not in (None, np.nan):\n #column_names, column_values = self._build_dataframe_transient_header()\n #self.data_frame = pd.Panel(self.data, items=column_values, major_axis=element_node, minor_axis=headers).to_frame()\n #self.data_frame.columns.names = column_names\n #else:\n #self.data_frame = pd.Panel(self.data, major_axis=element_node, minor_axis=headers).to_frame()\n #self.data_frame.columns.names = ['Static']\n #self.data_frame.index.names = ['NodeID', 'ElementID', 'Item']\n\n def add_sort1(self, dt, nid, eid, fiber, nx, ny, txy, angle, majorP, minorP, tmax, ovm):\n \"\"\"unvectorized method for adding SORT1 transient data\"\"\"\n #assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)\n self._times[self.itime] = dt\n self.node_element[self.itotal, :] = [nid, eid]\n self.location[self.itotal] = fiber\n self.data[self.itime, self.itotal, :] = [nx, ny, txy, angle, majorP, minorP, tmax, ovm]\n self.itotal += 1\n\n def get_stats(self, short: bool=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n f' ntimes: {self.ntimes:d}\\n',\n f' ntotal: {self.ntotal:d}\\n',\n ]\n\n ntimes, nelements, _ = self.data.shape\n assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)\n assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n n = len(headers)\n msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (ntimes_word, n, n, str(', '.join(headers))))\n msg.append(f' node_element.shape = {self.node_element.shape}\\n')\n msg.append(f' location.shape = {self.location.shape}\\n')\n msg.append(f' data.shape = {self.data.shape}\\n')\n msg += self.get_data_code()\n return msg\n\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):\n if header is None:\n header = []\n\n cid = self.refid\n axis_int = self.oCoord\n axis_map = {0 : 'X', 1 : 'Y', 2 : 'Z'}\n axis = axis_map[axis_int]\n msg = self._get_f06_message(self.ogs_id, cid, axis)\n\n ntimes = self.data.shape[0]\n\n nids = self.node_element[:, 0]\n eids = self.node_element[:, 1]\n for itime in range(ntimes):\n dt = self._times[itime]\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg))\n\n nx = self.data[itime, :, 0]\n ny = self.data[itime, :, 1]\n txy = self.data[itime, :, 2]\n angle = self.data[itime, :, 3]\n majorp = self.data[itime, :, 4]\n minorp = self.data[itime, :, 5]\n tmax = self.data[itime, :, 6]\n ovm = self.data[itime, :, 7]\n fibers = self.location\n nid_old = 
-1\n for (nid, eid, fiber, nxi, nyi, txyi, anglei, majorpi, minorpi, tmaxi, ovmi) in zip(\n nids, eids, fibers, nx, ny, txy, angle, majorp, minorp, tmax, ovm):\n [nxi, nyi, txyi, majorpi, minorpi, tmaxi, ovmi] = write_floats_10e([\n nxi, nyi, txyi, majorpi, minorpi, tmaxi, ovmi])\n if nid > nid_old:\n f06_file.write(\n '0%8s %8s %4s %-10s %-10s %-10s %8.4f %10s %10s %10s %s\\n' % (\n nid, eid, fiber, nxi, nyi, txyi, anglei, majorpi, minorpi,\n tmaxi, ovmi))\n else:\n f06_file.write(\n ' %8s %8s %4s %-10s %-10s %-10s %8.4f %10s %10s %10s %s\\n' % (\n '', '', fiber, nxi, nyi, txyi, anglei, majorpi, minorpi,\n tmaxi, ovmi))\n nid_old = nid\n f06_file.write(page_stamp % page_num)\n page_num += 1\n return page_num - 1\n\n def write_op2(self, op2_file, op2_ascii, itable, new_result,\n date, is_mag_phase=False, endian='>'):\n \"\"\"writes an OP2\"\"\"\n import inspect\n from struct import Struct\n frame = inspect.currentframe()\n call_frame = inspect.getouterframes(frame, 2)\n op2_ascii.write(f'{self.__class__.__name__}.write_op2: {call_frame[1][3]}\\n')\n\n if itable == -1:\n #print('***************', itable)\n self._write_table_header(op2_file, op2_ascii, date)\n itable = -3\n\n #if isinstance(self.nonlinear_factor, float):\n #op2_format = '%sif' % (7 * self.ntimes)\n #raise NotImplementedError()\n #else:\n #op2_format = 'i21f'\n #s = Struct(op2_format)\n\n #eids2 = self.element_node[:, 0]\n #nodes = self.element_node[:, 1]\n #nelements_nodes = len(nodes)\n\n #eids3 = self.element_cid[:, 0]\n #cids3 = self.element_cid[:, 1]\n\n # table 4 info\n #ntimes = self.data.shape[0]\n #nnodes = self.data.shape[1]\n #nelements = len(np.unique(eids2))\n\n # 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm\n #ntotal = ((nnodes * 21) + 1) + (nelements * 4)\n #nnodes_centroid = self.nnodes_per_element\n #nnodes_no_centroid = self.nnodes_per_element_no_centroid\n nnodes = self.data.shape[1]\n #ntotali = 11\n ntotali = self.num_wide\n assert ntotali == 11, ntotali\n ntotal = ntotali * nnodes\n\n\n #print('shape = %s' % str(self.data.shape))\n #assert nnodes > 1, nnodes\n #assert self.ntimes == 1, self.ntimes\n\n op2_ascii.write(f' ntimes = {self.ntimes}\\n')\n ntimes = self.ntimes\n\n #print('ntotal=%s' % (ntotal))\n if not self.is_sort1:\n raise NotImplementedError('SORT2')\n #op2_format = endian + b'2i6f'\n\n #idtype = self.element_cid.dtype\n fdtype = self.data.dtype\n #print(self.size)\n if self.size == fdtype.itemsize:\n grid_bytes = b'GRID'\n else:\n warnings.warn(f'downcasting {self.class_name}...')\n idtype = np.int32(1)\n fdtype = np.float32(1.0)\n grid_bytes = b'GRID'\n\n #[nids, eids, fibers, nx, ny, txy, angle, majorp, minorp, tmax, ovm]\n nids = self.node_element[:, 0]\n eids = self.node_element[:, 1]\n nids_device = nids * 10 + self.device_code\n\n nids_device\n\n # speed up transient cases, but slightly slows down static cases\n data_out = np.empty((nnodes, 11), dtype=fdtype)\n # setting:\n # - [nid_device, eids, location_bytes]\n data_out[:, 0] = nids_device\n data_out[:, 1] = eids\n location_bytes = np.array([loc.encode('ascii') for loc in self.location])\n data_out[:, 2] = location_bytes.view(fdtype)\n\n\n #nx = self.data[itime, :, 0]\n #ny = self.data[itime, :, 1]\n #txy = self.data[itime, :, 2]\n #angle = self.data[itime, :, 3]\n #majorp = self.data[itime, :, 4]\n #minorp = self.data[itime, :, 5]\n #tmax = self.data[itime, :, 6]\n #ovm = self.data[itime, :, 7]\n #fibers = self.location\n\n #cen_array = np.full(nelements, grid_bytes, dtype='|S4')\n #nnodes_no_centroid_array = 
np.full(nelements, nnodes_no_centroid, dtype=idtype)\n\n #element_wise_data = to_column_bytes([\n #element_device, # ints\n #cids3, # ints\n #cen_array, # bytes\n #nnodes_no_centroid_array, # ints\n #], fdtype, debug=False)\n\n # we could tack the nodes on, so we don't have to keep stacking it\n # but we run into issues with datai\n #\n # total=nelements_nodes\n #nodes_view = nodes.view(fdtype).reshape(nelements, nnodes_centroid)\n #inode = np.arange(nnodes_centroid)\n #data_out[:, 4+inode*21] = nodes_view[:, inode]\n\n op2_ascii.write(f'nnodes={nnodes:d}\\n')\n struct_i = Struct('i')\n struct_13i = Struct('13i')\n for itime in range(self.ntimes):\n self._write_table_3(op2_file, op2_ascii, new_result, itable, itime)\n\n # record 4\n #print('stress itable = %s' % itable)\n itable -= 1\n header = [4, itable, 4,\n 4, 1, 4,\n 4, 0, 4,\n 4, ntotal, 4,\n 4 * ntotal]\n op2_file.write(struct_13i.pack(*header))\n op2_ascii.write('r4 [4, 0, 4]\\n')\n op2_ascii.write(f'r4 [4, {itable:d}, 4]\\n')\n op2_ascii.write(f'r4 [4, {4 * ntotal:d}, 4]\\n')\n\n\n # stack each output by columns and fix any dtypes\n #datai2 = datai.reshape(nelements, 21*nnodes_centroid)\n #data_out = np.hstack([element_wise_data, datai2])\n #data_out[:, 4:] = datai2\n\n # switch datai to element format and put it in the output buffer\n data_out[:, 3:] = self.data[itime, :, :]\n assert data_out.size == ntotal\n op2_file.write(data_out)\n\n itable -= 1\n header = [4 * ntotal,]\n op2_file.write(struct_i.pack(*header))\n op2_ascii.write('footer = %s\\n' % header)\n new_result = False\n return itable\n\n def __eq__(self, table): # pragma: no cover\n assert self.is_sort1 == table.is_sort1\n self._eq_header(table)\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n i = 0\n if self.is_sort1:\n for itime in range(ntimes):\n for inid, (nid, eid) in enumerate(self.node_element):\n t1 = self.data[itime, inid, :]\n t2 = table.data[itime, inid, :]\n (nx1, ny1, txy1, majorp1, minorp1, tmax1, ovm1) = t1\n (nx2, ny2, txy2, majorp2, minorp2, tmax2, ovm2) = t2\n if not np.allclose(t1, t2):\n #if not np.array_equal(t1, t2):\n msg += '%s %s\\n (%s, %s, %s, %s, %s, %s, %s)\\n (%s, %s, %s, %s, %s, %s, %s)\\n' % (\n nid, eid,\n nx1, ny1, txy1, majorp1, minorp1, tmax1, ovm1,\n nx2, ny2, txy2, majorp2, minorp2, tmax2, ovm2)\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n def _get_f06_message(self, ogs_id: int, cid: int, axis: str) -> List[str]:\n raise NotImplementedError()\n\nclass GridPointSurfaceStressesArray(GridPointSurfaceArray):\n\n def get_headers(self) -> List[str]:\n headers = ['nx', 'ny', 'txy', 'angle', 'majorP', 'minorP', 'tmax', 'ovm']\n return headers\n\n def _get_f06_message(self, ogs_id: int, cid: int, axis: str) -> List[str]:\n msg = [\n f' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\\n',\n f'0 SURFACE X-AXIS X NORMAL(Z-AXIS) {axis} REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID {cid}\\n',\n ' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \\n',\n ' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\\n']\n #'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'\n #' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'\n #' 13683 *TOTALS* 6.366463E-12 0.0 
-1.364242E-12 0.0 0.0 0.0'\n return msg\n\n\n\nclass GridPointSurfaceStrainsArray(GridPointSurfaceArray):\n\n def get_headers(self) -> List[str]:\n headers = ['nx', 'ny', 'exy', 'angle', 'majorP', 'minorP', 'emax', 'evm']\n return headers\n\n def _get_f06_message(self, ogs_id: int, cid: int, axis: str) -> List[str]:\n msg = [\n f' S T R A I N S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\\n',\n #f' S T R E S S E S A T G R I D P O I N T S - - S U R F A C E {ogs_id:d}\\n',\n f'0 SURFACE X-AXIS X NORMAL(Z-AXIS) {axis} REFERENCE COORDINATE SYSTEM FOR SURFACE DEFINITION CID {cid}\\n',\n #' GRID ELEMENT STRESSES IN SURFACE SYSTEM PRINCIPAL STRESSES MAX \\n',\n ' GRID ELEMENT STRAINS IN SURFACE SYSTEM PRINCIPAL STRAINS MAX \\n',\n ' ID ID FIBRE NORMAL-X NORMAL-Y SHEAR-XY ANGLE MAJOR MINOR SHEAR VON MISES\\n']\n #'0 13683 3736 TRIAX6 4.996584E+00 0.0 1.203093E+02 0.0 0.0 0.0'\n #' 13683 3737 TRIAX6 -4.996584E+00 0.0 -1.203093E+02 0.0 0.0 0.0'\n #' 13683 *TOTALS* 6.366463E-12 0.0 -1.364242E-12 0.0 0.0 0.0'\n return msg\n\n\n\n\nclass GridPointStressesVolumePrincipalArray(ScalarObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)\n self.ntotal = 0\n self.ntimes = 0\n self.nelements = 0\n self.itotal = 0\n self.ielement = 0\n self.data = None\n self.itime = None\n self._times = None\n\n def get_headers(self) -> List[str]:\n headers = [\n 'lxa', 'lxb', 'lxc',\n 'lya', 'lyb', 'lyc',\n 'lza', 'lzb', 'lzc',\n 'sa', 'sb', 'sc',\n 'epr', 'ovm']\n return headers\n\n def __eq__(self, table): # pragma: no cover\n self._eq_header(table)\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n i = 0\n if self.is_sort1:\n for itime in range(ntimes):\n for inid, nid in enumerate(self.node):\n t1 = self.data[itime, inid, :]\n t2 = table.data[itime, inid, :]\n (lxa1, lxb1, lxc1, lya1, lyb1, lyc1, lza1, lzb1, lzc1, sa1, sb1, sc1, epr1, ovm1) = t1\n (lxa2, lxb2, lxc2, lya2, lyb2, lyc2, lza2, lzb2, lzc2, sa2, sb2, sc2, epr2, ovm2) = t2\n if not np.allclose(t1, t2):\n #if not np.array_equal(t1, t2):\n msg += '%s\\n (%s, %s, %s, %s, %s, %s, %s)\\n (%s, %s, %s, %s, %s, %s, %s)\\n' % (\n nid,\n lxa1, lxb1, lxc1, lya1, lyb1, lyc1, lza1,\n lxa2, lxb2, lxc2, lya2, lyb2, lyc2, lza2)\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n def _reset_indices(self) -> None:\n self.itotal = 0\n self.ielement = 0\n\n @property\n def is_real(self) -> bool:\n return True\n @property\n def is_complex(self) -> bool:\n return False\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the GridPointStressesArray\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node = np.zeros(self.ntotal, dtype=idtype)\n #lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm\n self.data = np.zeros((self.ntimes, self.ntotal, 14), 
dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n\n self._times = np.zeros(self.ntimes, dtype=dtype)\n\n def get_stats(self, short: bool=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n f' ntimes: {self.ntimes:d}\\n',\n f' ntotal: {self.ntotal:d}\\n',\n ]\n\n ntimes, nelements, _ = self.data.shape\n assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)\n assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n n = len(headers)\n msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (ntimes_word, n, n, str(', '.join(headers))))\n msg.append(f' node.shape = {self.node.shape}\\n')\n msg.append(f' location.shape = {self.location.shape}\\n')\n msg.append(f' data.shape = {self.data.shape}\\n')\n msg += self.get_data_code()\n return msg\n\n def add_sort1(self, dt, nid, lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm):\n assert isinstance(nid, int) and nid > 0, 'dt=%s nid=%s' % (dt, nid)\n self._times[self.itime] = dt\n self.node[self.itotal] = nid\n self.data[self.itime, self.itotal, :] = [lxa, lxb, lxc, lya, lyb, lyc, lza, lzb, lzc, sa, sb, sc, epr, ovm]\n self.itotal += 1\n\n #def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n #page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):\n #pass\n\n\nclass GridPointStressesVolumeDirectArray(ScalarObject):\n def __init__(self, data_code, is_sort1, isubcase, dt):\n ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)\n self.ntotal = 0\n self.ntimes = 0\n self.nelements = 0\n self.itotal = 0\n self.ielement = 0\n self.data = None\n self.itime = None\n self._times = None\n\n def get_headers(self) -> List[str]:\n headers = ['ox', 'oy', 'oz', 'txy', 'tyz', 'txz', 'pressure', 'ovm']\n return headers\n\n def _reset_indices(self) -> None:\n self.itotal = 0\n self.ielement = 0\n\n @property\n def is_real(self) -> bool:\n return True\n @property\n def is_complex(self) -> bool:\n return False\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the GridPointStressesArray\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n self.nelements //= self.ntimes\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self.node = np.zeros(self.ntotal, dtype=idtype)\n #oxx, oyy, txy, angle, major, minor, ovm\n self.data = np.zeros((self.ntimes, self.ntotal, 8), dtype=fdtype)\n self.location = np.empty(self.ntotal, dtype='U8')\n self._times = np.zeros(self.ntimes, dtype=dtype)\n\n def get_stats(self, short: bool=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n f' ntimes: {self.ntimes:d}\\n',\n f' ntotal: {self.ntotal:d}\\n',\n ]\n\n ntimes, nelements, _ = self.data.shape\n assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)\n 
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n n = len(headers)\n msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (ntimes_word, n, n, str(', '.join(headers))))\n msg.append(f' node.shape = {self.node.shape}\\n')\n msg.append(f' location.shape = {self.location.shape}\\n')\n msg.append(f' data.shape = {self.data.shape}\\n')\n msg += self.get_data_code()\n return msg\n\n def add_sort1(self, dt, nid, nx, ny, nz, txy, tyz, txz, pressure, ovm):\n assert isinstance(nid, int) and nid > 0, 'dt=%s nid=%s' % (dt, nid)\n self._times[self.itime] = dt\n self.node[self.itotal] = nid\n self.data[self.itime, self.itotal, :] = [nx, ny, nz, txy, tyz, txz, pressure, ovm]\n self.itotal += 1\n\n def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',\n page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):\n \"\"\"\n ' D I R E C T S T R E S S E S A T G R I D P O I N T S - - V O L U M E 101'\n ' OUTPUT COORDINATE SYSTEM = 0 BASIC '\n ' GRID NORMAL-X NORMAL-Y NORMAL-Z SHEAR-XY SHEAR-YZ SHEAR-ZX MEAN VON MISES'\n ' ID PRESSURE'\n ' 1 1.455E+03 -1.548E+02 -2.927E+02 -1.573E+01 3.326E+01 -3.438E+03 -3.357E+02 6.188E+03'\n ' 2 1.093E+03 -1.996E+02 -1.682E+02 1.542E+02 5.962E+01 -4.104E+03 -2.417E+02 7.227E+03'\n \"\"\"\n if header is None:\n header = []\n\n\n cid = self.refid\n #axis_int = self.oCoord\n #axis_map = {0 : 'X', 1 : 'Y', 2 : 'Z'}\n #axis = axis_map[axis_int]\n msg = [\n ' D I R E C T S T R E S S E S A T G R I D P O I N T S - - V O L U M E %3i\\n'\n ' OUTPUT COORDINATE SYSTEM = %7i ELEMENT \\n'\n ' GRID NORMAL-X NORMAL-Y NORMAL-Z SHEAR-XY SHEAR-YZ SHEAR-ZX MEAN VON MISES\\n'\n ' ID PRESSURE\\n' % (\n #' 8086 6.136E-02 2.131E-01 8.353E-02 -2.268E+00 -2.274E-13 1.525E-13 -1.193E-01 3.930E+00'\n self.ogs_id, cid)\n ]\n\n ntimes = self.data.shape[0]\n\n nids = self.node\n zero = ' '\n for itime in range(ntimes):\n dt = self._times[itime]\n header = _eigenvalue_header(self, header, itime, ntimes, dt)\n f06_file.write(''.join(header + msg))\n\n nx = self.data[itime, :, 0]\n ny = self.data[itime, :, 1]\n nz = self.data[itime, :, 2]\n txy = self.data[itime, :, 3]\n tyz = self.data[itime, :, 4]\n txz = self.data[itime, :, 5]\n pressure = self.data[itime, :, 6]\n ovm = self.data[itime, :, 7]\n for (nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi) in zip(\n nids, nx, ny, nz, txy, tyz, txz, pressure, ovm):\n [nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi] = write_floats_10e([\n nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi])\n\n f06_file.write('%s%8s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-s\\n' % (\n zero, nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi.rstrip()))\n f06_file.write(page_stamp % page_num)\n page_num += 1\n return page_num - 1\n\n def __eq__(self, table): # pragma: no cover\n assert self.is_sort1 == table.is_sort1\n self._eq_header(table)\n if not np.array_equal(self.data, table.data):\n msg = 'table_name=%r class_name=%s\\n' % (self.table_name, self.__class__.__name__)\n msg += '%s\\n' % str(self.code_information())\n ntimes = self.data.shape[0]\n\n i = 0\n if self.is_sort1:\n for itime in range(ntimes):\n for inid, nid in enumerate(self.node):\n t1 = self.data[itime, 
inid, :]\n t2 = table.data[itime, inid, :]\n (nx1, ny1, nz1, txy1, tyz1, txz1, pressure1, ovm1) = t1\n (nx2, ny2, nz2, txy2, tyz2, txz2, pressure2, ovm2) = t2\n if not np.allclose(t1, t2):\n #if not np.array_equal(t1, t2):\n msg += '%s\\n (%s, %s, %s, %s, %s, %s, %s, %s)\\n (%s, %s, %s, %s, %s, %s, %s, %s)\\n' % (\n nid,\n nx1, ny1, nz1, txy1, tyz1, txz1, pressure1, ovm1,\n nx2, ny2, nz2, txy2, tyz2, txz2, pressure2, ovm2)\n i += 1\n if i > 10:\n print(msg)\n raise ValueError(msg)\n else:\n raise NotImplementedError(self.is_sort2)\n if i > 0:\n print(msg)\n raise ValueError(msg)\n return True\n\n#msg = [\n #' P R I N C I P A L G R I D P O I N T S T R E S S D I S C O N T I N U I T I E S - - V O L U M E %s\\n'\n #' OUTPUT COORDINATE SYSTEM = %7i ELEMENT \\n'\n #' GRID PRINCIPAL STRESS DISCONTINUITY MEAN VON MISES ERROR\\n'\n #' ID A B C PRESSURE EST.\\n' % (\n #ivolume, cid)\n #' 8086 5.448E-09 9.886E-08 2.026E-15 2.484E-09 1.086E-07 5.716E-08'\n#]\n# not sure what result this is for\n#zero = ' '\n#f06_file.write('%s%8s %-10s %-10s %-10s %-10s %-10s %-10s %-10s %-s\\n' % (\n #zero, nid, nxi, nyi, nzi, txyi, tyzi, txzi, pressurei, ovmi.rstrip()))\n\nGridPointStressesVolumeDiscontinutiesArray = None # tCode=34\n\nclass GridPointStressesSurfaceDiscontinutiesArray(ScalarObject): # tCode=35\n def __init__(self, data_code, is_sort1, isubcase, dt):\n ScalarObject.__init__(self, data_code, isubcase, apply_data_code=True)\n self.ntotal = 0\n self.ntimes = 0\n self.nelements = 0\n self.itotal = 0\n self.ielement = 0\n self.data = None\n self.itime = None\n #self.node_element = None\n self._times = None\n\n def get_headers(self) -> List[str]:\n headers = ['oxx', 'oyy', 'ozz', 'txy', 'pressure']\n return headers\n\n def _reset_indices(self) -> None:\n self.itotal = 0\n self.ielement = 0\n\n @property\n def is_real(self) -> bool:\n return True\n @property\n def is_complex(self) -> bool:\n return False\n\n def build(self):\n \"\"\"sizes the vectorized attributes of the GridPointStressesArray\"\"\"\n #print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))\n #print('self.IDs', self.data)\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n\n assert self.ntimes > 0, 'ntimes=%s' % self.ntimes\n assert self.nelements > 0, 'nelements=%s' % self.nelements\n assert self.ntotal > 0, 'ntotal=%s' % self.ntotal\n #self.names = []\n self.nelements //= self.ntimes\n\n self.node = np.zeros(self.ntotal, dtype='int32')\n #oxx, oyy, ozz, txy, pressure\n self.data = np.zeros((self.ntimes, self.ntotal, 5), dtype='float32')\n self.location = np.empty(self.ntotal, dtype='U8')\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n\n self._times = np.zeros(self.ntimes, dtype=dtype)\n\n def get_stats(self, short: bool=False) -> List[str]:\n if not self.is_built:\n return [\n '<%s>\\n' % self.__class__.__name__,\n f' ntimes: {self.ntimes:d}\\n',\n f' ntotal: {self.ntotal:d}\\n',\n ]\n\n ntimes, nelements, _ = self.data.shape\n assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)\n assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)\n\n msg = []\n if self.nonlinear_factor not in (None, np.nan): # transient\n msg.append(' type=%s ntimes=%i nelements=%i\\n'\n % (self.__class__.__name__, ntimes, nelements))\n ntimes_word = 'ntimes'\n else:\n msg.append(' type=%s nelements=%i\\n'\n % (self.__class__.__name__, nelements))\n ntimes_word = '1'\n headers = self.get_headers()\n n = len(headers)\n 
msg.append(' data: [%s, nelements, %i] where %i=[%s]\\n' % (ntimes_word, n, n, str(', '.join(headers))))\n msg.append(f' node.shape = {self.node.shape}\\n')\n msg.append(f' location.shape = {self.location.shape}\\n')\n msg.append(f' data.shape = {self.data.shape}\\n')\n msg += self.get_data_code()\n return msg\n\n def add_sort1(self, dt, nid, oxx, oyy, ozz, txy, pressure):\n assert isinstance(nid, int) and nid > 0, 'dt=%s nid=%s' % (dt, nid)\n self._times[self.itime] = dt\n self.node[self.itotal] = nid\n self.data[self.itime, self.itotal, :] = [oxx, oyy, ozz, txy, pressure]\n self.itotal += 1\n\nclass GridPointStrainsVolumePrincipalArray(GridPointStressesVolumePrincipalArray):\n pass\n\nclass GridPointStrainsVolumeDirectArray(GridPointStressesVolumeDirectArray):\n pass\n\nGridPointStrainsVolumeDiscontinutiesArray = None\n\nclass GridPointStrainsSurfaceDiscontinutiesArray(GridPointStressesSurfaceDiscontinutiesArray):\n pass\n"
] |
[
[
"numpy.isnan",
"numpy.empty",
"numpy.array_equal",
"numpy.zeros",
"numpy.allclose",
"numpy.float32",
"numpy.int32"
]
] |
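The `write_op2` method in the `GridPointSurfaceArray` code above writes each record-4 block as a 13-integer header, the raw data array, and a single-integer footer via `struct`. Below is a minimal, standalone sketch of that packing pattern; the `itable` and `ntotal` values are placeholders chosen only to exercise the format, not values taken from a real OP2 file.

    from struct import Struct

    # Placeholder values; in write_op2 they come from the table state and
    # from num_wide * nnodes respectively.
    itable = -4
    ntotal = 44          # number of 4-byte words in the data record

    # Record-4 header, mirroring the 13-int list built in write_op2:
    # [4, itable, 4,  4, 1, 4,  4, 0, 4,  4, ntotal, 4,  4*ntotal]
    struct_13i = Struct('13i')
    header = [4, itable, 4,
              4, 1, 4,
              4, 0, 4,
              4, ntotal, 4,
              4 * ntotal]
    header_bytes = struct_13i.pack(*header)
    assert len(header_bytes) == 13 * 4

    # Footer written after the data block: a single int equal to 4*ntotal.
    struct_i = Struct('i')
    footer_bytes = struct_i.pack(4 * ntotal)
    assert len(footer_bytes) == 4

The data itself goes between header and footer as a contiguous float array (`data_out` in the source), so the byte count `4 * ntotal` bounds the record on both sides.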
jmetteUni/CoTeDe-modified
|
[
"3f1142811f051eb023337b7f80a513ad53866d68",
"3f1142811f051eb023337b7f80a513ad53866d68"
] |
[
"cotede/qctests/gradient_depthconditional.py",
"cotede/qctests/morello2014.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\n\n\"\"\"\n\nimport logging\n\nimport numpy as np\nfrom numpy import ma\n\nfrom .qctests import QCCheckVar\nfrom .gradient import curvature\n\nmodule_logger = logging.getLogger(__name__)\n\n\nclass GradientDepthConditional(QCCheckVar):\n def set_features(self):\n self.features = {\"gradient\": curvature(self.data[self.varname])}\n\n def test(self):\n self.flags = {}\n\n flag = np.zeros(np.shape(self.data[self.varname]), dtype=\"i1\")\n feature = np.absolute(self.features[\"gradient\"])\n\n # ---- Shallow zone -----------------\n threshold = self.cfg[\"shallow_max\"]\n flag[\n np.nonzero(\n (np.atleast_1d(self[\"PRES\"]) <= self.cfg[\"pressure_threshold\"])\n & (feature > threshold)\n )\n ] = self.flag_bad\n flag[\n np.nonzero(\n (np.atleast_1d(self[\"PRES\"]) <= self.cfg[\"pressure_threshold\"])\n & (feature <= threshold)\n )\n ] = self.flag_good\n # ---- Deep zone --------------------\n threshold = self.cfg[\"deep_max\"]\n flag[\n np.nonzero(\n (np.atleast_1d(self[\"PRES\"]) > self.cfg[\"pressure_threshold\"])\n & (feature > threshold)\n )\n ] = self.flag_bad\n flag[\n np.nonzero(\n (np.atleast_1d(self[\"PRES\"]) > self.cfg[\"pressure_threshold\"])\n & (feature <= threshold)\n )\n ] = self.flag_good\n\n x = np.atleast_1d(self.data[self.varname])\n flag[ma.getmaskarray(x) | ~np.isfinite(x)] = 9\n self.flags[\"gradient_depthconditional\"] = flag\n",
"# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"A hybrid fuzzy logic method\n\nThis method applies a fuzzy logic classification with a modified deffuzification in the end, and it was proposed in the following sequence of papers:\n\n- Timms, G.P., de Souza, P.a., Reznik, L., Smith, D.V., 2011. Auto- mated data quality assessment of marine sensors. Sensors 11, 9589–9602. doi:10.3390/s111009589.\n- Morello, E., Lynch, T., Slawinski, D., Howell, B., Hughes, D., Timms, G., 2011. Quantitative quality control (qc) procedures for the australian national reference stations: Sensor data, in: OCEANS 2011, IEEE, Waikoloa, HI. pp. 1–7.\n- Morello, E.B., Galibert, G., Smith, D., Ridgway, K.R., Howell, B., Slawin- ski, D., Timms, G.P., Evans, K., Lynch, T.P., 2014. Quality Control (QC) procedures for Australias National Reference Stations sensor dataComparing semi-autonomous systems to an expert oceanographer. Methods Oceanogr. 9, 17–33. doi:10.1016/j.mio.2014.09.001.\n\"\"\"\n\nimport logging\n\nimport numpy as np\nfrom numpy import ma\n\nfrom ..fuzzy import fuzzyfy\nfrom .core import QCCheckVar\nfrom .gradient import gradient\nfrom .spike import spike\nfrom .woa_normbias import woa_normbias\n\nmodule_logger = logging.getLogger(__name__)\n\n\ndef morello2014(features, cfg=None):\n \"\"\"\n \"\"\"\n if (cfg is None) or (\"output\" not in cfg) or (\"features\" not in cfg):\n module_logger.debug(\"Using original Morello2014 coefficients\")\n cfg = {\n \"output\": {\"low\": None, \"high\": None},\n \"features\": {\n \"spike\": {\"weight\": 1, \"low\": {'type': 'zmf', 'params': [0.07, 0.2]}, \"high\": {'type': 'zmf', 'params': [2, 6]}},\n \"woa_normbias\": {\"weight\": 1, \"low\": {'type': 'zmf', 'params': [3, 4]}, \"high\": {'type': 'zmf', 'params': [5, 6]}},\n \"gradient\": {\"weight\": 1, \"low\": {'type': 'zmf', 'params': [0.5, 1.5]}, \"high\": {'type': 'zmf', 'params':[3, 4]}},\n },\n }\n\n if not np.all([f in features for f in cfg[\"features\"]]):\n module_logger.warning(\n \"Not all features (%s) required by morello2014 are available\".format(\n cfg[\"features\"].keys()\n )\n )\n raise KeyError\n\n f = fuzzyfy(data=features, features=cfg[\"features\"], output=cfg[\"output\"])\n\n for level in f:\n if isinstance(f[level], ma.MaskedArray):\n mask = f[level].mask\n f[level] = f[level].data\n f[level][mask] = np.nan\n\n return f\n\n\nclass Morello2014(QCCheckVar):\n def set_features(self):\n self.features = {}\n for v in [f for f in self.cfg[\"features\"] if f not in self.features]:\n if v == \"woa_bias\":\n woa_comparison = woa_normbias(self.data, self.varname, self.attrs)\n self.features[v] = woa_comparison[\"woa_bias\"]\n elif v == \"woa_normbias\":\n woa_comparison = woa_normbias(self.data, self.varname, self.attrs)\n self.features[v] = woa_comparison[\"woa_normbias\"]\n elif v == \"spike\":\n self.features[v] = spike(self.data[self.varname])\n elif v == \"gradient\":\n self.features[v] = gradient(self.data[self.varname])\n\n\n def test(self):\n self.flags = {}\n\n cfg = self.cfg\n flag = np.zeros(np.shape(self.data[self.varname]), dtype=\"i1\")\n\n try:\n f = morello2014(self.features, self.cfg)\n except:\n self.flags[\"morello2014\"] = flag\n return\n\n # This is how Timms and Morello defined the Fuzzy Logic approach\n flag[(f[\"low\"] > 0.5) & (f[\"high\"] < 0.3)] = 2\n flag[(f[\"low\"] > 0.9)] = 1\n # Everything else is flagged 3\n flag[(f[\"low\"] <= 0.5) | (f[\"high\"] >= 0.3)] = 3\n # Missing check if threshold was crossed, to flag as 4\n # The 
thresholds coincide with the end of the ramp for the fuzzy set\n # high, hence we can simply\n flag[(f[\"high\"] == 1.0)] = 4\n\n self.flags[\"morello2014\"] = flag\n"
] |
[
[
"numpy.ma.getmaskarray",
"numpy.shape",
"numpy.atleast_1d",
"numpy.isfinite",
"numpy.absolute"
],
[
"numpy.all",
"numpy.shape"
]
] |
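The `Morello2014.test` method in `cotede/qctests/morello2014.py` above turns the fuzzy memberships into integer flags with four threshold rules. A minimal numpy sketch of that defuzzification step, using made-up membership values in place of the real `fuzzyfy()` output:

    import numpy as np

    # Hypothetical membership values; in the real check they come from
    # fuzzyfy(data=features, ...) inside morello2014().
    low = np.array([0.95, 0.70, 0.20, 0.40])
    high = np.array([0.00, 0.10, 0.60, 1.00])

    flag = np.zeros(low.shape, dtype="i1")
    flag[(low > 0.5) & (high < 0.3)] = 2    # probably good
    flag[low > 0.9] = 1                     # good
    flag[(low <= 0.5) | (high >= 0.3)] = 3  # everything else
    flag[high == 1.0] = 4                   # end of the "high" ramp crossed
    print(flag)                             # -> [1 2 3 4]

The rule order matters: later assignments overwrite earlier ones, which is why the `high == 1.0` case ends up flagged 4 even though the third rule already marked it 3.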
sparkroy/DAML
|
[
"8a6d0869771bb35d2084d055123bbe790ecb8cf7"
] |
[
"lib/common/utils.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\nfrom collections import defaultdict\nimport itertools\nimport os\nimport numpy as np\nimport yaml\nfrom lib.functions.triplet_loss import triplet_loss as F_tloss\nfrom lib.common.evaluation import evaluate_cluster\n\nimport chainer\nimport torch\nimport torch.nn.functional as F\n\n\nimport copy\nimport os\n\nimport matplotlib.pyplot as plt\nimport six\nimport time\n\nfrom chainer import cuda\nfrom chainer import Variable\nfrom tqdm import tqdm\nimport random\nfrom sklearn.preprocessing import LabelEncoder\n\ndef load_params(filename):\n with open(filename) as f:\n params = yaml.load(f)\n return params\n\n\ndef make_positive_pairs(num_classes, num_examples_per_class, repetition=1):\n c = num_classes\n n = num_examples_per_class\n num_pairs_per_class = n * (n - 1) // 2\n\n pairs_posi_class0 = np.array(list(itertools.combinations(range(n), 2)))\n offsets = n * np.repeat(np.arange(c), num_pairs_per_class)[:, None]\n pairs_posi = np.tile(pairs_posi_class0, (c, 1)) + offsets\n return np.tile(pairs_posi, (repetition, 1))\n\n\ndef iter_combinatorial_pairs(queue, num_examples, batch_size, interval,\n num_classes, augment_positive=False):\n num_examples_per_class = num_examples // num_classes\n pairs = np.array(list(itertools.combinations(range(num_examples), 2)))\n\n if augment_positive:\n additional_positive_pairs = make_positive_pairs(\n num_classes, num_examples_per_class, num_classes - 1)\n pairs = np.concatenate((pairs, additional_positive_pairs))\n\n num_pairs = len(pairs)\n num_batches = num_pairs // batch_size\n perm = np.random.permutation(num_pairs)\n for i, batch_indexes in enumerate(np.array_split(perm, num_batches)):\n if i % interval == 0:\n x, c = queue.get()\n x = x.astype(np.float32) / 255.0\n c = c.ravel()\n indexes0, indexes1 = pairs[batch_indexes].T\n x0, x1, c0, c1 = x[indexes0], x[indexes1], c[indexes0], c[indexes1]\n t = np.int32(c0 == c1) # 1 if x0 and x1 are same class, 0 otherwise\n yield x0, x1, t\n\n\nclass NPairMCIndexMaker(object):\n def __init__(self, batch_size, num_classes, num_per_class):\n self.batch_size = batch_size # number of examples in a batch\n self.num_classes = num_classes # number of classes\n self.num_per_class = num_per_class # number of examples per class\n\n def get_epoch_indexes(self):\n B = self.batch_size\n K = self.num_classes\n M = self.num_per_class\n N = K * M # number of total examples\n num_batches = M * int(K // B) # number of batches per epoch\n\n indexes = np.arange(N, dtype=np.int32).reshape(K, M)\n epoch_indexes = []\n for m in range(M):\n perm = np.random.permutation(K)\n c_batches = np.array_split(perm, num_batches // M)\n for c_batch in c_batches:\n b = len(c_batch) # actual number of examples of this batch\n indexes_anchor = M * c_batch + m\n\n positive_candidates = np.delete(indexes[c_batch], m, axis=1)\n indexes_positive = positive_candidates[\n range(b), np.random.choice(M - 1, size=b)]\n\n epoch_indexes.append((indexes_anchor, indexes_positive))\n\n return epoch_indexes\n\n\nclass Logger(defaultdict):\n def __init__(self, root_dir_path, **kwargs):\n super(Logger, self).__init__(list, kwargs)\n if not os.path.exists(root_dir_path):\n os.makedirs(root_dir_path)\n self._root_dir_path = root_dir_path\n\n def __getattr__(self, key):\n return self[key]\n\n def __setattr__(self, key, value):\n self[key] = value\n\n def __str__(self):\n keys = filter(lambda key: not key.startswith('_'), self)\n return \", \".join([\"{}:{}\".format(key, self[key]) for key in keys])\n\n def save(self, dir_name):\n dir_path = 
os.path.join(self._root_dir_path, dir_name)\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n others = []\n for key, value in self.items():\n if key.startswith('_'):\n continue\n\n if isinstance(value, (np.ndarray, list)):\n np.save(os.path.join(dir_path, key + \".npy\"), value)\n elif isinstance(value, (chainer.Chain, chainer.ChainList)):\n model_path = os.path.join(dir_path, \"model.npz\")\n chainer.serializers.save_npz(model_path, value)\n elif isinstance(value, chainer.Optimizer):\n optimizer_path = os.path.join(dir_path, \"optimizer.npz\")\n chainer.serializers.save_npz(optimizer_path, value)\n else:\n others.append(\"{}: {}\".format(key, value))\n\n with open(os.path.join(dir_path, \"log.txt\"), \"a\") as f:\n text = \"\\n\".join(others) + \"\\n\"\n f.write(text)\n\n\nclass UniformDistribution(object):\n def __init__(self, low, high):\n assert low <= high\n self.low = low\n self.high = high\n\n def rvs(self, size=None, random_state=None):\n uniform = random_state.uniform if random_state else np.random.uniform\n return uniform(self.low, self.high, size)\n\n\nclass LogUniformDistribution(object):\n def __init__(self, low, high):\n assert low <= high\n self.low = low\n self.high = high\n\n def rvs(self, size=None, random_state=None):\n uniform = random_state.uniform if random_state else np.random.uniform\n return np.exp(uniform(np.log(self.low), np.log(self.high), size))\n\n\ndef iterate_forward(model, dis_model, epoch_iterator, normalize=False, epoch=20):\n xp = model.xp\n y_batches = []\n c_batches = []\n for batch in tqdm(copy.copy(epoch_iterator)):\n x_batch_data, c_batch_data = batch\n x_batch = Variable(xp.asarray(x_batch_data))\n y_batch = model(x_batch)\n if epoch >= 20:\n y_batch = dis_model(y_batch)\n if normalize:\n y_batch_data = y_batch.data / xp.linalg.norm(\n y_batch.data, axis=1, keepdims=True)\n else:\n y_batch_data = y_batch.data\n y_batches.append(y_batch_data)\n y_batch = None\n c_batches.append(c_batch_data)\n y_data = cuda.to_cpu(xp.concatenate(y_batches))\n c_data = np.concatenate(c_batches)\n return y_data, c_data\n\ndef compute_soft_hard_retrieval(distance_matrix, labels, label_batch=None):\n softs = []\n hards = []\n retrievals = []\n\n if label_batch is None:\n label_batch = labels\n distance_matrix = cuda.to_cpu(distance_matrix)\n labels = cuda.to_cpu(labels)\n label_batch = cuda.to_cpu(label_batch)\n\n K = 11 # \"K\" for top-K\n for d_i, label_i in zip(distance_matrix, label_batch):\n top_k_indexes = np.argpartition(d_i, K)[:K]\n sorted_top_k_indexes = top_k_indexes[np.argsort(d_i[top_k_indexes])]\n ranked_labels = labels[sorted_top_k_indexes]\n # 0th entry is excluded since it is always 0\n ranked_hits = ranked_labels[1:] == label_i\n\n # soft top-k, k = 1, 2, 5, 10\n soft = [np.any(ranked_hits[:k]) for k in [1, 2, 5, 10]]\n softs.append(soft)\n # hard top-k, k = 2, 3, 4\n hard = [np.all(ranked_hits[:k]) for k in [2, 3, 4]]\n hards.append(hard)\n # retrieval top-k, k = 2, 3, 4\n retrieval = [np.mean(ranked_hits[:k]) for k in [2, 3, 4]]\n retrievals.append(retrieval)\n\n average_soft = np.array(softs).mean(axis=0)\n average_hard = np.array(hards).mean(axis=0)\n average_retrieval = np.array(retrievals).mean(axis=0)\n return average_soft, average_hard, average_retrieval\n\ndef lossfun_one_batch(model, gen_model, dis_model, opt, fea_opt, opt_gen ,opt_dis, params, batch, epoch = 100):\n # the first half of a batch are the anchors and the latters\n # are the positive examples corresponding to each anchor\n lambda1 = 1.0\n lambda2 = 1.0\n if params.loss 
== \"angular\":\n x_data, c_data = batch\n x_data = model.xp.asarray(x_data)\n \n y = model(x_data)\n y_a, y_p = F.split_axis(y, 2, axis=0)\n return angular_mc_loss_m(y_a, y_p, params.tradeoff,params.alpha)\n elif params.loss == \"triplet\":\n x_data, c_data = batch\n x_data = model.xp.asarray(x_data)\n batch = model(x_data)\n batchsize = len(batch)\n a, p, n = F.split_axis(batch, 3, axis=0)\n t_loss = F_tloss(a, p, n, params.alpha)\n batch_concat = F.concat([a, p, n], axis = 1)\n\n fake = gen_model(batch_concat)\n batch_fake = F.concat([a, p, fake], axis=0)\n embedding_fake = dis_model(batch_fake)\n\n loss_hard = l2_hard(batch_fake,batch)\n loss_reg = l2_norm(batch_fake,batch)\n loss_adv = adv_loss(embedding_fake)\n loss_gen = loss_hard + lambda1*loss_reg + lambda2*loss_adv\n loss_m = triplet_loss(embedding_fake)\n\n \n if epoch < 20:\n t_loss.backward()\n fea_opt.update()\n else: \n loss_gen.backward()\n loss_m.backward()\n opt.update()\n opt_gen.update()\n opt_dis.update()\n model.cleargrads()\n gen_model.cleargrads()\n dis_model.cleargrads()\n\n chainer.reporter.report({'loss_gen': loss_gen})\n chainer.reporter.report({'loss_dis': loss_m})\n return loss_gen, loss_m\n\ndef evaluate(model, dis_model, epoch_iterator, distance='euclidean', normalize=False,\n batch_size=10, return_distance_matrix=False, epoch=20):\n if distance not in ('cosine', 'euclidean'):\n raise ValueError(\"distance must be 'euclidean' or 'cosine'.\")\n\n with chainer.no_backprop_mode():\n with chainer.using_config('train', False):\n y_data, c_data = iterate_forward(\n model, dis_model, epoch_iterator, normalize=normalize, epoch=epoch)\n\n add_epsilon = True\n xp = cuda.get_array_module(y_data)\n num_examples = len(y_data)\n print(y_data.shape,c_data.shape)\n nmi, f1 = evaluate_cluster(y_data,c_data,98)\n return nmi, f1\n\ndef triplet_loss(y,alpha=1.0):\n a, p, n= split_to_three(y)\n\n distance = torch.sum((a - p) ** 2.0, dim = 1) - torch.sum((a - n) ** 2.0, dim = 1) +alpha\n return torch.mean(F.relu(distance)) / 2\n\ndef adv_loss(y,alpha=1.0):\n a, p, n = split_to_three(y)\n distance = -torch.sum((a - p) ** 2.0, dim = 1) + torch.sum((a - n) ** 2.0, dim = 1) - alpha\n\n return torch.mean(F.relu(distance)) / 2\n\ndef l2_norm(fake,batch):\n _, _, fake_n = split_to_three(fake)\n _, _, n = split_to_three(batch)\n\n l2 = torch.sum((fake_n - n) ** 2.0, dim = 1) \n\n return torch.mean(l2)\n\ndef l2_hard(fake,batch):\n _, _, fake_n= split_to_three(fake)\n a, _, _ = split_to_three(batch)\n\n l2 = torch.sum((fake_n - a) ** 2.0, dim = 1) \n\n return torch.mean(l2)\n\ndef split_to_three(tensor):\n #split along dim=0 into three pieces\n d = y.shape[0]\n a, p, n= torch.split(tensor, d//3, dim=0)\n return a, p, n"
] |
[
[
"numpy.random.choice",
"numpy.tile",
"numpy.mean",
"torch.sum",
"numpy.concatenate",
"numpy.log",
"numpy.arange",
"torch.nn.functional.relu",
"numpy.int32",
"numpy.array",
"numpy.delete",
"torch.nn.functional.concat",
"numpy.argsort",
"numpy.array_split",
"torch.nn.functional.split_axis",
"numpy.random.permutation",
"torch.split",
"numpy.any",
"numpy.argpartition",
"numpy.all",
"torch.mean"
]
] |
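Two details in `lib/common/utils.py` above keep the loss helpers from running as written: `split_to_three` refers to an undefined name `y` instead of its `tensor` argument, and the Chainer-style `F.split_axis`/`F.concat` calls are applied to torch tensors. The following self-contained PyTorch sketch shows the intended triplet-loss path with the split fixed; this is my reading of the intent, not the repository's own patch.

    import torch
    import torch.nn.functional as F

    def split_to_three(tensor: torch.Tensor):
        # Split along dim=0 into three equal pieces (anchor, positive, negative).
        d = tensor.shape[0]
        return torch.split(tensor, d // 3, dim=0)

    def triplet_loss(y: torch.Tensor, alpha: float = 1.0) -> torch.Tensor:
        a, p, n = split_to_three(y)
        distance = (torch.sum((a - p) ** 2.0, dim=1)
                    - torch.sum((a - n) ** 2.0, dim=1) + alpha)
        return torch.mean(F.relu(distance)) / 2

    # Toy usage: 6 embeddings stacked as [anchors; positives; negatives].
    emb = torch.randn(6, 128)
    print(triplet_loss(emb).item())

The adversarial variant (`adv_loss` in the same file) only flips the sign of the two distance terms.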
yougoforward/tensorpackwithmscnn
|
[
"3c61c31892a1d954678fcef22f1243ab252e0015",
"3c61c31892a1d954678fcef22f1243ab252e0015",
"8d5ae5cc2cfcf2e4e53b4d1064ac9e727f736d09"
] |
[
"tensorpack/dataflow/image.py",
"tensorpack/callbacks/param.py",
"examples/SpatialTransformer/mnist-addition.py"
] |
[
"# -*- coding: UTF-8 -*-\n# File: image.py\n\n\nimport numpy as np\nimport copy as copy_mod\nfrom contextlib import contextmanager\nfrom .base import RNGDataFlow\nfrom .common import MapDataComponent, MapData\nfrom ..utils import logger\nfrom ..utils.argtools import shape2d\n\n__all__ = ['ImageFromFile', 'AugmentImageComponent', 'AugmentImageCoordinates', 'AugmentImageComponents']\n\n\ndef _valid_coords(coords):\n assert coords.ndim == 2, coords.ndim\n assert coords.shape[1] == 2, coords.shape\n assert np.issubdtype(coords.dtype, np.float), coords.dtype\n\n\nclass ExceptionHandler:\n def __init__(self, catch_exceptions=False):\n self._nr_error = 0\n self.catch_exceptions = catch_exceptions\n\n @contextmanager\n def catch(self):\n try:\n yield\n except Exception:\n self._nr_error += 1\n if not self.catch_exceptions:\n raise\n else:\n if self._nr_error % 100 == 0 or self._nr_error < 10:\n logger.exception(\"Got {} augmentation errors.\".format(self._nr_error))\n\n\nclass ImageFromFile(RNGDataFlow):\n \"\"\" Produce images read from a list of files. \"\"\"\n def __init__(self, files, channel=3, resize=None, shuffle=False):\n \"\"\"\n Args:\n files (list): list of file paths.\n channel (int): 1 or 3. Will convert grayscale to RGB images if channel==3.\n resize (tuple): int or (h, w) tuple. If given, resize the image.\n \"\"\"\n assert len(files), \"No image files given to ImageFromFile!\"\n self.files = files\n self.channel = int(channel)\n self.imread_mode = cv2.IMREAD_GRAYSCALE if self.channel == 1 else cv2.IMREAD_COLOR\n if resize is not None:\n resize = shape2d(resize)\n self.resize = resize\n self.shuffle = shuffle\n\n def size(self):\n return len(self.files)\n\n def get_data(self):\n if self.shuffle:\n self.rng.shuffle(self.files)\n for f in self.files:\n im = cv2.imread(f, self.imread_mode)\n if self.channel == 3:\n im = im[:, :, ::-1]\n if self.resize is not None:\n im = cv2.resize(im, tuple(self.resize[::-1]))\n if self.channel == 1:\n im = im[:, :, np.newaxis]\n yield [im]\n\n\nclass AugmentImageComponent(MapDataComponent):\n \"\"\"\n Apply image augmentors on 1 image component.\n \"\"\"\n\n def __init__(self, ds, augmentors, index=0, copy=True, catch_exceptions=False):\n \"\"\"\n Args:\n ds (DataFlow): input DataFlow.\n augmentors (AugmentorList): a list of :class:`imgaug.ImageAugmentor` to be applied in order.\n index (int): the index of the image component to be augmented in the datapoint.\n copy (bool): Some augmentors modify the input images. 
When copy is\n True, a copy will be made before any augmentors are applied,\n to keep the original images not modified.\n Turn it off to save time when you know it's OK.\n catch_exceptions (bool): when set to True, will catch\n all exceptions and only warn you when there are too many (>100).\n Can be used to ignore occasion errors in data.\n \"\"\"\n if isinstance(augmentors, AugmentorList):\n self.augs = augmentors\n else:\n self.augs = AugmentorList(augmentors)\n\n exception_handler = ExceptionHandler(catch_exceptions)\n\n def func(x):\n with exception_handler.catch():\n if copy:\n x = copy_mod.deepcopy(x)\n return self.augs.augment(x)\n\n super(AugmentImageComponent, self).__init__(\n ds, func, index)\n\n def reset_state(self):\n self.ds.reset_state()\n self.augs.reset_state()\n\n\nclass AugmentImageCoordinates(MapData):\n \"\"\"\n Apply image augmentors on an image and a list of coordinates.\n Coordinates must be a Nx2 floating point array, each row is (x, y).\n \"\"\"\n\n def __init__(self, ds, augmentors, img_index=0, coords_index=1, copy=True, catch_exceptions=False):\n\n \"\"\"\n Args:\n ds (DataFlow): input DataFlow.\n augmentors (AugmentorList): a list of :class:`imgaug.ImageAugmentor` to be applied in order.\n img_index (int): the index of the image component to be augmented.\n coords_index (int): the index of the coordinate component to be augmented.\n copy, catch_exceptions: same as in :class:`AugmentImageComponent`\n \"\"\"\n if isinstance(augmentors, AugmentorList):\n self.augs = augmentors\n else:\n self.augs = AugmentorList(augmentors)\n\n exception_handler = ExceptionHandler(catch_exceptions)\n\n def func(dp):\n with exception_handler.catch():\n img, coords = dp[img_index], dp[coords_index]\n _valid_coords(coords)\n if copy:\n img, coords = copy_mod.deepcopy((img, coords))\n img, prms = self.augs._augment_return_params(img)\n dp[img_index] = img\n coords = self.augs._augment_coords(coords, prms)\n dp[coords_index] = coords\n return dp\n\n super(AugmentImageCoordinates, self).__init__(ds, func)\n\n def reset_state(self):\n self.ds.reset_state()\n self.augs.reset_state()\n\n\nclass AugmentImageComponents(MapData):\n \"\"\"\n Apply image augmentors on several components, with shared augmentation parameters.\n\n Example:\n\n .. code-block:: python\n\n ds = MyDataFlow() # produce [image(HWC), segmask(HW), keypoint(Nx2)]\n ds = AugmentImageComponents(\n ds, augs,\n index=(0,1), coords_index=(2,))\n\n \"\"\"\n\n def __init__(self, ds, augmentors, index=(0, 1), coords_index=(), copy=True, catch_exceptions=False):\n \"\"\"\n Args:\n ds (DataFlow): input DataFlow.\n augmentors (AugmentorList): a list of :class:`imgaug.ImageAugmentor` instance to be applied in order.\n index: tuple of indices of the image components.\n coords_index: tuple of indices of the coordinates components.\n copy, catch_exceptions: same as in :class:`AugmentImageComponent`\n \"\"\"\n if isinstance(augmentors, AugmentorList):\n self.augs = augmentors\n else:\n self.augs = AugmentorList(augmentors)\n self.ds = ds\n\n exception_handler = ExceptionHandler(catch_exceptions)\n\n def func(dp):\n dp = copy_mod.copy(dp) # always do a shallow copy, make sure the list is intact\n copy_func = copy_mod.deepcopy if copy else lambda x: x # noqa\n with exception_handler.catch():\n major_image = index[0] # image to be used to get params. 
TODO better design?\n im = copy_func(dp[major_image])\n im, prms = self.augs._augment_return_params(im)\n dp[major_image] = im\n for idx in index[1:]:\n dp[idx] = self.augs._augment(copy_func(dp[idx]), prms)\n for idx in coords_index:\n coords = copy_func(dp[idx])\n _valid_coords(coords)\n dp[idx] = self.augs._augment_coords(coords, prms)\n return dp\n\n super(AugmentImageComponents, self).__init__(ds, func)\n\n def reset_state(self):\n self.ds.reset_state()\n self.augs.reset_state()\n\n\ntry:\n import cv2\n from .imgaug import AugmentorList\nexcept ImportError:\n from ..utils.develop import create_dummy_class\n ImageFromFile = create_dummy_class('ImageFromFile', 'cv2') # noqa\n AugmentImageComponent = create_dummy_class('AugmentImageComponent', 'cv2') # noqa\n AugmentImageCoordinates = create_dummy_class('AugmentImageCoordinates', 'cv2') # noqa\n AugmentImageComponents = create_dummy_class('AugmentImageComponents', 'cv2') # noqa\n",
"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# File: param.py\n\n\nimport tensorflow as tf\nfrom abc import abstractmethod, ABCMeta\nimport operator\nimport six\nimport os\n\nfrom .base import Callback\nfrom ..utils import logger\nfrom ..tfutils.common import get_op_tensor_name\n\n__all__ = ['HyperParam', 'GraphVarParam', 'ObjAttrParam',\n 'HyperParamSetter', 'HumanHyperParamSetter',\n 'ScheduledHyperParamSetter',\n 'StatMonitorParamSetter', 'HyperParamSetterWithFunc',\n ]\n\n\n@six.add_metaclass(ABCMeta)\nclass HyperParam(object):\n \"\"\" Base class for a hyperparam. \"\"\"\n\n def setup_graph(self):\n \"\"\" setup the graph in ``setup_graph`` callback stage, if necessary\"\"\"\n pass\n\n @abstractmethod\n def set_value(self, v):\n \"\"\"\n Set the value of the param.\n\n Args:\n v: the value to be set\n \"\"\"\n pass\n\n @abstractmethod\n def get_value(self):\n \"\"\"\n Get the value of the param.\n \"\"\"\n pass\n\n @property\n def readable_name(self):\n \"\"\" A name to display \"\"\"\n return self._readable_name\n\n\nclass GraphVarParam(HyperParam):\n \"\"\" A variable in the graph (e.g. learning_rate) can be a hyperparam.\"\"\"\n\n def __init__(self, name, shape=[]):\n \"\"\"\n Args:\n name(str): name of the variable.\n shape(list): shape of the variable.\n \"\"\"\n self.name = name\n self.shape = shape\n self._readable_name, self.var_name = get_op_tensor_name(name)\n\n def setup_graph(self):\n \"\"\" Will setup the assign operator for that variable. \"\"\"\n all_vars = tf.global_variables() + tf.local_variables()\n for v in all_vars:\n if v.name == self.var_name:\n self.var = v\n break\n else:\n raise ValueError(\"{} is not a variable in the graph!\".format(self.var_name))\n\n def set_value(self, v):\n \"\"\" Assign the variable a new value. \"\"\"\n self.var.load(v)\n\n def get_value(self):\n \"\"\" Evaluate the variable. \"\"\"\n return self.var.eval()\n\n\nclass ObjAttrParam(HyperParam):\n \"\"\" An attribute of an object can be a hyperparam. \"\"\"\n\n def __init__(self, obj, attrname, readable_name=None):\n \"\"\"\n Args:\n obj: the object\n attrname (str): the attribute\n readable_name(str): The name to display and set with. 
Defaults to be ``attrname``.\n \"\"\"\n self.obj = obj\n self.attrname = attrname\n if readable_name is None:\n self._readable_name = attrname\n else:\n self._readable_name = readable_name\n\n def set_value(self, v):\n setattr(self.obj, self.attrname, v)\n\n def get_value(self, v):\n return getattr(self.obj, self.attrname)\n\n\nclass HyperParamSetter(Callback):\n \"\"\"\n An abstract base callback to set hyperparameters.\n \"\"\"\n\n def __init__(self, param):\n \"\"\"\n Args:\n param(HyperParam or str): if is a :class:`str`, it is assumed to\n be a :class:`GraphVarParam`.\n \"\"\"\n # if a string, assumed to be a scalar graph variable\n if isinstance(param, six.string_types):\n param = GraphVarParam(param)\n assert isinstance(param, HyperParam), type(param)\n self.param = param\n self.last_value = None\n\n def _setup_graph(self):\n self.param.setup_graph()\n\n def get_value_to_set(self):\n \"\"\"\n Returns:\n The value to assign to the variable.\n\n Note:\n Subclasses will implement the abstract method\n :meth:`_get_value_to_set`, which should return a new value to\n set, or return None to do nothing.\n \"\"\"\n ret = self._get_value_to_set()\n if ret is not None and ret != self.last_value:\n logger.info(\"After epoch {}, {} will change to {:.8f}\".format(\n self.epoch_num, self.param.readable_name, ret))\n self.last_value = ret\n return ret\n\n @abstractmethod\n def _get_value_to_set(self):\n pass\n\n def get_current_value(self):\n \"\"\"\n Returns:\n The current value of the param.\n \"\"\"\n return self.param.get_value()\n\n def _trigger(self):\n self._set_param()\n\n def _before_train(self):\n self._set_param()\n\n def _set_param(self):\n v = self.get_value_to_set()\n if v is not None:\n self.param.set_value(v)\n\n\nclass HumanHyperParamSetter(HyperParamSetter):\n \"\"\"\n Set hyperparameter by loading the value from a file each time it get called.\n This is useful for manually tuning some parameters (e.g. 
learning_rate)\n without interrupting the training.\n \"\"\"\n\n def __init__(self, param, file_name='hyper.txt'):\n \"\"\"\n Args:\n param: same as in :class:`HyperParamSetter`.\n file_name(str): a file containing the new value of the parameter.\n Each line in the file is a ``k:v`` pair, for example, ``learning_rate:1e-4``.\n If the pair is not found, the param will not be changed.\n \"\"\"\n super(HumanHyperParamSetter, self).__init__(param)\n self.file_name = os.path.join(logger.get_logger_dir(), file_name)\n logger.info(\"Use {} to set hyperparam: '{}'.\".format(\n self.file_name, self.param.readable_name))\n\n def _get_value_to_set(self):\n # ignore if no such file exists\n if not os.path.isfile(self.file_name):\n return None\n try:\n with open(self.file_name) as f:\n lines = f.readlines()\n lines = [s.strip().split(':') for s in lines]\n dic = {str(k): float(v) for k, v in lines}\n ret = dic[self.param.readable_name]\n return ret\n except Exception:\n logger.warn(\n \"Cannot find {} in {}\".format(\n self.param.readable_name, self.file_name))\n return None\n\n\nclass ScheduledHyperParamSetter(HyperParamSetter):\n \"\"\"\n Set hyperparameters by a predefined epoch-based schedule.\n \"\"\"\n\n def __init__(self, param, schedule, interp=None):\n \"\"\"\n Args:\n param: same as in :class:`HyperParamSetter`.\n schedule (list): with the format ``[(epoch1, val1), (epoch2, val2), (epoch3, val3)]``.\n Each ``(ep, val)`` pair means to set the param\n to \"val\" **after** the completion of epoch `ep`.\n If ep == 0, the value will be set before the first epoch\n (because by default the first is epoch 1).\n interp: None: no interpolation. 'linear': linear interpolation\n\n Example:\n .. code-block:: python\n\n ScheduledHyperParamSetter('learning_rate',\n [(30, 1e-2), (60, 1e-3), (85, 1e-4), (95, 1e-5)]),\n \"\"\"\n schedule = [(int(a), float(b)) for a, b in schedule]\n self.schedule = sorted(schedule, key=operator.itemgetter(0))\n if interp is not None:\n assert interp == 'linear'\n self.interp = interp\n super(ScheduledHyperParamSetter, self).__init__(param)\n\n def _get_value_to_set(self):\n if self.interp is None:\n for e, v in self.schedule:\n if e == self.epoch_num:\n return v\n return None\n else:\n laste, lastv = None, None\n for e, v in self.schedule:\n if e == self.epoch_num:\n return v\n if e > self.epoch_num:\n break\n laste, lastv = e, v\n if laste is None or laste == e:\n # hasn't reached the first scheduled point, or reached the end of all scheduled points\n return None\n v = (self.epoch_num - laste) * 1. / (e - laste) * (v - lastv) + lastv\n return v\n\n\nclass HyperParamSetterWithFunc(HyperParamSetter):\n \"\"\" Set the parameter by a function of epoch num and old value. \"\"\"\n def __init__(self, param, func):\n \"\"\"\n Args:\n param: same as in :class:`HyperParamSetter`.\n func: ``param`` will be set by ``new_value = func(epoch_num, old_value)``.\n ``epoch_num`` is the number of epochs that have finished.\n\n Example:\n Decrease by a factor of 0.9 every two epochs:\n\n .. 
code-block:: python\n\n HyperParamSetterWithFunc('learning_rate',\n lambda e, x: x * 0.9 if e % 2 == 0 else x)\n \"\"\"\n super(HyperParamSetterWithFunc, self).__init__(param)\n self.f = func\n\n def _get_value_to_set(self):\n return self.f(self.epoch_num, self.get_current_value())\n\n\nclass StatMonitorParamSetter(HyperParamSetter):\n \"\"\"\n Change the param by monitoring the change of a statistic.\n Change when it wasn't decreasing/increasing enough.\n \"\"\"\n def __init__(self, param, stat_name, value_func, threshold,\n last_k, reverse=False):\n \"\"\"\n Args:\n param: same as in :class:`HyperParamSetter`.\n stat_name (str): name of the statistics.\n value_func (float -> float): a function which returns a new value\n taking the old value.\n threshold (float): change threshold.\n last_k (int): last k epochs.\n reverse (bool): monitor increasing instead of decreasing.\n\n This callback will change ``param`` by ``new_value = value_func(old_value)``, when:\n ``min(stats) >= stats[0] - threshold``, where\n ``stats = [the values of stat_name in last k epochs]``\n\n If ``reverse`` is True, it will change the ``param`` when:\n ``max(stats) <= stats[0] + threshold``.\n\n Example:\n If validation error wasn't decreasing for 5 epochs, anneal the learning rate by 0.2:\n\n .. code-block:: python\n\n StatMonitorParamSetter('learning_rate', 'val-error', lambda x: x * 0.2, 0, 5)\n \"\"\"\n super(StatMonitorParamSetter, self).__init__(param)\n self.stat_name = stat_name\n self.value_func = value_func\n self.last_k = last_k\n self.threshold = threshold\n self.reverse = reverse\n\n self.last_changed_epoch = 0\n\n def _get_value_to_set(self):\n hist = self.trainer.monitors.get_history(self.stat_name)\n if len(hist) < self.last_k + 1 or \\\n self.epoch_num - self.last_changed_epoch < self.last_k:\n return None\n hist = hist[-self.last_k - 1:] # len==last_k+1\n\n hist_first = hist[0]\n if not self.reverse:\n hist_min = min(hist)\n if hist_min < hist_first - self.threshold: # small enough\n return None\n else:\n hist_max = max(hist)\n if hist_max > hist_first + self.threshold: # large enough\n return None\n self.last_changed_epoch = self.epoch_num\n logger.info(\n \"[StatMonitorParamSetter] Triggered, history of {}: \".format(\n self.stat_name) + ','.join(map(str, hist)))\n return self.value_func(self.get_current_value())\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: mnist-addition.py\n# Author: Yuxin Wu <ppwwyyxxc@gmail.com>\n\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nimport os\nimport argparse\n\n\nfrom tensorpack import *\nfrom tensorpack.dataflow import dataset\nfrom tensorpack.tfutils import sesscreate, optimizer, summary\n\nIMAGE_SIZE = 42\nWARP_TARGET_SIZE = 28\nHALF_DIFF = (IMAGE_SIZE - WARP_TARGET_SIZE) // 2\n\n\nclass Model(ModelDesc):\n def _get_inputs(self):\n return [InputDesc(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE, 2), 'input'),\n InputDesc(tf.int32, (None,), 'label')]\n\n def _build_graph(self, inputs):\n xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE)\n for x in range(WARP_TARGET_SIZE)], dtype='float32')\n xys = tf.constant(xys, dtype=tf.float32, name='xys') # p x 3\n\n image, label = inputs\n\n image = image / 255.0 - 0.5 # bhw2\n\n def get_stn(image):\n stn = (LinearWrap(image)\n .AvgPooling('downsample', 2)\n .Conv2D('conv0', 20, 5, padding='VALID')\n .MaxPooling('pool0', 2)\n .Conv2D('conv1', 20, 5, padding='VALID')\n .FullyConnected('fc1', out_dim=32)\n .FullyConnected('fct', out_dim=6, nl=tf.identity,\n W_init=tf.constant_initializer(),\n b_init=tf.constant_initializer([1, 0, HALF_DIFF, 0, 1, HALF_DIFF]))())\n # output 6 parameters for affine transformation\n stn = tf.reshape(stn, [-1, 2, 3], name='affine') # bx2x3\n stn = tf.reshape(tf.transpose(stn, [2, 0, 1]), [3, -1]) # 3 x (bx2)\n coor = tf.reshape(tf.matmul(xys, stn),\n [WARP_TARGET_SIZE, WARP_TARGET_SIZE, -1, 2])\n coor = tf.transpose(coor, [2, 0, 1, 3], 'sampled_coords') # b h w 2\n sampled = ImageSample('warp', [image, coor], borderMode='constant')\n return sampled\n\n with argscope([Conv2D, FullyConnected], nl=tf.nn.relu):\n with tf.variable_scope('STN1'):\n sampled1 = get_stn(image)\n with tf.variable_scope('STN2'):\n sampled2 = get_stn(image)\n\n # For visualization in tensorboard\n with tf.name_scope('visualization'):\n padded1 = tf.pad(sampled1, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])\n padded2 = tf.pad(sampled2, [[0, 0], [HALF_DIFF, HALF_DIFF], [HALF_DIFF, HALF_DIFF], [0, 0]])\n img_orig = tf.concat([image[:, :, :, 0], image[:, :, :, 1]], 1) # b x 2h x w\n transform1 = tf.concat([padded1[:, :, :, 0], padded1[:, :, :, 1]], 1)\n transform2 = tf.concat([padded2[:, :, :, 0], padded2[:, :, :, 1]], 1)\n stacked = tf.concat([img_orig, transform1, transform2], 2, 'viz')\n tf.summary.image('visualize',\n tf.expand_dims(stacked, -1), max_outputs=30)\n\n sampled = tf.concat([sampled1, sampled2], 3, 'sampled_concat')\n logits = (LinearWrap(sampled)\n .FullyConnected('fc1', out_dim=256, nl=tf.nn.relu)\n .FullyConnected('fc2', out_dim=128, nl=tf.nn.relu)\n .FullyConnected('fct', out_dim=19, nl=tf.identity)())\n tf.nn.softmax(logits, name='prob')\n\n cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)\n cost = tf.reduce_mean(cost, name='cross_entropy_loss')\n\n wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, label, 1)), name='incorrect_vector')\n summary.add_moving_summary(tf.reduce_mean(wrong, name='train_error'))\n\n wd_cost = tf.multiply(1e-5, regularize_cost('fc.*/W', tf.nn.l2_loss),\n name='regularize_loss')\n summary.add_moving_summary(cost, wd_cost)\n self.cost = tf.add_n([wd_cost, cost], name='cost')\n\n def _get_optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=5e-4, trainable=False)\n opt = tf.train.AdamOptimizer(lr, epsilon=1e-3)\n return optimizer.apply_grad_processors(\n opt, [\n 
gradproc.ScaleGradient(('STN.*', 0.1)),\n gradproc.SummaryGradient()])\n\n\ndef get_data(isTrain):\n ds = dataset.Mnist('train' if isTrain else 'test')\n # create augmentation for both training and testing\n augs = [\n imgaug.MapImage(lambda x: x * 255.0),\n imgaug.RandomResize((0.7, 1.2), (0.7, 1.2)),\n imgaug.RotationAndCropValid(45),\n imgaug.RandomPaste((IMAGE_SIZE, IMAGE_SIZE)),\n imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01)\n ]\n ds = AugmentImageComponent(ds, augs)\n\n ds = JoinData([ds, ds])\n # stack the two digits into two channels, and label it with the sum\n ds = MapData(ds, lambda dp: [np.stack([dp[0], dp[2]], axis=2), dp[1] + dp[3]])\n ds = BatchData(ds, 128)\n return ds\n\n\ndef view_warp(modelpath):\n pred = OfflinePredictor(PredictConfig(\n session_init=get_model_loader(modelpath),\n model=Model(),\n input_names=['input'],\n output_names=['visualization/viz', 'STN1/affine', 'STN2/affine']))\n\n xys = np.array([[0, 0, 1],\n [WARP_TARGET_SIZE, 0, 1],\n [WARP_TARGET_SIZE, WARP_TARGET_SIZE, 1],\n [0, WARP_TARGET_SIZE, 1]], dtype='float32')\n\n def draw_rect(img, affine, c, offset=[0, 0]):\n a = np.transpose(affine) # 3x2\n a = (np.matmul(xys, a) + offset).astype('int32')\n cv2.line(img, tuple(a[0][::-1]), tuple(a[1][::-1]), c)\n cv2.line(img, tuple(a[1][::-1]), tuple(a[2][::-1]), c)\n cv2.line(img, tuple(a[2][::-1]), tuple(a[3][::-1]), c)\n cv2.line(img, tuple(a[3][::-1]), tuple(a[0][::-1]), c)\n\n ds = get_data(False)\n ds.reset_state()\n for k in ds.get_data():\n img, label = k\n outputs, affine1, affine2 = pred(img)\n for idx, viz in enumerate(outputs):\n viz = cv2.cvtColor(viz, cv2.COLOR_GRAY2BGR)\n # Here we assume the second branch focuses on the first digit\n draw_rect(viz, affine2[idx], (0, 0, 255))\n draw_rect(viz, affine1[idx], (0, 0, 255), offset=[IMAGE_SIZE, 0])\n cv2.imwrite('{:03d}.png'.format(idx), (viz + 0.5) * 255)\n break\n\n\ndef get_config():\n logger.auto_set_dir()\n\n dataset_train, dataset_test = get_data(True), get_data(False)\n steps_per_epoch = dataset_train.size() * 5\n\n return TrainConfig(\n model=Model(),\n data=QueueInput(dataset_train),\n callbacks=[\n ModelSaver(),\n InferenceRunner(dataset_test,\n [ScalarStats('cost'), ClassificationError()]),\n ScheduledHyperParamSetter('learning_rate', [(200, 1e-4)])\n ],\n session_creator=sesscreate.NewSessionCreator(\n config=get_default_sess_config(0.5)),\n steps_per_epoch=steps_per_epoch,\n max_epoch=500,\n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')\n parser.add_argument('--load', help='load model')\n parser.add_argument('--view', action='store_true')\n args = parser.parse_args()\n if args.gpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\n if args.view:\n view_warp(args.load)\n else:\n config = get_config()\n if args.load:\n config.session_init = SaverRestore(args.load)\n launch_train_with_config(config, SimpleTrainer())\n"
] |
[
[
"numpy.issubdtype"
],
[
"tensorflow.local_variables",
"tensorflow.global_variables"
],
[
"tensorflow.nn.in_top_k",
"tensorflow.constant_initializer",
"tensorflow.matmul",
"tensorflow.reshape",
"tensorflow.nn.softmax",
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.add_n",
"tensorflow.constant",
"tensorflow.variable_scope",
"numpy.transpose",
"tensorflow.pad",
"numpy.array",
"tensorflow.train.AdamOptimizer",
"numpy.matmul",
"tensorflow.expand_dims",
"numpy.stack",
"tensorflow.get_variable",
"tensorflow.name_scope",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.reduce_mean"
]
] |
johannespitz/MNF_VBNN
|
[
"e274a57cac03282e8ff3181c831e650b024b14a2"
] |
[
"mutual_information.py"
] |
[
"import sys\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef _compute_mi(model, samples, num_runs):\n predictions_list = []\n for idx in tqdm(range(num_runs), file=sys.stdout):\n predictions_list.append(model.predict(samples, batch_size=10000))\n\n probs = np.asarray(predictions_list)\n mean = probs.mean(axis=0)\n\n def _entropy(ps):\n return -1. * (ps * np.log(ps + 1e-8)).sum(axis=-1)\n\n mean_H = _entropy(mean)\n indi_H = _entropy(probs)\n\n return mean_H - indi_H.mean(axis=0), mean_H\n\ndef plot_mi(tests_dict, model, num_runs=20):\n fig, axs = plt.subplots(2, 1, figsize=(20, 10))\n\n for name, samples in tests_dict.items():\n mi, ent = _compute_mi(model, samples, num_runs)\n print(f'MI: {name:15s} min: {mi.min():.4f}, max: {mi.max():.4f}, mean: {mi.mean():.4f}')\n print(f'PE: {name:15s} min: {ent.min():.4f}, max: {ent.max():.4f}, mean: {ent.mean():.4f}')\n # with self.summary_writer.as_default():\n # tf.summary.histogram('mi/' + name, mi, epoch)\n # tf.summary.histogram('ent/' + name, ent, epoch)\n sns.distplot(mi, label=name, ax=axs[0], kde=True, kde_kws=dict(gridsize=1000))\n sns.distplot(ent, label=name, ax=axs[1], kde=True, kde_kws=dict(gridsize=1000))\n\n plt.legend()\n fig.tight_layout()\n return plt"
] |
[
[
"numpy.log",
"matplotlib.pyplot.legend",
"numpy.asarray",
"matplotlib.pyplot.subplots"
]
] |
likith012/distill-grammar
|
[
"04ff5e07337789edfe57f21f85e30e7992ae90d9"
] |
[
"deploy/convert_onnx.py"
] |
[
"\"\"\"Convert the model to ONN format\n\"\"\"\n\n__author__ = \"Likith Reddy\"\n__version__ = \"1.0.0\"\n__email__ = \"likith012@gmail.com\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.onnx\nimport sys, os\n\nsys.path.insert(0, os.path.join(sys.path[0], '../'))\n\nfrom configs import config\nfrom src.dataset import BERTDataset\n\n\nif __name__ == '__main__':\n\n sentence = ['I love BERT']\n\n dataset = BERTDataset(sentence = sentence, target = [1], config = config)\n\n model = config.MODEL\n\n num_device = torch.cuda.device_count()\n device_ids = list(range(num_device))\n if len(device_ids) > 1:\n model = nn.DataParallel(model, device_ids=device_ids)\n\n model = model.module if hasattr(model, 'module') else model\n model = config.MODEL.from_pretrained(config.MODEL_PATH, local_files_only = True)\n model.eval()\n\n ids = dataset[0]['ids'].unsqueeze(0)\n attention_mask = dataset[0]['mask'].unsqueeze(0)\n token_type_ids = None\n\n device = 'cpu'\n\n ids = ids.to(device, dtype = torch.long)\n attention_mask = attention_mask.to(device, dtype = torch.long)\n\n torch.onnx.export(\n model,\n (ids, token_type_ids, attention_mask),\n \"onnx_model.onnx\",\n input_names = ['ids', 'token_type_ids' 'attention_mask'],\n output_names = ['output'],\n dynamic_axes = {\n 'ids': {0: 'batch_size'},\n 'token_type_ids': {0, 'batch_size'},\n 'attention_mask': {0: 'batch_size'},\n 'output': {0: 'batch_size'},\n },\n verbose = True,\n opset_version=12, \n enable_onnx_checker=True\n )"
] |
[
[
"torch.nn.DataParallel",
"torch.onnx.export",
"torch.cuda.device_count"
]
] |
dsshim0125/grmc
|
[
"a4d02b66055b33df07a8136cd6b11020b27ac4c5"
] |
[
"evaluate_pytorch.py"
] |
[
"import os\r\nimport glob\r\nimport time\r\nimport argparse\r\nimport torch\r\nfrom torch import nn\r\nfrom model.densedepth import Model\r\nfrom model.fcrn import ResNet\r\nfrom utils import evaluate\r\nfrom matplotlib import pyplot as plt\r\n\r\n\r\n# Argument Parser\r\nparser = argparse.ArgumentParser(description='High Quality Monocular Depth Estimation via Transfer Learning')\r\nparser.add_argument('--model_type', default='densedepth', type=str, help='Depth estimation network for evaluation')\r\nparser.add_argument('--layers', default=161, type=int, help='number of layers of encoder')\r\n\r\nargs = parser.parse_args()\r\n\r\n# Custom object needed for inference and training\r\n\r\n# Load test data\r\n\r\nprint('Loading test data...', end='')\r\nimport numpy as np\r\nfrom zipfile import ZipFile\r\ndef extract_zip(input_zip):\r\n input_zip=ZipFile(input_zip)\r\n return {name: input_zip.read(name) for name in input_zip.namelist()}\r\n\r\ndata = extract_zip('/media/dsshim/nyu_v2/nyu_test.zip')\r\nfrom io import BytesIO\r\nrgb = np.load(BytesIO(data['eigen_test_rgb.npy']))\r\ndepth = np.load(BytesIO(data['eigen_test_depth.npy']))\r\ncrop = np.load(BytesIO(data['eigen_test_crop.npy']))\r\nprint('Test data loaded.\\n')\r\n\r\n\r\n\r\n\r\nif args.model_type == 'densedepth':\r\n model = Model()\r\n\r\nelse:\r\n model = ResNet(layers=args.layers)\r\n\r\n\r\nmodel.load_state_dict(torch.load('checkpoints/%s_%d.pth'%(args.model_type, args.layers)))\r\nmodel = model.cuda()\r\n\r\nmodel.eval()\r\n\r\n\r\n\r\n\r\nstart = time.time()\r\nprint('Testing...')\r\n\r\ne = evaluate(model, rgb, depth, crop, batch_size=1)\r\n\r\n\r\nend = time.time()\r\nprint('\\nTest time', end-start, 's')\r\n"
] |
[
[
"torch.load"
]
] |
evhub/compiled-cocotest
|
[
"7bfb1c92ec1f1cea48f12280faf95d3c7ed8b8e0"
] |
[
"dest/extras.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __coconut_hash__ = 0x58bfe0d3\n\n# Compiled with Coconut version 1.4.1 [Ernest Scribbler]\n\n# Coconut Header: -------------------------------------------------------------\n\nfrom __future__ import print_function, absolute_import, unicode_literals, division\nimport sys as _coconut_sys\nif _coconut_sys.version_info < (3,):\n from __builtin__ import chr, filter, hex, input, int, map, object, oct, open, print, range, str, zip, filter, reversed, enumerate, raw_input, xrange\n py_chr, py_hex, py_input, py_int, py_map, py_object, py_oct, py_open, py_print, py_range, py_str, py_zip, py_filter, py_reversed, py_enumerate, py_raw_input, py_xrange, py_repr = chr, hex, input, int, map, object, oct, open, print, range, str, zip, filter, reversed, enumerate, raw_input, xrange, repr\n _coconut_NotImplemented, _coconut_raw_input, _coconut_xrange, _coconut_int, _coconut_long, _coconut_print, _coconut_str, _coconut_unicode, _coconut_repr = NotImplemented, raw_input, xrange, int, long, print, str, unicode, repr\n from future_builtins import *\n chr, str = unichr, unicode\n from io import open\n class object(object):\n __slots__ = ()\n def __ne__(self, other):\n eq = self == other\n if eq is _coconut_NotImplemented:\n return eq\n return not eq\n class int(_coconut_int):\n __slots__ = ()\n if hasattr(_coconut_int, \"__doc__\"):\n __doc__ = _coconut_int.__doc__\n class __metaclass__(type):\n def __instancecheck__(cls, inst):\n return _coconut.isinstance(inst, (_coconut_int, _coconut_long))\n def __subclasscheck__(cls, subcls):\n return _coconut.issubclass(subcls, (_coconut_int, _coconut_long))\n class range(object):\n __slots__ = (\"_xrange\",)\n if hasattr(_coconut_xrange, \"__doc__\"):\n __doc__ = _coconut_xrange.__doc__\n def __init__(self, *args):\n self._xrange = _coconut_xrange(*args)\n def __iter__(self):\n return _coconut.iter(self._xrange)\n def __reversed__(self):\n return _coconut.reversed(self._xrange)\n def __len__(self):\n return _coconut.len(self._xrange)\n def __contains__(self, elem):\n return elem in self._xrange\n def __getitem__(self, index):\n if _coconut.isinstance(index, _coconut.slice):\n args = _coconut.slice(*self._args)\n start, stop, step, ind_step = (args.start if args.start is not None else 0), args.stop, (args.step if args.step is not None else 1), (index.step if index.step is not None else 1)\n return self.__class__((start if ind_step >= 0 else stop - step) if index.start is None else start + step * index.start if index.start >= 0 else stop + step * index.start, (stop if ind_step >= 0 else start - step) if index.stop is None else start + step * index.stop if index.stop >= 0 else stop + step * index.stop, step if index.step is None else step * index.step)\n else:\n return self._xrange[index]\n def count(self, elem):\n \"\"\"Count the number of times elem appears in the range.\"\"\"\n return _coconut_int(elem in self._xrange)\n def index(self, elem):\n \"\"\"Find the index of elem in the range.\"\"\"\n if elem not in self._xrange: raise _coconut.ValueError(_coconut.repr(elem) + \" is not in range\")\n start, _, step = self._xrange.__reduce_ex__(2)[1]\n return (elem - start) // step\n def __repr__(self):\n return _coconut.repr(self._xrange)[1:]\n @property\n def _args(self):\n return self._xrange.__reduce__()[1]\n def __reduce_ex__(self, protocol):\n return (self.__class__, self._xrange.__reduce_ex__(protocol)[1])\n def __reduce__(self):\n return self.__reduce_ex__(_coconut.pickle.DEFAULT_PROTOCOL)\n def __hash__(self):\n return 
_coconut.hash(self._args)\n def __copy__(self):\n return self.__class__(*self._args)\n def __eq__(self, other):\n return _coconut.isinstance(other, self.__class__) and self._args == other._args\n from collections import Sequence as _coconut_Sequence\n _coconut_Sequence.register(range)\n from functools import wraps as _coconut_wraps\n @_coconut_wraps(_coconut_print)\n def print(*args, **kwargs):\n file = kwargs.get(\"file\", _coconut_sys.stdout)\n flush = kwargs.get(\"flush\", False)\n if \"flush\" in kwargs:\n del kwargs[\"flush\"]\n if _coconut.hasattr(file, \"encoding\") and file.encoding is not None:\n _coconut_print(*(_coconut_unicode(x).encode(file.encoding) for x in args), **kwargs)\n else:\n _coconut_print(*(_coconut_unicode(x).encode() for x in args), **kwargs)\n if flush:\n file.flush()\n @_coconut_wraps(_coconut_raw_input)\n def input(*args, **kwargs):\n if _coconut.hasattr(_coconut_sys.stdout, \"encoding\") and _coconut_sys.stdout.encoding is not None:\n return _coconut_raw_input(*args, **kwargs).decode(_coconut_sys.stdout.encoding)\n return _coconut_raw_input(*args, **kwargs).decode()\n @_coconut_wraps(_coconut_repr)\n def repr(obj):\n if isinstance(obj, _coconut_unicode):\n return _coconut_unicode(_coconut_repr(obj)[1:])\n if isinstance(obj, _coconut_str):\n return \"b\" + _coconut_unicode(_coconut_repr(obj))\n return _coconut_unicode(_coconut_repr(obj))\n ascii = repr\n def raw_input(*args):\n \"\"\"Coconut uses Python 3 \"input\" instead of Python 2 \"raw_input\".\"\"\"\n raise _coconut.NameError('Coconut uses Python 3 \"input\" instead of Python 2 \"raw_input\"')\n def xrange(*args):\n \"\"\"Coconut uses Python 3 \"range\" instead of Python 2 \"xrange\".\"\"\"\n raise _coconut.NameError('Coconut uses Python 3 \"range\" instead of Python 2 \"xrange\"')\n if _coconut_sys.version_info < (2, 7):\n import functools as _coconut_functools, copy_reg as _coconut_copy_reg\n def _coconut_new_partial(func, args, keywords):\n return _coconut_functools.partial(func, *(args if args is not None else ()), **(keywords if keywords is not None else {}))\n _coconut_copy_reg.constructor(_coconut_new_partial)\n def _coconut_reduce_partial(self):\n return (_coconut_new_partial, (self.func, self.args, self.keywords))\n _coconut_copy_reg.pickle(_coconut_functools.partial, _coconut_reduce_partial)\nelse:\n from builtins import chr, filter, hex, input, int, map, object, oct, open, print, range, str, zip, filter, reversed, enumerate\n py_chr, py_hex, py_input, py_int, py_map, py_object, py_oct, py_open, py_print, py_range, py_str, py_zip, py_filter, py_reversed, py_enumerate, py_repr = chr, hex, input, int, map, object, oct, open, print, range, str, zip, filter, reversed, enumerate, repr\n _coconut_str = str\nclass _coconut(object):\n import collections, copy, functools, types, itertools, operator, threading, weakref, os\n if _coconut_sys.version_info < (3, 2):\n try:\n from backports.functools_lru_cache import lru_cache\n functools.lru_cache = lru_cache\n except ImportError: pass\n if _coconut_sys.version_info < (3,):\n import cPickle as pickle\n else:\n import pickle\n if _coconut_sys.version_info >= (2, 7):\n OrderedDict = collections.OrderedDict\n else:\n OrderedDict = dict\n if _coconut_sys.version_info < (3, 3):\n abc = collections\n else:\n import collections.abc as abc\n class typing(object):\n @staticmethod\n def NamedTuple(name, fields):\n return _coconut.collections.namedtuple(name, [x for x, t in fields])\n Ellipsis, Exception, AttributeError, ImportError, IndexError, KeyError, NameError, 
TypeError, ValueError, StopIteration, classmethod, dict, enumerate, filter, float, frozenset, getattr, hasattr, hash, id, int, isinstance, issubclass, iter, len, list, locals, map, min, max, next, object, property, range, reversed, set, slice, str, sum, super, tuple, type, zip, repr, bytearray = Ellipsis, Exception, AttributeError, ImportError, IndexError, KeyError, NameError, TypeError, ValueError, StopIteration, classmethod, dict, enumerate, filter, float, frozenset, getattr, hasattr, hash, id, int, isinstance, issubclass, iter, len, list, locals, map, min, max, next, object, property, range, reversed, set, slice, str, sum, super, tuple, type, zip, staticmethod(repr), bytearray\n_coconut_sentinel = _coconut.object()\nclass MatchError(Exception):\n \"\"\"Pattern-matching error. Has attributes .pattern and .value.\"\"\"\n __slots__ = (\"pattern\", \"value\")\nclass _coconut_tail_call(object):\n __slots__ = (\"func\", \"args\", \"kwargs\")\n def __init__(self, func, *args, **kwargs):\n self.func, self.args, self.kwargs = func, args, kwargs\n_coconut_tco_func_dict = {}\ndef _coconut_tco(func):\n @_coconut.functools.wraps(func)\n def tail_call_optimized_func(*args, **kwargs):\n call_func = func\n while True:\n wkref = _coconut_tco_func_dict.get(_coconut.id(call_func))\n if (wkref is not None and wkref() is call_func) or _coconut.isinstance(call_func, _coconut_base_pattern_func):\n call_func = call_func._coconut_tco_func\n result = call_func(*args, **kwargs) # pass --no-tco to clean up your traceback\n if not isinstance(result, _coconut_tail_call):\n return result\n call_func, args, kwargs = result.func, result.args, result.kwargs\n tail_call_optimized_func._coconut_tco_func = func\n tail_call_optimized_func.__module__ = _coconut.getattr(func, \"__module__\", None)\n tail_call_optimized_func.__name__ = _coconut.getattr(func, \"__name__\", \"<coconut tco function (pass --no-tco to remove)>\")\n tail_call_optimized_func.__qualname__ = _coconut.getattr(func, \"__qualname__\", tail_call_optimized_func.__name__)\n _coconut_tco_func_dict[_coconut.id(tail_call_optimized_func)] = _coconut.weakref.ref(tail_call_optimized_func)\n return tail_call_optimized_func\ndef _coconut_igetitem(iterable, index):\n if isinstance(iterable, (_coconut_reversed, _coconut_map, _coconut.zip, _coconut_enumerate, _coconut_count, _coconut.abc.Sequence)):\n return iterable[index]\n if not _coconut.isinstance(index, _coconut.slice):\n if index < 0:\n return _coconut.collections.deque(iterable, maxlen=-index)[0]\n return _coconut.next(_coconut.itertools.islice(iterable, index, index + 1))\n if index.start is not None and index.start < 0 and (index.stop is None or index.stop < 0) and index.step is None:\n queue = _coconut.collections.deque(iterable, maxlen=-index.start)\n if index.stop is not None:\n queue = _coconut.list(queue)[:index.stop - index.start]\n return queue\n if (index.start is not None and index.start < 0) or (index.stop is not None and index.stop < 0) or (index.step is not None and index.step < 0):\n return _coconut.list(iterable)[index]\n return _coconut.itertools.islice(iterable, index.start, index.stop, index.step)\nclass _coconut_base_compose(object):\n __slots__ = (\"func\", \"funcstars\")\n def __init__(self, func, *funcstars):\n self.func = func\n self.funcstars = []\n for f, stars in funcstars:\n if _coconut.isinstance(f, _coconut_base_compose):\n self.funcstars.append((f.func, stars))\n self.funcstars += f.funcstars\n else:\n self.funcstars.append((f, stars))\n def __call__(self, *args, **kwargs):\n arg 
= self.func(*args, **kwargs)\n for f, stars in self.funcstars:\n if stars == 0:\n arg = f(arg)\n elif stars == 1:\n arg = f(*arg)\n elif stars == 2:\n arg = f(**arg)\n else:\n raise _coconut.ValueError(\"invalid arguments to \" + _coconut.repr(self))\n return arg\n def __repr__(self):\n return _coconut.repr(self.func) + \" \" + \" \".join((\"..*> \" if star == 1 else \"..**>\" if star == 2 else \"..> \") + _coconut.repr(f) for f, star in self.funcstars)\n def __reduce__(self):\n return (self.__class__, (self.func,) + _coconut.tuple(self.funcstars))\n def __get__(self, obj, objtype=None):\n return _coconut.functools.partial(self, obj)\ndef _coconut_forward_compose(func, *funcs): return _coconut_base_compose(func, *((f, 0) for f in funcs))\ndef _coconut_back_compose(*funcs): return _coconut_forward_compose(*_coconut.reversed(funcs))\ndef _coconut_forward_star_compose(func, *funcs): return _coconut_base_compose(func, *((f, 1) for f in funcs))\ndef _coconut_back_star_compose(*funcs): return _coconut_forward_star_compose(*_coconut.reversed(funcs))\ndef _coconut_forward_dubstar_compose(func, *funcs): return _coconut_base_compose(func, *((f, 2) for f in funcs))\ndef _coconut_back_dubstar_compose(*funcs): return _coconut_forward_dubstar_compose(*_coconut.reversed(funcs))\ndef _coconut_pipe(x, f): return f(x)\ndef _coconut_star_pipe(xs, f): return f(*xs)\ndef _coconut_dubstar_pipe(kws, f): return f(**kws)\ndef _coconut_back_pipe(f, x): return f(x)\ndef _coconut_back_star_pipe(f, xs): return f(*xs)\ndef _coconut_back_dubstar_pipe(f, kws): return f(**kws)\ndef _coconut_assert(cond, msg=None): assert cond, msg if msg is not None else \"(assert) got falsey value \" + _coconut.repr(cond)\ndef _coconut_bool_and(a, b): return a and b\ndef _coconut_bool_or(a, b): return a or b\ndef _coconut_none_coalesce(a, b): return a if a is not None else b\ndef _coconut_minus(a, *rest):\n if not rest:\n return -a\n for b in rest:\n a = a - b\n return a\n@_coconut.functools.wraps(_coconut.itertools.tee)\ndef tee(iterable, n=2):\n if n >= 0 and _coconut.isinstance(iterable, (_coconut.tuple, _coconut.frozenset)):\n return (iterable,) * n\n if n > 0 and (_coconut.hasattr(iterable, \"__copy__\") or _coconut.isinstance(iterable, _coconut.abc.Sequence)):\n return (iterable,) + _coconut.tuple(_coconut.copy.copy(iterable) for _ in _coconut.range(n - 1))\n return _coconut.itertools.tee(iterable, n)\nclass reiterable(object):\n \"\"\"Allows an iterator to be iterated over multiple times.\"\"\"\n __slots__ = (\"iter\",)\n def __init__(self, iterable):\n self.iter = iterable\n def _get_new_iter(self):\n self.iter, new_iter = _coconut_tee(self.iter)\n return new_iter\n def __iter__(self):\n return _coconut.iter(self._get_new_iter())\n def __getitem__(self, index):\n return _coconut_igetitem(self._get_new_iter(), index)\n def __reversed__(self):\n return _coconut_reversed(self._get_new_iter())\n def __len__(self):\n return _coconut.len(self.iter)\n def __repr__(self):\n return \"reiterable(%r)\" % (self.iter,)\n def __reduce__(self):\n return (self.__class__, (self.iter,))\n def __copy__(self):\n return self.__class__(self._get_new_iter())\n def __fmap__(self, func):\n return _coconut_map(func, self)\nclass scan(object):\n \"\"\"Reduce func over iterable, yielding intermediate results,\n optionally starting from initializer.\"\"\"\n __slots__ = (\"func\", \"iter\", \"initializer\")\n def __init__(self, function, iterable, initializer=_coconut_sentinel):\n self.func = function\n self.iter = iterable\n self.initializer = initializer\n 
def __iter__(self):\n acc = self.initializer\n if acc is not _coconut_sentinel:\n yield acc\n for item in self.iter:\n if acc is _coconut_sentinel:\n acc = item\n else:\n acc = self.func(acc, item)\n yield acc\n def __len__(self):\n return _coconut.len(self.iter)\n def __repr__(self):\n return \"scan(%r, %r)\" % (self.func, self.iter)\n def __reduce__(self):\n return (self.__class__, (self.func, self.iter))\n def __copy__(self):\n return self.__class__(self.func, _coconut.copy.copy(self.iter))\n def __fmap__(self, func):\n return _coconut_map(func, self)\nclass reversed(object):\n __slots__ = (\"iter\",)\n if hasattr(_coconut.map, \"__doc__\"):\n __doc__ = _coconut.reversed.__doc__\n def __new__(cls, iterable):\n if _coconut.isinstance(iterable, _coconut.range):\n return iterable[::-1]\n if not _coconut.hasattr(iterable, \"__reversed__\") or _coconut.isinstance(iterable, (_coconut.list, _coconut.tuple)):\n return _coconut.object.__new__(cls)\n return _coconut.reversed(iterable)\n def __init__(self, iterable):\n self.iter = iterable\n def __iter__(self):\n return _coconut.iter(_coconut.reversed(self.iter))\n def __getitem__(self, index):\n if _coconut.isinstance(index, _coconut.slice):\n return _coconut_igetitem(self.iter, _coconut.slice(-(index.start + 1) if index.start is not None else None, -(index.stop + 1) if index.stop else None, -(index.step if index.step is not None else 1)))\n return _coconut_igetitem(self.iter, -(index + 1))\n def __reversed__(self):\n return self.iter\n def __len__(self):\n return _coconut.len(self.iter)\n def __repr__(self):\n return \"reversed(%r)\" % (self.iter,)\n def __hash__(self):\n return -_coconut.hash(self.iter)\n def __reduce__(self):\n return (self.__class__, (self.iter,))\n def __copy__(self):\n return self.__class__(_coconut.copy.copy(self.iter))\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self.iter == other.iter\n def __contains__(self, elem):\n return elem in self.iter\n def count(self, elem):\n \"\"\"Count the number of times elem appears in the reversed iterator.\"\"\"\n return self.iter.count(elem)\n def index(self, elem):\n \"\"\"Find the index of elem in the reversed iterator.\"\"\"\n return _coconut.len(self.iter) - self.iter.index(elem) - 1\n def __fmap__(self, func):\n return self.__class__(_coconut_map(func, self.iter))\nclass map(_coconut.map):\n __slots__ = (\"func\", \"iters\")\n if hasattr(_coconut.map, \"__doc__\"):\n __doc__ = _coconut.map.__doc__\n def __new__(cls, function, *iterables):\n new_map = _coconut.map.__new__(cls, function, *iterables)\n new_map.func = function\n new_map.iters = iterables\n return new_map\n def __getitem__(self, index):\n if _coconut.isinstance(index, _coconut.slice):\n return self.__class__(self.func, *(_coconut_igetitem(i, index) for i in self.iters))\n return self.func(*(_coconut_igetitem(i, index) for i in self.iters))\n def __reversed__(self):\n return self.__class__(self.func, *(_coconut_reversed(i) for i in self.iters))\n def __len__(self):\n return _coconut.min(_coconut.len(i) for i in self.iters)\n def __repr__(self):\n return \"map(%r, %s)\" % (self.func, \", \".join((_coconut.repr(i) for i in self.iters)))\n def __reduce__(self):\n return (self.__class__, (self.func,) + self.iters)\n def __reduce_ex__(self, _):\n return self.__reduce__()\n def __copy__(self):\n return self.__class__(self.func, *_coconut.map(_coconut.copy.copy, self.iters))\n def __fmap__(self, func):\n return self.__class__(_coconut_forward_compose(self.func, func), *self.iters)\nclass 
parallel_map(map):\n \"\"\"Multi-process implementation of map using concurrent.futures.\n Requires arguments to be pickleable.\"\"\"\n __slots__ = ()\n def __iter__(self):\n from concurrent.futures import ProcessPoolExecutor\n with ProcessPoolExecutor() as executor:\n return _coconut.iter(_coconut.list(executor.map(self.func, *self.iters)))\n def __repr__(self):\n return \"parallel_\" + _coconut_map.__repr__(self)\nclass concurrent_map(map):\n \"\"\"Multi-thread implementation of map using concurrent.futures.\"\"\"\n __slots__ = ()\n def __iter__(self):\n from concurrent.futures import ThreadPoolExecutor\n from multiprocessing import cpu_count # cpu_count() * 5 is the default Python 3.5 thread count\n with ThreadPoolExecutor(cpu_count() * 5) as executor:\n return _coconut.iter(_coconut.list(executor.map(self.func, *self.iters)))\n def __repr__(self):\n return \"concurrent_\" + _coconut_map.__repr__(self)\nclass filter(_coconut.filter):\n __slots__ = (\"func\", \"iter\")\n if hasattr(_coconut.filter, \"__doc__\"):\n __doc__ = _coconut.filter.__doc__\n def __new__(cls, function, iterable):\n new_filter = _coconut.filter.__new__(cls, function, iterable)\n new_filter.func = function\n new_filter.iter = iterable\n return new_filter\n def __reversed__(self):\n return self.__class__(self.func, _coconut_reversed(self.iter))\n def __repr__(self):\n return \"filter(%r, %r)\" % (self.func, self.iter)\n def __reduce__(self):\n return (self.__class__, (self.func, self.iter))\n def __reduce_ex__(self, _):\n return self.__reduce__()\n def __copy__(self):\n return self.__class__(self.func, _coconut.copy.copy(self.iter))\n def __fmap__(self, func):\n return _coconut_map(func, self)\nclass zip(_coconut.zip):\n __slots__ = (\"iters\",)\n if hasattr(_coconut.zip, \"__doc__\"):\n __doc__ = _coconut.zip.__doc__\n def __new__(cls, *iterables):\n new_zip = _coconut.zip.__new__(cls, *iterables)\n new_zip.iters = iterables\n return new_zip\n def __getitem__(self, index):\n if _coconut.isinstance(index, _coconut.slice):\n return self.__class__(*(_coconut_igetitem(i, index) for i in self.iters))\n return _coconut.tuple(_coconut_igetitem(i, index) for i in self.iters)\n def __reversed__(self):\n return self.__class__(*(_coconut_reversed(i) for i in self.iters))\n def __len__(self):\n return _coconut.min(_coconut.len(i) for i in self.iters)\n def __repr__(self):\n return \"zip(%s)\" % (\", \".join((_coconut.repr(i) for i in self.iters)),)\n def __reduce__(self):\n return (self.__class__, self.iters)\n def __reduce_ex__(self, _):\n return self.__reduce__()\n def __copy__(self):\n return self.__class__(*_coconut.map(_coconut.copy.copy, self.iters))\n def __fmap__(self, func):\n return _coconut_map(func, self)\nclass enumerate(_coconut.enumerate):\n __slots__ = (\"iter\", \"start\")\n if hasattr(_coconut.enumerate, \"__doc__\"):\n __doc__ = _coconut.enumerate.__doc__\n def __new__(cls, iterable, start=0):\n new_enumerate = _coconut.enumerate.__new__(cls, iterable, start)\n new_enumerate.iter = iterable\n new_enumerate.start = start\n return new_enumerate\n def __getitem__(self, index):\n if _coconut.isinstance(index, _coconut.slice):\n return self.__class__(_coconut_igetitem(self.iter, index), self.start + (0 if index.start is None else index.start if index.start >= 0 else len(self.iter) + index.start))\n return (self.start + index, _coconut_igetitem(self.iter, index))\n def __len__(self):\n return _coconut.len(self.iter)\n def __repr__(self):\n return \"enumerate(%r, %r)\" % (self.iter, self.start)\n def 
__reduce__(self):\n return (self.__class__, (self.iter, self.start))\n def __reduce_ex__(self, _):\n return self.__reduce__()\n def __copy__(self):\n return self.__class__(_coconut.copy.copy(self.iter), self.start)\n def __fmap__(self, func):\n return _coconut_map(func, self)\nclass count(object):\n \"\"\"count(start, step) returns an infinite iterator starting at start and increasing by step.\n If step is set to 0, count will infinitely repeat its first argument.\"\"\"\n __slots__ = (\"start\", \"step\")\n def __init__(self, start=0, step=1):\n self.start = start\n self.step = step\n def __iter__(self):\n while True:\n yield self.start\n if self.step:\n self.start += self.step\n def __contains__(self, elem):\n if not self.step:\n return elem == self.start\n if elem < self.start:\n return False\n return (elem - self.start) % self.step == 0\n def __getitem__(self, index):\n if _coconut.isinstance(index, _coconut.slice) and (index.start is None or index.start >= 0) and (index.stop is None or index.stop >= 0):\n new_start, new_step = self.start, self.step\n if self.step and index.start is not None:\n new_start += self.step * index.start\n if self.step and index.step is not None:\n new_step *= index.step\n if index.stop is None:\n return self.__class__(new_start, new_step)\n if self.step and _coconut.isinstance(self.start, _coconut.int) and _coconut.isinstance(self.step, _coconut.int):\n return _coconut.range(new_start, self.start + self.step * index.stop, new_step)\n return _coconut_map(self.__getitem__, _coconut.range(index.start if index.start is not None else 0, index.stop, index.step if index.step is not None else 1))\n if index < 0:\n raise _coconut.IndexError(\"count indices must be positive\")\n return self.start + self.step * index if self.step else self.start\n def count(self, elem):\n \"\"\"Count the number of times elem appears in the count.\"\"\"\n if not self.step:\n return _coconut.float(\"inf\") if elem == self.start else 0\n return int(elem in self)\n def index(self, elem):\n \"\"\"Find the index of elem in the count.\"\"\"\n if elem not in self:\n raise _coconut.ValueError(_coconut.repr(elem) + \" not in \" + _coconut.repr(self))\n return (elem - self.start) // self.step if self.step else 0\n def __reversed__(self):\n if not self.step:\n return self\n raise _coconut.TypeError(repr(self) + \" object is not reversible\")\n def __repr__(self):\n return \"count(%r, %r)\" % (self.start, self.step)\n def __hash__(self):\n return _coconut.hash((self.start, self.step))\n def __reduce__(self):\n return (self.__class__, (self.start, self.step))\n def __copy__(self):\n return self.__class__(self.start, self.step)\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self.start == other.start and self.step == other.step\n def __fmap__(self, func):\n return _coconut_map(func, self)\nclass groupsof(object):\n \"\"\"groupsof(n, iterable) splits iterable into groups of size n.\n If the length of the iterable is not divisible by n, the last group may be of size < n.\"\"\"\n __slots__ = (\"group_size\", \"iter\")\n def __init__(self, n, iterable):\n self.iter = iterable\n try:\n self.group_size = _coconut.int(n)\n except _coconut.ValueError:\n raise _coconut.TypeError(\"group size must be an int; not %r\" % (n,))\n if self.group_size <= 0:\n raise _coconut.ValueError(\"group size must be > 0; not %r\" % (self.group_size,))\n def __iter__(self):\n iterator = _coconut.iter(self.iter)\n loop = True\n while loop:\n group = []\n for _ in _coconut.range(self.group_size):\n try:\n 
group.append(_coconut.next(iterator))\n except _coconut.StopIteration:\n loop = False\n break\n if group:\n yield _coconut.tuple(group)\n def __len__(self):\n return _coconut.len(self.iter)\n def __repr__(self):\n return \"groupsof(%r)\" % (self.iter,)\n def __reduce__(self):\n return (self.__class__, (self.group_size, self.iter))\n def __copy__(self):\n return self.__class__(self.group_size, _coconut.copy.copy(self.iter))\n def __fmap__(self, func):\n return _coconut_map(func, self)\nclass recursive_iterator(object):\n \"\"\"Decorator that optimizes a function for iterator recursion.\"\"\"\n __slots__ = (\"func\", \"tee_store\", \"backup_tee_store\")\n def __init__(self, func):\n self.func = func\n self.tee_store = {}\n self.backup_tee_store = []\n def __call__(self, *args, **kwargs):\n key = (args, _coconut.frozenset(kwargs))\n use_backup = False\n try:\n hash(key)\n except _coconut.Exception:\n try:\n key = _coconut.pickle.dumps(key, -1)\n except _coconut.Exception:\n use_backup = True\n if use_backup:\n for i, (k, v) in _coconut.enumerate(self.backup_tee_store):\n if k == key:\n to_tee, store_pos = v, i\n break\n else: # no break\n to_tee = self.func(*args, **kwargs)\n store_pos = None\n to_store, to_return = _coconut_tee(to_tee)\n if store_pos is None:\n self.backup_tee_store.append([key, to_store])\n else:\n self.backup_tee_store[store_pos][1] = to_store\n else:\n self.tee_store[key], to_return = _coconut_tee(self.tee_store.get(key) or self.func(*args, **kwargs))\n return to_return\n def __repr__(self):\n return \"@recursive_iterator(\" + _coconut.repr(self.func) + \")\"\n def __reduce__(self):\n return (self.__class__, (self.func,))\n def __get__(self, obj, objtype=None):\n return _coconut.functools.partial(self, obj)\nclass _coconut_FunctionMatchErrorContext(object):\n __slots__ = ('exc_class', 'taken')\n threadlocal_var = _coconut.threading.local()\n def __init__(self, exc_class):\n self.exc_class = exc_class\n self.taken = False\n def __enter__(self):\n try:\n self.threadlocal_var.contexts.append(self)\n except _coconut.AttributeError:\n self.threadlocal_var.contexts = [self]\n def __exit__(self, type, value, traceback):\n self.threadlocal_var.contexts.pop()\n @classmethod\n def get(cls):\n try:\n ctx = cls.threadlocal_var.contexts[-1]\n except (_coconut.AttributeError, _coconut.IndexError):\n return _coconut_MatchError\n if not ctx.taken:\n ctx.taken = True\n return ctx.exc_class\n return _coconut_MatchError\n_coconut_get_function_match_error = _coconut_FunctionMatchErrorContext.get\nclass _coconut_base_pattern_func(object):\n __slots__ = (\"FunctionMatchError\", \"__doc__\", \"patterns\")\n def __init__(self, *funcs):\n self.FunctionMatchError = _coconut.type(_coconut_str(\"MatchError\"), (_coconut_MatchError,), {})\n self.__doc__ = None\n self.patterns = []\n for func in funcs:\n self.add(func)\n def add(self, func):\n self.__doc__ = _coconut.getattr(func, \"__doc__\", None) or self.__doc__\n if _coconut.isinstance(func, _coconut_base_pattern_func):\n self.patterns += func.patterns\n else:\n self.patterns.append(func)\n def __call__(self, *args, **kwargs):\n for func in self.patterns[:-1]:\n try:\n with _coconut_FunctionMatchErrorContext(self.FunctionMatchError):\n return func(*args, **kwargs)\n except self.FunctionMatchError:\n pass\n return self.patterns[-1](*args, **kwargs)\n def _coconut_tco_func(self, *args, **kwargs):\n for func in self.patterns[:-1]:\n try:\n with _coconut_FunctionMatchErrorContext(self.FunctionMatchError):\n return func(*args, **kwargs)\n except 
self.FunctionMatchError:\n pass\n return _coconut_tail_call(self.patterns[-1], *args, **kwargs)\n def __repr__(self):\n return \"addpattern(\" + _coconut.repr(self.patterns[0]) + \")(*\" + _coconut.repr(self.patterns[1:]) + \")\"\n def __reduce__(self):\n return (self.__class__, _coconut.tuple(self.patterns))\n def __get__(self, obj, objtype=None):\n return _coconut.functools.partial(self, obj)\ndef addpattern(base_func):\n \"\"\"Decorator to add a new case to a pattern-matching function,\n where the new case is checked last.\"\"\"\n return _coconut.functools.partial(_coconut_base_pattern_func, base_func)\n_coconut_addpattern = addpattern\ndef prepattern(base_func):\n \"\"\"DEPRECATED: Use addpattern instead.\"\"\"\n def pattern_prepender(func):\n return addpattern(func)(base_func)\n return pattern_prepender\nclass _coconut_partial(object):\n __slots__ = (\"func\", \"_argdict\", \"_arglen\", \"_stargs\", \"keywords\")\n if hasattr(_coconut.functools.partial, \"__doc__\"):\n __doc__ = _coconut.functools.partial.__doc__\n def __init__(self, func, argdict, arglen, *args, **kwargs):\n self.func = func\n self._argdict = argdict\n self._arglen = arglen\n self._stargs = args\n self.keywords = kwargs\n def __reduce__(self):\n return (self.__class__, (self.func, self._argdict, self._arglen) + self._stargs, self.keywords)\n def __setstate__(self, keywords):\n self.keywords = keywords\n @property\n def args(self):\n return _coconut.tuple(self._argdict.get(i) for i in _coconut.range(self._arglen)) + self._stargs\n def __call__(self, *args, **kwargs):\n callargs = []\n argind = 0\n for i in _coconut.range(self._arglen):\n if i in self._argdict:\n callargs.append(self._argdict[i])\n elif argind >= _coconut.len(args):\n raise _coconut.TypeError(\"expected at least \" + _coconut.str(self._arglen - _coconut.len(self._argdict)) + \" argument(s) to \" + _coconut.repr(self))\n else:\n callargs.append(args[argind])\n argind += 1\n callargs += self._stargs\n callargs += args[argind:]\n kwargs.update(self.keywords)\n return self.func(*callargs, **kwargs)\n def __repr__(self):\n args = []\n for i in _coconut.range(self._arglen):\n if i in self._argdict:\n args.append(_coconut.repr(self._argdict[i]))\n else:\n args.append(\"?\")\n for arg in self._stargs:\n args.append(_coconut.repr(arg))\n return _coconut.repr(self.func) + \"$(\" + \", \".join(args) + \")\"\ndef consume(iterable, keep_last=0):\n \"\"\"consume(iterable, keep_last) fully exhausts iterable and return the last keep_last elements.\"\"\"\n return _coconut.collections.deque(iterable, maxlen=keep_last)\nclass starmap(_coconut.itertools.starmap):\n __slots__ = (\"func\", \"iter\")\n if hasattr(_coconut.itertools.starmap, \"__doc__\"):\n __doc__ = _coconut.itertools.starmap.__doc__\n def __new__(cls, function, iterable):\n new_map = _coconut.itertools.starmap.__new__(cls, function, iterable)\n new_map.func = function\n new_map.iter = iterable\n return new_map\n def __getitem__(self, index):\n if _coconut.isinstance(index, _coconut.slice):\n return self.__class__(self.func, _coconut_igetitem(self.iter, index))\n return self.func(*_coconut_igetitem(self.iter, index))\n def __reversed__(self):\n return self.__class__(self.func, *_coconut_reversed(self.iter))\n def __len__(self):\n return _coconut.len(self.iter)\n def __repr__(self):\n return \"starmap(%r, %r)\" % (self.func, self.iter)\n def __reduce__(self):\n return (self.__class__, (self.func, self.iter))\n def __reduce_ex__(self, _):\n return self.__reduce__()\n def __copy__(self):\n return 
self.__class__(self.func, _coconut.copy.copy(self.iter))\n def __fmap__(self, func):\n return self.__class__(_coconut_forward_compose(self.func, func), self.iter)\ndef makedata(data_type, *args):\n \"\"\"Construct an object of the given data_type containing the given arguments.\"\"\"\n if _coconut.hasattr(data_type, \"_make\") and _coconut.issubclass(data_type, _coconut.tuple):\n return data_type._make(args)\n if _coconut.issubclass(data_type, (_coconut.map, _coconut.range, _coconut.abc.Iterator)):\n return args\n if _coconut.issubclass(data_type, _coconut.str):\n return \"\".join(args)\n return data_type(args)\ndef datamaker(data_type):\n \"\"\"DEPRECATED: Use makedata instead.\"\"\"\n return _coconut.functools.partial(makedata, data_type)\ndef fmap(func, obj):\n \"\"\"fmap(func, obj) creates a copy of obj with func applied to its contents.\n Override by defining obj.__fmap__(func).\"\"\"\n if _coconut.hasattr(obj, \"__fmap__\"):\n return obj.__fmap__(func)\n if obj.__class__.__module__ == \"numpy\":\n from numpy import vectorize\n return vectorize(func)(obj)\n return _coconut_makedata(obj.__class__, *(_coconut_starmap(func, obj.items()) if _coconut.isinstance(obj, _coconut.abc.Mapping) else _coconut_map(func, obj)))\ndef memoize(maxsize=None, *args, **kwargs):\n \"\"\"Decorator that memoizes a function,\n preventing it from being recomputed if it is called multiple times with the same arguments.\"\"\"\n return _coconut.functools.lru_cache(maxsize, *args, **kwargs)\n_coconut_MatchError, _coconut_count, _coconut_enumerate, _coconut_makedata, _coconut_map, _coconut_reversed, _coconut_starmap, _coconut_tee, _coconut_zip, TYPE_CHECKING, reduce, takewhile, dropwhile = MatchError, count, enumerate, makedata, map, reversed, starmap, tee, zip, False, _coconut.functools.reduce, _coconut.itertools.takewhile, _coconut.itertools.dropwhile\n\n# Compiled Coconut: -----------------------------------------------------------\n\nfrom coconut.__coconut__ import consume as coc_consume # type: ignore\n\nfrom coconut.constants import IPY # type: ignore\nfrom coconut.constants import PY35 # type: ignore\nfrom coconut.constants import WINDOWS # type: ignore\nfrom coconut.constants import PYPY # type: ignore\nfrom coconut.exceptions import CoconutSyntaxError\nfrom coconut.exceptions import CoconutStyleError\nfrom coconut.exceptions import CoconutSyntaxError\nfrom coconut.exceptions import CoconutTargetError\nfrom coconut.exceptions import CoconutParseError # type: ignore\nfrom coconut.convenience import CoconutException\nfrom coconut.convenience import cmd\nfrom coconut.convenience import version\nfrom coconut.convenience import setup\nfrom coconut.convenience import parse\nfrom coconut.convenience import coconut_eval\n\nif IPY and not WINDOWS:\n if PY35:\n if _coconut_sys.version_info < (3, 4): # type: ignore\n import trollius as asyncio # type: ignore\n else: # type: ignore\n import asyncio # type: ignore\n from coconut.icoconut import CoconutKernel # type: ignore\nelse:\n CoconutKernel = None # type: ignore\n\ndef assert_raises(c, exc):\n \"\"\"Test whether callable c raises an exception of type exc.\"\"\"\n try:\n c()\n except exc:\n return True\n else:\n raise AssertionError(\"%s failed to raise exception %s\" % (c, exc))\n\n@_coconut_tco\ndef unwrap_future(maybe_future):\n \"\"\"\n If the passed value looks like a Future, return its result, otherwise return the value unchanged.\n\n This is needed for the CoconutKernel test to be compatible with ipykernel version 5 and newer,\n where IPyKernel.do_execute is 
a coroutine.\n \"\"\"\n\n if hasattr(maybe_future, 'result') and callable(maybe_future.result):\n return _coconut_tail_call(maybe_future.result)\n return maybe_future\n\ndef test_extras():\n if IPY:\n import coconut.highlighter # type: ignore\n assert consume(range(10), keep_last=1)[0] == 9 == coc_consume(range(10), keep_last=1)[0]\n assert version() == version(\"num\")\n assert version(\"name\")\n assert version(\"spec\")\n assert version(\"tag\")\n assert version(\"-v\")\n assert_raises(lambda _=None: version(\"other\"), CoconutException)\n def _coconut_lambda_0(_=None):\n raise CoconutException(\"derp\").syntax_err()\n assert_raises(_coconut_lambda_0, SyntaxError)\n assert coconut_eval(\"x -> x + 1\")(2) == 3\n assert coconut_eval(\"addpattern\")\n assert parse(\"abc\") == parse(\"abc\", \"sys\")\n assert parse(\"abc\", \"file\")\n assert parse(\"abc\", \"package\")\n assert parse(\"abc\", \"block\") == \"abc\\n\" == parse(\"abc\", \"single\")\n assert parse(\"abc\", \"eval\") == \"abc\" == parse(\" abc\", \"eval\")\n assert parse(\"abc\", \"any\") == \"abc\"\n assert parse(\"x |> map$(f)\", \"any\") == \"map(f, x)\"\n assert \"_coconut\" not in parse(\"a |> .b |> .m() |> f$(x) |> .[0]\", \"block\")\n assert parse(\"abc # derp\", \"any\") == \"abc # derp\"\n assert_raises(lambda _=None: parse(\" abc\", \"file\"), CoconutException)\n assert_raises(lambda _=None: parse(\"'\"), CoconutException)\n assert_raises(lambda _=None: parse(\"(\"), CoconutException)\n assert_raises(lambda _=None: parse(\"\\\\(\"), CoconutException)\n assert_raises(lambda _=None: parse(\"if a:\\n b\\n c\"), CoconutException)\n assert_raises(lambda _=None: parse(\"$\"), CoconutException)\n assert_raises(lambda _=None: parse(\"_coconut\"), CoconutException)\n assert parse(\"def f(x):\\n \\t pass\")\n assert parse(\"lambda x: x\")\n assert parse(\"u''\")\n assert parse(\"def f(x):\\\\\\n pass\")\n assert parse(\"abc \")\n assert parse(\"abc # derp\", \"any\") == \"abc # derp\"\n setup(line_numbers=True)\n assert parse(\"abc\", \"any\") == \"abc # line 1\"\n setup(keep_lines=True)\n assert parse(\"abc\", \"any\") == \"abc # abc\"\n setup(line_numbers=True, keep_lines=True)\n assert parse(\"abc\", \"any\") == \"abc # line 1: abc\"\n setup()\n assert \"prepattern\" in parse(\"\\n\", mode=\"file\")\n assert \"datamaker\" in parse(\"\\n\", mode=\"file\")\n setup(strict=True)\n assert \"prepattern\" not in parse(\"\\n\", mode=\"file\")\n assert \"datamaker\" not in parse(\"\\n\", mode=\"file\")\n assert_raises(lambda _=None: parse(\"def f(x):\\n \\t pass\"), CoconutStyleError)\n assert_raises(lambda _=None: parse(\"lambda x: x\"), CoconutStyleError)\n assert_raises(lambda _=None: parse(\"u''\"), CoconutStyleError)\n assert_raises(lambda _=None: parse(\"def f(x):\\\\\\n pass\"), CoconutStyleError)\n assert_raises(lambda _=None: parse(\"abc \"), CoconutStyleError)\n assert_raises(lambda _=None: parse(\"abc\", \"file\"), CoconutStyleError)\n assert_raises(lambda _=None: parse(\"a=1;\"), CoconutStyleError)\n assert_raises(lambda _=None: parse(\"class derp(object)\"), CoconutStyleError)\n setup()\n assert_raises(lambda _=None: cmd(\"-f\"), SystemExit)\n assert_raises(lambda _=None: cmd(\"-pa .\"), SystemExit)\n assert_raises(lambda _=None: cmd(\"-n . 
.\"), SystemExit)\n assert_raises(lambda _=None: parse(\"f$()\"), CoconutSyntaxError)\n assert_raises(lambda _=None: parse(\"f(*x, y)\"), CoconutSyntaxError)\n assert_raises(lambda _=None: parse(\"def f(x) = return x\"), CoconutSyntaxError)\n assert_raises(lambda _=None: parse(\"def f(x) =\\n return x\"), CoconutSyntaxError)\n assert_raises(lambda _=None: parse(\"a := b\"), CoconutParseError)\n assert_raises(lambda _=None: parse(\"(a := b)\"), CoconutTargetError)\n setup(target=\"2.7\")\n assert parse(\"from io import BytesIO\", mode=\"any\") == \"from io import BytesIO\"\n assert_raises(lambda _=None: parse(\"def f(*, x=None) = x\"), CoconutTargetError)\n setup(target=\"3.6\")\n assert parse(\"def f(*, x=None) = x\")\n setup(target=\"3.8\")\n assert parse(\"(a := b)\")\n assert parse(\"print(a := 1, b := 2)\")\n assert parse(\"def f(a, /, b) = a, b\")\n if CoconutKernel is not None:\n if PY35:\n asyncio.set_event_loop(asyncio.new_event_loop())\n k = CoconutKernel()\n exec_result = (unwrap_future)(k.do_execute(\"derp = pow$(?, 2)\", False, True, {\"two\": \"(+)(1, 1)\"}, True))\n assert exec_result[\"status\"] == \"ok\"\n assert exec_result[\"user_expressions\"][\"two\"][\"data\"][\"text/plain\"] == \"2\"\n assert k.do_is_complete(\"if abc:\")[\"status\"] == \"incomplete\"\n assert k.do_is_complete(\"f(\")[\"status\"] == \"incomplete\"\n assert k.do_is_complete(\"abc\")[\"status\"] == \"complete\"\n inspect_result = k.do_inspect(\"derp\", 4, 0)\n assert inspect_result[\"status\"] == \"ok\"\n assert inspect_result[\"found\"]\n assert inspect_result[\"data\"][\"text/plain\"]\n complete_result = k.do_complete(\"der\", 1)\n assert complete_result[\"status\"] == \"ok\"\n assert \"derp\" in complete_result[\"matches\"]\n assert complete_result[\"cursor_start\"] == 0\n assert complete_result[\"cursor_end\"] == 1\n keyword_complete_result = k.do_complete(\"ma\", 1)\n assert keyword_complete_result[\"status\"] == \"ok\"\n assert \"match\" in keyword_complete_result[\"matches\"]\n assert \"map\" in keyword_complete_result[\"matches\"]\n assert keyword_complete_result[\"cursor_start\"] == 0\n assert keyword_complete_result[\"cursor_end\"] == 1\n if not PYPY:\n import numpy as np\n assert np.all(fmap(lambda _=None: _ + 1, np.arange(3)) == np.array([1, 2, 3]))\n print(\"<success>\")\n\nif __name__ == \"__main__\":\n print(\"Expect Coconut errors below from running extras:\")\n print(\"(but make sure you get a <success> after them)\")\n test_extras()\n"
] |
[
[
"numpy.array",
"numpy.arange",
"numpy.vectorize"
]
] |
brandontrabucco/cs285
|
[
"0ed5fca1d897bf197a43e2be14b204606ae4c36c"
] |
[
"cs285/distributions/continuous/tanh_gaussian.py"
] |
[
"\"\"\"Author: Brandon Trabucco, Copyright 2019, MIT License\"\"\"\n\n\nfrom cs285.distributions.continuous.gaussian import Gaussian\nimport tensorflow as tf\nimport math\n\n\nclass TanhGaussian(Gaussian):\n\n def __init__(\n self,\n *args,\n std=None,\n **kwargs\n ):\n Gaussian.__init__(self, *args, std=std, **kwargs)\n\n def clone(\n self,\n *inputs\n ):\n # create an exact duplicate (different pointers) of the policy\n return TanhGaussian(\n tf.keras.models.clone_model(self.model), std=self.std)\n\n def sample(\n self,\n *inputs\n ):\n # sample from a gaussian distribution\n gaussian_samples, log_probs = Gaussian.sample(self, *inputs)\n\n # pass samples through the tanh\n tanh_samples = tf.tanh(gaussian_samples)\n\n # compute the log probability density of the samples\n return tanh_samples, log_probs - 2.0 * tf.reduce_sum(\n math.log(2.0)\n - gaussian_samples\n - tf.math.softplus(-2.0 * gaussian_samples), axis=(-1))\n\n def expected_value(\n self,\n *inputs\n ):\n # expected value of a gaussian distribution\n gaussian_samples, log_probs = Gaussian.expected_value(self, *inputs)\n\n # compute the log probability density of the expected value\n return tf.tanh(gaussian_samples), log_probs - 2.0 * tf.reduce_sum(\n math.log(2.0)\n - gaussian_samples\n - tf.math.softplus(-2.0 * gaussian_samples), axis=(-1))\n\n def log_prob(\n self,\n tanh_samples,\n *inputs\n ):\n # convert tanh gaussian samples to gaussian samples\n gaussian_samples = tf.math.atanh(tf.clip_by_value(tanh_samples, -0.99, 0.99))\n\n # compute the log probability density under a gaussian\n log_probs = Gaussian.log_prob(self, gaussian_samples, *inputs)\n\n # compute the log probability density of the samples\n return log_probs - 2.0 * tf.reduce_sum(\n math.log(2.0)\n - gaussian_samples\n - tf.math.softplus(-2.0 * gaussian_samples), axis=(-1))\n"
] |
[
[
"tensorflow.tanh",
"tensorflow.clip_by_value",
"tensorflow.keras.models.clone_model",
"tensorflow.math.softplus"
]
] |
Pgaskins/mycode.py
|
[
"ba5bbe5a6bb727d535ff5c984f7e6ac9429bbf24"
] |
[
"graphing/graphmaker-csvreader.py"
] |
[
"#!/usr/bin/python3\n\n# from python std library\nimport csv\n\n# python3 -m pip install np\nimport numpy as np\n# python3 -m pip install matplotlib\nimport matplotlib\nmatplotlib.use('Agg')\n# sudo apt install python3-tk\nimport matplotlib.pyplot as plt\n\ndef parsecsvdata():\n \"\"\"returns a list. [0] is LAN and [1] WAN data\"\"\"\n summary = [] # list that will contain [(LAN), (WAN)]\n\n # open csv data\n with open(\"/home/student/mycode.py/graphing/2018summary.csv\",\\\n \"r\") as downtime:\n # parse csv data with csv.reader\n downdata = csv.reader(downtime, delimiter=\",\")\n for row in downdata:\n rowdat = (int(row[0]), int(row[1]), int(row[2]), int(row[3]))\n summary.append(rowdat) # add dict to list\n return summary\n\ndef main():\n N = 4\n ## grab our data\n summary = parsecsvdata() # grab our data\n localnetMeans = summary[0] # LAN data\n wanMeans = summary[1] # WAN data\n\n ind = np.arange(N) # the x locations for the groups\n # the width of the bars: can also be len(x) sequence\n width = 0.35\n\n # describe where to display p1\n p1 = plt.bar(ind, localnetMeans, width)\n # stack p2 on top of p1\n p2 = plt.bar(ind, wanMeans, width, bottom=localnetMeans)\n\n # Describe the table metadata\n plt.ylabel(\"Length of Outage (mins)\")\n plt.title(\"2018 Network Summary\")\n plt.xticks(ind, (\"Q1\", \"Q2\", \"Q3\", \"Q4\"))\n plt.yticks(np.arange(0, 81, 10))\n plt.legend((p1[0], p2[0]), (\"LAN\", \"WAN\"))\n\n # SAVE the graph locally\n plt.savefig(\"/home/student/mycode.py/graphing/2018summaryv2.png\")\n # Save to \"~/static\"\n plt.savefig(\"/home/student/static/2018summaryv2.png\") \n print(\"Graph created.\")\n\nif __name__ == \"__main__\":\n main()\n\n"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks"
]
] |
grebtsew/Object-and-Facial-detection-in-python
|
[
"57c4bf8d934cc8d6dbaa0cfc56b2b343795ceef1"
] |
[
"tensorflow/multi threaded implementation/utils/detect_and_align.py"
] |
[
"from six import string_types, iteritems\nfrom scipy import misc\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport cv2\n\n\ndef align_image(img, pnet, rnet, onet):\n margin = 44\n image_size = 160\n\n img_size = np.asarray(img.shape)[0:2]\n bounding_boxes, landmarks, accur = detect_face(img, pnet, rnet, onet)\n nrof_bb = bounding_boxes.shape[0]\n padded_bounding_boxes = [None] * nrof_bb\n face_patches = [None] * nrof_bb\n\n if nrof_bb > 0:\n landmarks = np.stack(landmarks)\n landmarks = np.transpose(landmarks, (1, 0))\n for i in range(nrof_bb):\n det = np.squeeze(bounding_boxes[i, 0:4])\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0] - margin / 2, 0)\n bb[1] = np.maximum(det[1] - margin / 2, 0)\n bb[2] = np.minimum(det[2] + margin / 2, img_size[1])\n bb[3] = np.minimum(det[3] + margin / 2, img_size[0])\n cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]\n aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')\n prewhitened = prewhiten(aligned)\n padded_bounding_boxes[i] = bb\n face_patches[i] = prewhitened\n\n return face_patches, padded_bounding_boxes, landmarks, accur\n\n\ndef prewhiten(x):\n mean = np.mean(x)\n std = np.std(x)\n std_adj = np.maximum(std, 1.0 / np.sqrt(x.size))\n y = np.multiply(np.subtract(x, mean), 1 / std_adj)\n return y\n\n\ndef imresample(img, sz):\n im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)\n return im_data\n\n\ndef generateBoundingBox(imap, reg, scale, t):\n # use heatmap to generate bounding boxes\n stride = 2\n cellsize = 12\n\n imap = np.transpose(imap)\n dx1 = np.transpose(reg[:, :, 0])\n dy1 = np.transpose(reg[:, :, 1])\n dx2 = np.transpose(reg[:, :, 2])\n dy2 = np.transpose(reg[:, :, 3])\n y, x = np.where(imap >= t)\n if y.shape[0] == 1:\n dx1 = np.flipud(dx1)\n dy1 = np.flipud(dy1)\n dx2 = np.flipud(dx2)\n dy2 = np.flipud(dy2)\n score = imap[(y, x)]\n reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))\n if reg.size == 0:\n reg = np.empty((0, 3))\n bb = np.transpose(np.vstack([y, x]))\n q1 = np.fix((stride * bb + 1) / scale)\n q2 = np.fix((stride * bb + cellsize - 1 + 1) / scale)\n boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])\n return boundingbox, reg\n\n\ndef nms(boxes, threshold, method):\n if boxes.size == 0:\n return np.empty((0, 3)), None\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n s = boxes[:, 4]\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n I = np.argsort(s)\n pick = np.zeros_like(s, dtype=np.int16)\n counter = 0\n while I.size > 0:\n i = I[-1]\n pick[counter] = i\n counter += 1\n idx = I[0:-1]\n xx1 = np.maximum(x1[i], x1[idx])\n yy1 = np.maximum(y1[i], y1[idx])\n xx2 = np.minimum(x2[i], x2[idx])\n yy2 = np.minimum(y2[i], y2[idx])\n w = np.maximum(0.0, xx2 - xx1 + 1)\n h = np.maximum(0.0, yy2 - yy1 + 1)\n inter = w * h\n if method is 'Min':\n o = inter / np.minimum(area[i], area[idx])\n else:\n o = inter / (area[i] + area[idx] - inter)\n I = I[np.where(o <= threshold)]\n pick = pick[0:counter]\n return pick, o\n\n\ndef rerec(bboxA):\n # convert bboxA to square\n h = bboxA[:, 3] - bboxA[:, 1]\n w = bboxA[:, 2] - bboxA[:, 0]\n l = np.maximum(w, h)\n bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5\n bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5\n bboxA[:, 2:4] = bboxA[:, 0:2] + np.transpose(np.tile(l, (2, 1)))\n return bboxA\n\n\ndef pad(total_boxes, w, h):\n # compute the padding coordinates (pad the bounding boxes to square)\n tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)\n 
tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)\n numbox = total_boxes.shape[0]\n\n dx = np.ones((numbox), dtype=np.int32)\n dy = np.ones((numbox), dtype=np.int32)\n edx = tmpw.copy().astype(np.int32)\n edy = tmph.copy().astype(np.int32)\n\n x = total_boxes[:, 0].copy().astype(np.int32)\n y = total_boxes[:, 1].copy().astype(np.int32)\n ex = total_boxes[:, 2].copy().astype(np.int32)\n ey = total_boxes[:, 3].copy().astype(np.int32)\n\n tmp = np.where(ex > w)\n edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)\n ex[tmp] = w\n\n tmp = np.where(ey > h)\n edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)\n ey[tmp] = h\n\n tmp = np.where(x < 1)\n dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)\n x[tmp] = 1\n\n tmp = np.where(y < 1)\n dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)\n y[tmp] = 1\n\n return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph\n\n\ndef bbreg(boundingbox, reg):\n # calibrate bounding boxes\n if reg.shape[1] == 1:\n reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))\n\n w = boundingbox[:, 2] - boundingbox[:, 0] + 1\n h = boundingbox[:, 3] - boundingbox[:, 1] + 1\n b1 = boundingbox[:, 0] + reg[:, 0] * w\n b2 = boundingbox[:, 1] + reg[:, 1] * h\n b3 = boundingbox[:, 2] + reg[:, 2] * w\n b4 = boundingbox[:, 3] + reg[:, 3] * h\n boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4]))\n return boundingbox\n\n\ndef layer(op):\n def layer_decorated(self, *args, **kwargs):\n # Automatically set a name if not provided.\n name = kwargs.setdefault('name', self.get_unique_name(op.__name__))\n # Figure out the layer inputs.\n if len(self.terminals) == 0:\n raise RuntimeError('No input variables found for layer %s.' % name)\n elif len(self.terminals) == 1:\n layer_input = self.terminals[0]\n else:\n layer_input = list(self.terminals)\n # Perform the operation and get the output.\n layer_output = op(self, layer_input, *args, **kwargs)\n # Add to layer LUT.\n self.layers[name] = layer_output\n # This output is now the input for the next layer.\n self.feed(layer_output)\n # Return self for chained calls.\n return self\n\n return layer_decorated\n\n\nclass Network(object):\n\n def __init__(self, inputs, trainable=True):\n # The input nodes for this network\n self.inputs = inputs\n # The current list of terminal nodes\n self.terminals = []\n # Mapping from layer names to layers\n self.layers = dict(inputs)\n # If true, the resulting variables are set as trainable\n self.trainable = trainable\n\n self.setup()\n\n def setup(self):\n '''Construct the network. 
'''\n raise NotImplementedError('Must be implemented by the subclass.')\n\n def load(self, data_path, session, ignore_missing=False):\n '''Load network weights.\n data_path: The path to the numpy-serialized network weights\n session: The current TensorFlow session\n ignore_missing: If true, serialized weights for missing layers are ignored.\n '''\n data_dict = np.load(data_path, encoding='latin1').item()\n\n for op_name in data_dict:\n with tf.variable_scope(op_name, reuse=True):\n for param_name, data in iteritems(data_dict[op_name]):\n try:\n var = tf.get_variable(param_name)\n session.run(var.assign(data))\n except ValueError:\n if not ignore_missing:\n raise\n\n def feed(self, *args):\n '''Set the input(s) for the next operation by replacing the terminal nodes.\n The arguments can be either layer names or the actual layers.\n '''\n assert len(args) != 0\n self.terminals = []\n for fed_layer in args:\n if isinstance(fed_layer, string_types):\n try:\n fed_layer = self.layers[fed_layer]\n except KeyError:\n raise KeyError('Unknown layer name fed: %s' % fed_layer)\n self.terminals.append(fed_layer)\n return self\n\n def get_output(self):\n '''Returns the current network output.'''\n return self.terminals[-1]\n\n def get_unique_name(self, prefix):\n '''Returns an index-suffixed unique name for the given prefix.\n This is used for auto-generating layer names based on the type-prefix.\n '''\n ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1\n return '%s_%d' % (prefix, ident)\n\n def make_var(self, name, shape):\n '''Creates a new TensorFlow variable.'''\n return tf.get_variable(name, shape, trainable=self.trainable)\n\n def validate_padding(self, padding):\n '''Verifies that the padding is one of the supported ones.'''\n assert padding in ('SAME', 'VALID')\n\n @layer\n def conv(self,\n inp,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n name,\n relu=True,\n padding='SAME',\n group=1,\n biased=True):\n # Verify that the padding is acceptable\n self.validate_padding(padding)\n # Get the number of channels in the input\n c_i = int(inp.get_shape()[-1])\n # Verify that the grouping parameter is valid\n assert c_i % group == 0\n assert c_o % group == 0\n\n # Convolution for a given input and kernel\n def convolve(i, k):\n return tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)\n\n with tf.variable_scope(name) as scope:\n kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])\n # This is the common-case. Convolve the input without any further complications.\n output = convolve(inp, kernel)\n # Add the biases\n if biased:\n biases = self.make_var('biases', [c_o])\n output = tf.nn.bias_add(output, biases)\n if relu:\n # ReLU non-linearity\n output = tf.nn.relu(output, name=scope.name)\n return output\n\n @layer\n def prelu(self, inp, name):\n with tf.variable_scope(name):\n i = int(inp.get_shape()[-1])\n alpha = self.make_var('alpha', shape=(i,))\n output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))\n return output\n\n @layer\n def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):\n self.validate_padding(padding)\n return tf.nn.max_pool(inp,\n ksize=[1, k_h, k_w, 1],\n strides=[1, s_h, s_w, 1],\n padding=padding,\n name=name)\n\n @layer\n def fc(self, inp, num_out, name, relu=True):\n with tf.variable_scope(name):\n input_shape = inp.get_shape()\n if input_shape.ndims == 4:\n # The input is spatial. 
Vectorize it first.\n dim = 1\n for d in input_shape[1:].as_list():\n dim *= int(d)\n feed_in = tf.reshape(inp, [-1, dim])\n else:\n feed_in, dim = (inp, input_shape[-1].value)\n weights = self.make_var('weights', shape=[dim, num_out])\n biases = self.make_var('biases', [num_out])\n op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b\n fc = op(feed_in, weights, biases, name=name)\n return fc\n\n @layer\n def softmax(self, target, axis, name=None):\n max_axis = tf.reduce_max(target, axis, keepdims=True)\n target_exp = tf.exp(target - max_axis)\n normalize = tf.reduce_sum(target_exp, axis, keepdims=True)\n softmax = tf.div(target_exp, normalize, name)\n return softmax\n\n\nclass PNet(Network):\n def setup(self):\n (self.feed('data')\n .conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')\n .prelu(name='PReLU1')\n .max_pool(2, 2, 2, 2, name='pool1')\n .conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')\n .prelu(name='PReLU2')\n .conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')\n .prelu(name='PReLU3')\n .conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')\n .softmax(3, name='prob1'))\n\n (self.feed('PReLU3')\n .conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))\n\n\nclass RNet(Network):\n def setup(self):\n (self.feed('data')\n .conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')\n .prelu(name='prelu1')\n .max_pool(3, 3, 2, 2, name='pool1')\n .conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')\n .prelu(name='prelu2')\n .max_pool(3, 3, 2, 2, padding='VALID', name='pool2')\n .conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')\n .prelu(name='prelu3')\n .fc(128, relu=False, name='conv4')\n .prelu(name='prelu4')\n .fc(2, relu=False, name='conv5-1')\n .softmax(1, name='prob1'))\n\n (self.feed('prelu4')\n .fc(4, relu=False, name='conv5-2'))\n\n\nclass ONet(Network):\n def setup(self):\n (self.feed('data')\n .conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')\n .prelu(name='prelu1')\n .max_pool(3, 3, 2, 2, name='pool1')\n .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')\n .prelu(name='prelu2')\n .max_pool(3, 3, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')\n .prelu(name='prelu3')\n .max_pool(2, 2, 2, 2, name='pool3')\n .conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')\n .prelu(name='prelu4')\n .fc(256, relu=False, name='conv5')\n .prelu(name='prelu5')\n .fc(2, relu=False, name='conv6-1')\n .softmax(1, name='prob1'))\n\n (self.feed('prelu5')\n .fc(4, relu=False, name='conv6-2'))\n\n (self.feed('prelu5')\n .fc(10, relu=False, name='conv6-3'))\n\n\ndef create_mtcnn(sess, model_path):\n if not model_path:\n model_path, _ = os.path.split(os.path.realpath(__file__))\n\n with tf.variable_scope('pnet'):\n data = tf.placeholder(tf.float32, (None, None, None, 3), 'input')\n pnet = PNet({'data': data})\n pnet.load(os.path.join(model_path, 'det1.npy'), sess)\n with tf.variable_scope('rnet'):\n data = tf.placeholder(tf.float32, (None, 24, 24, 3), 'input')\n rnet = RNet({'data': data})\n rnet.load(os.path.join(model_path, 'det2.npy'), sess)\n with tf.variable_scope('onet'):\n data = tf.placeholder(tf.float32, (None, 48, 48, 3), 'input')\n onet = ONet({'data': data})\n onet.load(os.path.join(model_path, 'det3.npy'), sess)\n\n def pnet_fun(img):\n return sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0': img})\n\n def rnet_fun(img):\n return sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), 
feed_dict={'rnet/input:0': img})\n\n def onet_fun(img):\n return sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0': img})\n\n return pnet_fun, rnet_fun, onet_fun\n\n\ndef detect_face(img, pnet, rnet, onet):\n\n minsize = 20 # minimum size of face\n threshold = [0.6, 0.7, 0.7] # three steps's threshold\n factor = 0.709 # scale factor\n accur = None\n score = None\n\n factor_count = 0\n total_boxes = np.empty((0, 9))\n points = []\n h = img.shape[0]\n w = img.shape[1]\n minl = np.amin([h, w])\n m = 12.0 / minsize\n minl = minl * m\n # creat scale pyramid\n scales = []\n while minl >= 12:\n scales += [m * np.power(factor, factor_count)]\n minl = minl * factor\n factor_count += 1\n\n # first stage\n for j in range(len(scales)):\n scale = scales[j]\n hs = int(np.ceil(h * scale))\n ws = int(np.ceil(w * scale))\n im_data = imresample(img, (hs, ws))\n im_data = (im_data - 127.5) * 0.0078125\n img_x = np.expand_dims(im_data, 0)\n img_y = np.transpose(img_x, (0, 2, 1, 3))\n out = pnet(img_y)\n out0 = np.transpose(out[0], (0, 2, 1, 3))\n out1 = np.transpose(out[1], (0, 2, 1, 3))\n\n boxes, _ = generateBoundingBox(out1[0, :, :, 1].copy(), out0[0, :, :, :].copy(), scale, threshold[0])\n\n # inter-scale nms\n pick, accur = nms(boxes.copy(), 0.5, 'Union')\n if boxes.size > 0 and pick.size > 0:\n boxes = boxes[pick, :]\n total_boxes = np.append(total_boxes, boxes, axis=0)\n\n numbox = total_boxes.shape[0]\n if numbox > 0:\n pick, accur = nms(total_boxes.copy(), 0.7, 'Union')\n total_boxes = total_boxes[pick, :]\n regw = total_boxes[:, 2] - total_boxes[:, 0]\n regh = total_boxes[:, 3] - total_boxes[:, 1]\n qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw\n qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh\n qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw\n qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh\n total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))\n total_boxes = rerec(total_boxes.copy())\n total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)\n dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)\n\n numbox = total_boxes.shape[0]\n if numbox > 0:\n # second stage\n tempimg = np.zeros((24, 24, 3, numbox))\n for k in range(0, numbox):\n tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))\n tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]\n if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\n tempimg[:, :, :, k] = imresample(tmp, (24, 24))\n else:\n return np.empty()\n tempimg = (tempimg - 127.5) * 0.0078125\n tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))\n out = rnet(tempimg1)\n out0 = np.transpose(out[0])\n out1 = np.transpose(out[1])\n score = out1[1, :]\n ipass = np.where(score > threshold[1])\n total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])\n mv = out0[:, ipass[0]]\n if total_boxes.shape[0] > 0:\n pick, accur = nms(total_boxes, 0.7, 'Union')\n total_boxes = total_boxes[pick, :]\n total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))\n total_boxes = rerec(total_boxes.copy())\n\n numbox = total_boxes.shape[0]\n \n if numbox > 0:\n # third stage\n total_boxes = np.fix(total_boxes).astype(np.int32)\n dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)\n tempimg = np.zeros((48, 48, 3, numbox))\n for k in range(0, numbox):\n tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))\n tmp[dy[k] - 1:edy[k], dx[k] - 1:edx[k], :] = 
img[y[k] - 1:ey[k], x[k] - 1:ex[k], :]\n if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0:\n tempimg[:, :, :, k] = imresample(tmp, (48, 48))\n else:\n return np.empty()\n tempimg = (tempimg - 127.5) * 0.0078125\n tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))\n out = onet(tempimg1)\n out0 = np.transpose(out[0])\n out1 = np.transpose(out[1])\n out2 = np.transpose(out[2])\n score = out2[1, :]\n points = out1\n ipass = np.where(score > threshold[2])\n points = points[:, ipass[0]]\n total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])\n mv = out0[:, ipass[0]]\n\n w = total_boxes[:, 2] - total_boxes[:, 0] + 1\n h = total_boxes[:, 3] - total_boxes[:, 1] + 1\n points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1\n points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1\n \n if total_boxes.shape[0] > 0:\n total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))\n pick, accur = nms(total_boxes.copy(), 0.7, 'Min')\n total_boxes = total_boxes[pick, :]\n points = points[:, pick]\n\n #if score is not None:\n # accur = score\n\n #if accur is not None and pick < accur.size:\n # return total_boxes, points, accur[pick]\n #else:\n return total_boxes, points, None\n"
] |
[
[
"tensorflow.exp",
"tensorflow.nn.conv2d",
"numpy.minimum",
"numpy.tile",
"numpy.load",
"numpy.mean",
"scipy.misc.imresize",
"tensorflow.reshape",
"numpy.where",
"numpy.zeros_like",
"numpy.empty",
"numpy.flipud",
"tensorflow.variable_scope",
"numpy.transpose",
"numpy.sqrt",
"numpy.append",
"tensorflow.nn.bias_add",
"tensorflow.nn.max_pool",
"numpy.vstack",
"numpy.expand_dims",
"numpy.fix",
"numpy.reshape",
"numpy.zeros",
"tensorflow.nn.relu",
"numpy.std",
"numpy.stack",
"numpy.subtract",
"tensorflow.get_variable",
"numpy.argsort",
"numpy.amin",
"tensorflow.reduce_sum",
"tensorflow.placeholder",
"numpy.power",
"numpy.squeeze",
"numpy.ceil",
"numpy.asarray",
"numpy.ones",
"tensorflow.reduce_max",
"tensorflow.div",
"numpy.maximum"
]
] |
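A minimal sketch of the greedy non-maximum suppression idea used by the nms() helper in the row above, restricted to its 'Union' (IoU) overlap criterion. The [x1, y1, x2, y2, score] box layout matches that code; the sample boxes and the 0.5 threshold are illustrative assumptions, not values taken from the dataset row.

import numpy as np

def simple_nms(boxes, threshold=0.5):
    # boxes: (N, 5) array of [x1, y1, x2, y2, score]; returns indices of kept boxes.
    if boxes.size == 0:
        return np.empty((0,), dtype=np.int64)
    x1, y1, x2, y2, s = boxes.T
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(s)[::-1]                      # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (area[i] + area[order[1:]] - inter)   # the 'Union' criterion
        order = order[1:][iou <= threshold]
    return np.array(keep, dtype=np.int64)

boxes = np.array([[10., 10., 50., 50., 0.9],
                  [12., 12., 52., 52., 0.8],
                  [100., 100., 140., 140., 0.7]])
print(simple_nms(boxes))   # [0 2]: the lower-scoring overlapping box is suppressed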
ejmichaud/torch-foresight
|
[
"e36a8fdd65f0432b9fa25a5127412b081159956b"
] |
[
"tests/test_ei.py"
] |
[
"import pytest\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom foresight import ei\n\n\n##################################\n#### H ####\n##################################\n\ndef test_H_0():\n x = torch.zeros((4,))\n x[1] = 1\n assert ei.H(x).item() == 0\n\ndef test_H_1():\n x = torch.ones((4,)) / 4\n assert ei.H(x).item() == 2\n\ndef test_H_2():\n x = torch.ones((256,)) / 256\n assert ei.H(x).item() == 8\n\ndef test_H_3():\n x = torch.ones((4,4)) / 4\n assert all(ei.H(x, dim=0) == 2)\n\ndef test_H_4():\n x = torch.ones((4,4)) / 4\n assert all(ei.H(x, dim=1) == 2)\n\ndef test_H_5():\n x = torch.zeros((4,))\n assert ei.H(x).item() == 0\n\n\n##################################\n#### lin_norm ####\n##################################\n\ndef test_lin_norm_0():\n x = torch.ones((4,4))\n x_normed = ei.lin_norm(x) == 0.25\n for row in x_normed:\n assert all(row)\n\ndef test_lin_norm_1():\n \"\"\"Check that negative entries become 0.\"\"\"\n x = torch.ones((5, 5))\n x[:, 0] = -1\n x_normed = ei.lin_norm(x)\n assert all(x_normed[:, 0] == 0)\n for row in x_normed[:, 1:]:\n assert all(row == 0.25)\n\ndef test_lin_norm_2():\n \"\"\"Check that rows of all 0s stay all 0s (no nan values via division by 0).\"\"\"\n x = torch.zeros((4,4))\n x_normed = ei.lin_norm(x)\n for row in x_normed:\n assert all(row == 0)\n\n\n##################################\n#### conv2d_create_matrix ####\n##################################\n\ndef test_conv2d_create_matrix_0():\n m = nn.Conv2d(1, 2, 2)\n m.weight = nn.Parameter(torch.ones((2, 1, 2, 2)))\n matrix = ei.conv2d_create_matrix(m, (1, 1, 3, 3), (1, 2, 2, 2))\n correct_matrix = torch.tensor([\n [1, 1, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 0, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 0, 1, 1, 0],\n [0, 0, 0, 0, 1, 1, 0, 1, 1],\n [1, 1, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 0, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 0, 1, 1, 0],\n [0, 0, 0, 0, 1, 1, 0, 1, 1]\n ]).to(torch.float32).t()\n assert all(torch.flatten(matrix == correct_matrix))\n\ndef test_conv2d_create_matrix_1():\n m = nn.Conv2d(1, 1, 2, stride=2)\n m.weight = nn.Parameter(torch.ones((1, 1, 2, 2)))\n matrix = ei.conv2d_create_matrix(m, (1, 1, 4, 4), (1, 1, 2, 2))\n correct_matrix = torch.tensor([\n [1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1]\n ]).to(torch.float32).t()\n assert all(torch.flatten(matrix == correct_matrix))\n\ndef test_conv2d_create_matrix_2():\n m = nn.Conv2d(2, 1, 2)\n m.weight = nn.Parameter(torch.ones((1, 2, 2, 2)))\n matrix = ei.conv2d_create_matrix(m, (1, 2, 3, 3), (1, 1, 2, 2))\n correct_matrix = torch.tensor([\n [1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0],\n [0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1]\n ]).to(torch.float32).t()\n assert all(torch.flatten(matrix == correct_matrix))\n\ndef test_conv2d_create_matrix_3():\n m = nn.Conv2d(1, 1, 2, padding=1)\n m.weight = nn.Parameter(torch.ones((1, 1, 2, 2)))\n matrix = ei.conv2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 4 ,4))\n correct_matrix = torch.tensor([\n [1, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 1, 0, 0, 0, 0, 0],\n [1, 1, 0, 1, 1, 0, 0, 0, 0],\n [0, 1, 1, 0, 1, 1, 0, 0, 0],\n [0, 0, 1, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 1, 0, 0],\n [0, 0, 0, 
1, 1, 0, 1, 1, 0],\n [0, 0, 0, 0, 1, 1, 0, 1, 1],\n [0, 0, 0, 0, 0, 1, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 1]\n ]).to(torch.float32).t()\n assert all(torch.flatten(matrix == correct_matrix))\n\ndef test_conv2d_create_matrix_4():\n m = nn.Conv2d(1, 1, 2, padding=1, stride=2)\n m.weight = nn.Parameter(torch.ones((1, 1, 2, 2)))\n matrix = ei.conv2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 2, 2))\n correct_matrix = torch.tensor([\n [1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 1, 1, 0, 1, 1]\n ]).to(torch.float32).t()\n assert all(torch.flatten(matrix == correct_matrix))\n\ndef test_conv2d_create_matrix_5():\n m = nn.Conv2d(1, 1, (1, 2), padding=1, stride=(2, 2))\n m.weight = nn.Parameter(torch.ones((1, 1, 1, 2)))\n matrix = ei.conv2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 3, 2))\n correct_matrix = torch.tensor([\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0]\n ]).to(torch.float32).t()\n assert all(torch.flatten(matrix == correct_matrix))\n\n\n##################################\n#### avgpool2d_create_matrix ####\n##################################\n\ndef test_avgpool2d_create_matrix_0():\n m = nn.AvgPool2d(2)\n matrix = ei.avgpool2d_create_matrix(m, (1, 1, 4, 4), (1, 1, 2, 2))\n correct_matrix = torch.tensor([\n [0.25, 0.25, 0, 0, 0.25, 0.25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0.25, 0.25, 0, 0, 0.25, 0.25, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0.25, 0.25, 0, 0, 0.25, 0.25, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.25, 0.25, 0, 0, 0.25, 0.25]\n ]).t()\n assert all(torch.flatten(matrix == correct_matrix))\n\ndef test_avgpool2d_create_matrix_1():\n m = nn.AvgPool2d(2, stride=1)\n matrix = ei.avgpool2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 2, 2))\n correct_matrix = torch.tensor([\n [0.25, 0.25, 0, 0.25, 0.25, 0, 0, 0, 0],\n [0, 0.25, 0.25, 0, 0.25, 0.25, 0, 0, 0],\n [0, 0, 0, 0.25, 0.25, 0, 0.25, 0.25, 0],\n [0, 0, 0, 0, 0.25, 0.25, 0, 0.25, 0.25]\n ]).t()\n assert all(torch.flatten(matrix == correct_matrix))\n\ndef test_avgpool2d_create_matrix_2():\n m = nn.AvgPool2d((1, 2), stride=1)\n matrix = ei.avgpool2d_create_matrix(m, (1, 1, 3, 3), (1, 1, 3, 2))\n correct_matrix = torch.tensor([\n [0.5, 0.5, 0, 0, 0, 0, 0, 0, 0],\n [0, 0.5, 0.5, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0.5, 0.5, 0, 0, 0, 0],\n [0, 0, 0, 0, 0.5, 0.5, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0.5, 0.5, 0],\n [0, 0, 0, 0, 0, 0, 0, 0.5, 0.5]\n ]).t()\n assert all(torch.flatten(matrix == correct_matrix))\n\n\n##################################\n#### determinism ####\n##################################\n\ndef test_determinism_0():\n m = nn.Linear(4, 4, bias=False)\n m.weight = nn.Parameter(torch.ones((4, 4)))\n computed_det = ei.determinism(m, input=torch.randn(1, 4))\n true_det = 2\n assert type(computed_det) == float and computed_det == true_det\n\n\n##################################\n#### degeneracy ####\n##################################\n\ndef test_degeneracy_0():\n m = nn.Linear(4, 4, bias=False)\n m.weight = nn.Parameter(torch.ones((4, 4)))\n computed_deg = ei.degeneracy(m, input=torch.randn(1, 4))\n true_deg = 2\n assert type(computed_deg) == float and computed_deg == true_deg\n\n\n##################################\n#### ei ####\n##################################\n\ndef test_ei_0():\n m = nn.Linear(4, 4, bias=False)\n m.weight = 
nn.Parameter(torch.ones((4, 4)))\n computed_ei = ei.ei(m, input=torch.randn(1, 4))\n true_ei = 0\n assert type(computed_ei) == float and computed_ei == true_ei\n\n\n"
] |
[
[
"torch.zeros",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.ones",
"torch.nn.Conv2d",
"torch.tensor",
"torch.flatten",
"torch.randn"
]
] |
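The tests in the row above pin down the expected behaviour of foresight.ei.H: 2 bits for a uniform distribution over 4 outcomes, 8 bits for a uniform distribution over 256, and 0 for a one-hot or all-zero vector. The library's own implementation is not shown in this row, so the following is only an assumption-based reimplementation in plain PyTorch that satisfies those cases (base-2 Shannon entropy with 0·log 0 treated as 0).

import torch

def H(p, dim=None):
    # Shannon entropy in bits; zero probabilities contribute 0 by convention.
    logp = torch.where(p > 0, torch.log2(p), torch.zeros_like(p))
    if dim is None:
        return -(p * logp).sum()
    return -(p * logp).sum(dim=dim)

print(H(torch.ones(4) / 4).item())        # 2.0, matching test_H_1
print(H(torch.zeros(4)).item())           # 0.0, matching test_H_5
print(H(torch.ones(4, 4) / 4, dim=0))     # tensor of 2.0s, matching test_H_3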
HoseungCha/mist-rnns
|
[
"523d1fa769a7ed29fce22138d1993a8acca28c41"
] |
[
"mnist_train.py"
] |
[
"import os\nimport sys\nimport argparse\nimport shutil\nimport time\n\nimport numpy as np\nimport tensorflow as tf\n\nimport models\nimport optimizers\nimport utils\n\nimport mnist\n\nINPUT_SIZE = 1\nTARGET_SIZE = 10\nBATCH_SIZE = 100\nVAL_BATCH_SIZE = 100\nNUM_OPT_STEPS = 15000\nNUM_STEPS_PER_TRAIN_SUMMARY = 10\nNUM_STEPS_PER_VAL_SUMMARY = 150\n\n\ndef parse_args():\n \"\"\" Parse command-line arguments.\n\n Returns:\n A tuple, `(args, params_str, layer_kwargs)`.\n \"\"\"\n\n description = main.__doc__\n formatter_class = argparse.ArgumentDefaultsHelpFormatter\n parser = argparse.ArgumentParser(description=description, formatter_class=formatter_class)\n\n parser.add_argument('--data_dir', type=str, default=mnist.DEFAULT_DATA_DIR,\n help='''The directory we will load data from and save results to.''')\n parser.add_argument('--debug', type=int, default=0,\n help='''If 1, downsample all images by a factor of 16 and print some useful information.''')\n parser.add_argument('--permute', type=int, default=1,\n help='''If 1, randomly permute the pixel order of each image (using the same random permutation\n for every image).''')\n parser.add_argument('--layer_type', type=str, default='MISTLayer',\n help='''The RNN layer to use. See `layers`.''')\n parser.add_argument('--activation_type', type=str, default='tanh',\n help='''An element-wise activation. See `tensorflow.nn`.''')\n parser.add_argument('--num_hidden_units', type=int, default=139,\n help='''The number of hidden units to use in the recurrent model.''')\n parser.add_argument('--optimizer', type=str, default='ClippingMomentumOptimizer',\n help='''The Optimizer to use. See `optimizers`.''')\n parser.add_argument('--learning_rate', type=float, default=1.0,\n help='''The learning rate.''')\n parser.add_argument('--optional_bias_shift', type=float, default=1.0,\n help='''Used with LSTMLayer and GRULayer. In the case of LSTM, this is\n more commonly known as forget-gate bias.''')\n parser.add_argument('--num_pre_act_mixture_delays', type=int, default=8,\n help='''Used only with the MISTLayer.''')\n parser.add_argument('--trial', type=int, default=0,\n help='''Useful if we'd like to run multiple trials with identical parameters.''')\n\n args = parser.parse_args()\n args.data_dir = os.path.expanduser(args.data_dir)\n args.pre_act_mixture_delays = 2 ** np.arange(args.num_pre_act_mixture_delays, dtype=np.int)\n\n params_str = '_'.join([\n '%d' % args.permute,\n '%s' % args.layer_type,\n '%s' % args.activation_type,\n '%04d' % args.num_hidden_units,\n '%.6f' % args.learning_rate,\n '%.1f' % args.optional_bias_shift,\n '%d' % args.num_pre_act_mixture_delays,\n '%02d' % args.trial\n ])\n\n layer_kwargs = {\n 'optional_bias_shift': args.optional_bias_shift,\n 'pre_act_mixture_delays': args.pre_act_mixture_delays,\n }\n\n return args, params_str, layer_kwargs\n\n\ndef main():\n \"\"\" Train an RNN for sequential (possibly permuted) MNIST recognition. 
\"\"\"\n\n args, params_str, layer_kwargs = parse_args()\n\n save_dir = os.path.join(args.data_dir, 'results', params_str)\n if os.path.exists(save_dir):\n shutil.rmtree(save_dir)\n os.makedirs(save_dir)\n\n outs = mnist.load_split(args.data_dir, val=True, permute=args.permute, normalize=True, num_val=2000, seed=0)\n train_images, train_labels, val_images, val_labels = outs\n if args.debug:\n train_images = train_images[:, ::4, ::4, :]\n val_images = val_images[:, ::4, ::4, :]\n\n # Flatten the images.\n train_inputs = train_images.reshape([len(train_images), -1, INPUT_SIZE])\n val_inputs = val_images.reshape([len(val_images), -1, INPUT_SIZE])\n\n # Align sequence-level labels with the appropriate time steps by padding with NaNs,\n # and to do so, first convert the labels to floats.\n length = train_inputs.shape[1]\n pad = lambda x: np.pad(x, [[0, 0], [length - 1, 0], [0, 0]], mode='constant', constant_values=np.nan)\n train_labels = pad(train_labels.reshape([-1, 1, 1]).astype(np.float))\n val_labels = pad(val_labels.reshape([-1, 1, 1]).astype(np.float))\n\n train_batches = utils.full_bptt_batch_generator(train_inputs, train_labels, BATCH_SIZE)\n\n model = models.RNNClassificationModel(args.layer_type, INPUT_SIZE, TARGET_SIZE, args.num_hidden_units,\n args.activation_type, **layer_kwargs)\n\n Optimizer = getattr(optimizers, args.optimizer)\n optimize_op = Optimizer(args.learning_rate).minimize(model.valid_seqwise_loss_for_opt)\n\n def _error_rate(valid_predictions, valid_targets):\n incorrect_mask = tf.logical_not(tf.equal(tf.argmax(valid_predictions, 1), tf.argmax(valid_targets, 1)))\n return tf.reduce_mean(tf.to_float(incorrect_mask))\n model.error_rate = _error_rate(model.valid_predictions, model.valid_targets)\n\n tf.summary.scalar('train loss', model.valid_seqwise_loss, collections=['train'])\n tf.summary.scalar('train error rate', model.error_rate, collections=['train'])\n\n model.val_error_rate = tf.placeholder(tf.float32, name='val_error_rate')\n tf.summary.scalar('val error rate', model.val_error_rate, collections=['val'])\n\n train_summary_op = tf.summary.merge_all('train')\n val_summary_op = tf.summary.merge_all('val')\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n sess.run(tf.global_variables_initializer())\n\n file_writer = tf.summary.FileWriter(save_dir, graph=sess.graph, flush_secs=10)\n saver = tf.train.Saver()\n\n best_val_error_rate = 1.0\n start_time = time.time()\n for step in range(NUM_OPT_STEPS):\n\n batch_inputs, batch_labels = next(train_batches)\n batch_targets = utils.one_hot(np.squeeze(batch_labels, 2), TARGET_SIZE)\n\n sess.run(optimize_op,\n feed_dict={model.inputs: batch_inputs,\n model.targets: batch_targets})\n\n if step % NUM_STEPS_PER_TRAIN_SUMMARY == 0:\n\n error_rate, summary = sess.run([model.error_rate, train_summary_op],\n feed_dict={model.inputs: batch_inputs,\n model.targets: batch_targets})\n\n file_writer.add_summary(summary, global_step=step)\n with open(os.path.join(save_dir, 'train_status.txt'), 'a') as f:\n line = '%s %06.1f %d %.4f' % (params_str, time.time() - start_time, step, error_rate)\n print(line, file=f)\n\n if step % NUM_STEPS_PER_VAL_SUMMARY == 0:\n\n val_batches = utils.full_bptt_batch_generator(val_inputs, val_labels, VAL_BATCH_SIZE, num_epochs=1,\n shuffle=False)\n error_rates = []\n for i, (batch_inputs, batch_labels) in enumerate(val_batches):\n batch_targets = utils.one_hot(np.squeeze(batch_labels, 2), TARGET_SIZE)\n valid_predictions, valid_targets, 
batch_error_rates = sess.run(\n [model.valid_predictions, model.valid_targets, model.error_rate],\n feed_dict={model.inputs: batch_inputs,\n model.targets: batch_targets}\n )\n error_rates.append(batch_error_rates)\n if args.debug and i == 0:\n num_samples = 25\n print('Step: %d. Some targets and predictions:' % step)\n print(np.argmax(valid_targets[:num_samples], axis=1))\n print(np.argmax(valid_predictions[:num_samples], axis=1))\n\n # This is approximate if the validation batch size doesn't evenly divide\n # the number of validation examples.\n val_error_rate = np.mean(error_rates, dtype=np.float)\n if val_error_rate < best_val_error_rate:\n best_val_error_rate = val_error_rate\n saver.save(sess, os.path.join(save_dir, 'model.ckpt'))\n\n summary = sess.run(val_summary_op, feed_dict={model.val_error_rate: val_error_rate})\n file_writer.add_summary(summary, global_step=step)\n with open(os.path.join(save_dir, 'val_status.txt'), 'a') as f:\n line = '%s %06.1f %d %.4f %.4f' % (params_str, time.time() - start_time, step,\n val_error_rate, best_val_error_rate)\n print(line, file=f)\n if args.debug:\n print(line)\n\n file_writer.close()\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.pad",
"tensorflow.summary.scalar",
"tensorflow.argmax",
"tensorflow.Session",
"tensorflow.train.Saver",
"numpy.mean",
"tensorflow.ConfigProto",
"tensorflow.placeholder",
"numpy.arange",
"tensorflow.to_float",
"numpy.argmax",
"tensorflow.summary.merge_all",
"tensorflow.summary.FileWriter",
"tensorflow.global_variables_initializer",
"numpy.squeeze"
]
] |
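The training script above aligns each sequence-level MNIST label with the final time step by left-padding the label array with NaNs, so a full-BPTT loss can mask out every step except the last. A small sketch of just that trick; the number of examples and the sequence length are illustrative assumptions.

import numpy as np

length = 784                                        # one pixel per time step for a flattened 28x28 image
labels = np.array([3., 7., 1.]).reshape(-1, 1, 1)   # (N, 1, 1) sequence-level labels

padded = np.pad(labels, [[0, 0], [length - 1, 0], [0, 0]],
                mode='constant', constant_values=np.nan)    # (N, length, 1)

print(padded.shape)                                         # (3, 784, 1)
print(np.isnan(padded[0, :-1, 0]).all(), padded[0, -1, 0])  # True 3.0: only the last step carries the label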
slac-lcls/ami
|
[
"0feb89248350b80a85689d8230277cea5bf4d470"
] |
[
"ami/graph_nodes.py"
] |
[
"import abc\nimport operator\nimport numpy as np\nfrom networkfox import operation\n\n\nclass Transformation(abc.ABC):\n\n def __init__(self, **kwargs):\n \"\"\"\n Keyword Arguments:\n name (str): Name of node\n inputs (list): List of inputs\n outputs (list): List of outputs\n func (function): Function node will call\n \"\"\"\n\n self.name = kwargs['name']\n\n inputs = kwargs['inputs']\n if type(inputs) is dict:\n self.inputs = list(inputs.values())\n else:\n self.inputs = inputs\n\n outputs = kwargs['outputs']\n if type(outputs) is dict:\n self.outputs = list(outputs.values())\n else:\n self.outputs = outputs\n\n self.func = kwargs['func']\n self.parent = kwargs.get('parent', None)\n self.color = kwargs.get('color', \"\")\n self.begin_run_func = kwargs.get('begin_run', None)\n self.end_run_func = kwargs.get('end_run', None)\n self.begin_step_func = kwargs.get('begin_step', None)\n self.end_step_func = kwargs.get('end_step', None)\n self.exportable = False\n self.is_global_operation = False\n\n def __hash__(self):\n return hash(self.name)\n\n def __eq__(self, other):\n \"\"\"\n Two nodes are considered equal if their name is equal.\n\n Args:\n other (Transformation): Node to compare against.\n \"\"\"\n return bool(self.name is not None and\n self.name == getattr(other, 'name', None))\n\n def __repr__(self):\n return u\"%s(name='%s', color='%s', inputs=%s, outputs=%s)\" % \\\n (self.__class__.__name__, self.name, self.color, self.inputs, self.outputs)\n\n def to_operation(self):\n \"\"\"\n Return NetworkFoX operation node.\n \"\"\"\n return operation(name=self.name, needs=self.inputs, provides=self.outputs, color=self.color,\n metadata={'parent': self.parent})(self.func)\n\n def begin_run(self, color=\"\"):\n if color == self.color and callable(self.begin_run_func):\n return self.begin_run_func()\n\n def end_run(self, color=\"\"):\n if color == self.color and callable(self.end_run_func):\n return self.end_run_func()\n\n def begin_step(self, step, color=\"\"):\n if color == self.color and callable(self.begin_step_func):\n return self.begin_step_func(step)\n\n def end_step(self, step, color=\"\"):\n if color == self.color and callable(self.end_step_func):\n return self.end_step_func(step)\n\n\nclass Map(Transformation):\n\n def __init__(self, **kwargs):\n \"\"\"\n Keyword Arguments:\n name (str): Name of node\n inputs (list): List of inputs\n outputs (list): List of outputs\n func (function): Function node will call\n \"\"\"\n super().__init__(**kwargs)\n\n\nclass StatefulTransformation(Transformation):\n\n def __init__(self, **kwargs):\n \"\"\"\n Keyword Arguments:\n name (str): Name of node\n inputs (list): List of inputs\n outputs (list): List of outputs\n reduction (function): Reduction function\n \"\"\"\n\n reduction = kwargs.pop('reduction', None)\n\n kwargs.setdefault('func', None)\n super().__init__(**kwargs)\n\n if reduction:\n assert hasattr(reduction, '__call__'), 'reduction is not callable'\n self.reduction = reduction\n\n @abc.abstractmethod\n def __call__(self, *args, **kwargs):\n return\n\n @abc.abstractmethod\n def reset(self):\n \"\"\"\n Reset nodes state.\n \"\"\"\n return\n\n def heartbeat_finished(self):\n \"\"\"\n Execute at the end of a heartbeat.\n \"\"\"\n return\n\n def to_operation(self):\n return operation(name=self.name, needs=self.inputs, provides=self.outputs,\n color=self.color, metadata={'parent': self.parent})(self)\n\n\nclass GlobalTransformation(StatefulTransformation):\n\n def __init__(self, **kwargs):\n \"\"\"\n Keyword Arguments:\n name (str): Name of 
node\n inputs (list): List of inputs\n outputs (list): List of outputs\n reduction (function): Reduction function\n is_expanded (bool): Indicates this node's input comes another part\n of the expanded operation\n num_contributors (int): the number of contributors providing input\n to this part of the global operation\n \"\"\"\n is_expanded = kwargs.pop('is_expanded', False)\n num_contributors = kwargs.pop('num_contributors', None)\n super().__init__(**kwargs)\n self.is_global_operation = True\n self.is_expanded = is_expanded\n self.num_contributors = num_contributors\n\n def on_expand(self):\n \"\"\"\n Called when expanding a global operation to get an extra kwargs that\n should be passed to the expanded nodes when they are constructed.\n\n This is intended to be overrided by subclasses if they need this!\n\n Returns:\n Dictionary of keyword arguments to pass when constructing the\n globally expanded version of this operation\n \"\"\"\n return {\"parent\": self.parent}\n\n\nclass ReduceByKey(GlobalTransformation):\n\n def __init__(self, **kwargs):\n kwargs.setdefault('reduction', operator.add)\n super().__init__(**kwargs)\n self.res = {}\n\n def __call__(self, *args, **kwargs):\n if len(args) == 2:\n # worker\n k, v = args\n if k in self.res:\n self.res[k] = self.reduction(self.res[k], v)\n else:\n self.res[k] = v\n else:\n # localCollector, globalCollector\n for r in args:\n for k, v in r.items():\n if k in self.res:\n self.res[k] = self.reduction(self.res[k], v)\n else:\n self.res[k] = v\n return self.res\n\n def reset(self):\n self.res = {}\n\n def heartbeat_finished(self):\n if self.color != 'globalCollector':\n self.reset()\n\n\nclass Accumulator(GlobalTransformation):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.res_factory = kwargs.pop('res_factory', lambda: 0)\n assert hasattr(self.res_factory, '__call__'), 'res_factory is not callable'\n self.res = self.res_factory()\n\n def __call__(self, *args, **kwargs):\n self.res = self.reduction(self.res, *args)\n return self.res\n\n def reset(self):\n self.res = self.res_factory()\n\n def heartbeat_finished(self):\n if self.color != 'globalCollector':\n self.reset()\n\n def on_expand(self):\n return {'parent': self.parent, 'res_factory': self.res_factory}\n\n\nclass PickN(GlobalTransformation):\n\n def __init__(self, **kwargs):\n N = kwargs.pop('N', 1)\n exportable = kwargs.pop('exportable', False)\n super().__init__(**kwargs)\n self.N = N\n self.exportable = exportable\n self.idx = 0\n self.res = [None]*self.N\n self.clear = False\n\n def __call__(self, *args, **kwargs):\n if self.clear:\n self.res = [None]*self.N\n self.clear = False\n\n if not args and kwargs:\n args = list(kwargs.values())\n if len(args) > 1:\n args = [args]\n elif self.is_expanded and len(args) == 1 and type(args[0]) is list and self.N > 1:\n args = args[0]\n\n for arg in args:\n self.res[self.idx] = arg\n self.idx = (self.idx + 1) % self.N\n\n if not any(x is None for x in self.res):\n self.clear = True\n if self.N > 1:\n return self.res\n elif self.N == 1:\n return self.res[0]\n\n def reset(self):\n self.res = [None]*self.N\n\n\nclass RollingBuffer(GlobalTransformation):\n\n def __init__(self, **kwargs):\n N = kwargs.pop('N', 1)\n use_numpy = kwargs.pop('use_numpy', False)\n unique = kwargs.pop('unique', False)\n super().__init__(**kwargs)\n self.N = N\n self.use_numpy = use_numpy\n self.unique = unique\n self.idx = 0\n self.count = 0\n self.res = None if use_numpy else []\n\n def __call__(self, *args, **kwargs):\n if len(args) == 1:\n dims = 
0\n args = args[0]\n elif args:\n dims = len(args)\n elif kwargs:\n args = [kwargs.get(arg, np.nan) for arg in self.inputs]\n dims = len(args)\n if len(args) == 1:\n dims = 0\n args = args[0]\n\n if self.use_numpy:\n if self.is_expanded:\n dtype = args.dtype\n if len(args) > self.N:\n nelem = self.N\n args = args[..., -self.N:]\n else:\n nelem = len(args)\n else:\n dtype = type(args)\n nelem = 1\n if self.res is None:\n self.res = np.zeros(self.N, dtype=dtype)\n self.idx += nelem\n self.res = np.roll(self.res, -nelem)\n self.res[..., -nelem:] = [args] if dims else args\n else:\n if self.is_expanded:\n self.res.extend(args)\n self.idx = min(self.idx + len(args), self.N)\n else:\n if not self.unique:\n self.res.append(args)\n self.idx = min(self.idx + 1, self.N)\n elif self.unique:\n if len(self.res) == 0:\n self.res.append(args)\n self.idx = min(self.idx + 1, self.N)\n elif self.res[self.idx-1] != args:\n self.res.append(args)\n self.idx = min(self.idx + 1, self.N)\n self.res = self.res[-self.idx:]\n\n return self.res[-self.idx:]\n\n def on_expand(self):\n return {'parent': self.parent, 'use_numpy': self.use_numpy, 'unique': self.unique}\n\n def reset(self):\n self.idx = 0\n"
] |
[
[
"numpy.roll",
"numpy.zeros"
]
] |
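A reduced sketch of the numpy branch of RollingBuffer in the row above: new samples are shifted in from the right with np.roll and only the filled part of the buffer is returned. The buffer length and sample values are illustrative assumptions; the original class also handles expanded inputs and a list-backed mode that this sketch omits.

import numpy as np

class MiniRollingBuffer:
    def __init__(self, n):
        self.n = n
        self.buf = np.zeros(n)
        self.count = 0

    def push(self, value):
        self.buf = np.roll(self.buf, -1)   # drop the oldest element on the left
        self.buf[-1] = value               # append the newest on the right
        self.count = min(self.count + 1, self.n)
        return self.buf[-self.count:]      # only the filled part, like RollingBuffer.__call__

rb = MiniRollingBuffer(5)
for v in (1.0, 2.0, 3.0):
    window = rb.push(v)
print(window)                              # [1. 2. 3.]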
headrockz/data-analysis-challenge
|
[
"a0bec70654c58698648fd7c7179dd2d451d33f58"
] |
[
"utils/load_idh.py"
] |
[
"import sqlite3\nimport pandas as pd\n\n\nclass LoadIDH:\n def __init__(self, file):\n self.conn = sqlite3.connect(file)\n self.cursor = self.conn.cursor()\n\n def select(self, municipio):\n consult = 'select id_municipio from municipios where municipio like ?'\n self.cursor.execute(consult, (f'%{municipio}%',))\n result = self.cursor.fetchall()\n \n return result\n \n def insert(self, idh, fk_municipio):\n if fk_municipio != []:\n consult = '''INSERT OR IGNORE INTO idh (idh, fk_municipio) VALUES (?, ?)'''\n self.cursor.execute(consult, (idh, fk_municipio[0][0]))\n self.conn.commit()\n\n def close(self):\n self.cursor.close()\n self.conn.close()\n\n\n# Tests\nif __name__ == '__main__':\n data = LoadIDH('database.db')\n\n df = pd.read_excel('../datasets/idh.xlsx')\n\n for i in df.values:\n result = data.select(i[0])\n\n data.insert(i[1], result)\n\n data.close()"
] |
[
[
"pandas.read_excel"
]
] |
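The LoadIDH helper above boils down to two parameterized SQLite statements: a LIKE lookup for the municipality's primary key and an INSERT OR IGNORE of the IDH value. A self-contained sketch of that pattern against an in-memory database; the table definitions and sample values are assumptions made only so the snippet runs on its own.

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE municipios (id_municipio INTEGER PRIMARY KEY, municipio TEXT)')
cur.execute('CREATE TABLE idh (idh REAL, fk_municipio INTEGER UNIQUE)')
cur.execute('INSERT INTO municipios (municipio) VALUES (?)', ('Natal',))

rows = cur.execute('SELECT id_municipio FROM municipios WHERE municipio LIKE ?',
                   ('%Natal%',)).fetchall()
if rows:                                            # mirrors the `fk_municipio != []` guard above
    cur.execute('INSERT OR IGNORE INTO idh (idh, fk_municipio) VALUES (?, ?)',
                (0.763, rows[0][0]))
    conn.commit()

print(cur.execute('SELECT * FROM idh').fetchall())  # [(0.763, 1)]
conn.close()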
kdatta/tensorpack
|
[
"963e5100aa2f91d3a4b02f809b2cfbb50be9094e"
] |
[
"tensorpack/input_source/input_source_base.py"
] |
[
"# -*- coding: utf-8 -*-\n# File: input_source_base.py\n\nimport copy\nfrom abc import ABCMeta, abstractmethod\nfrom contextlib import contextmanager\nimport six\nimport tensorflow as tf\n\nfrom ..callbacks.base import CallbackFactory\nfrom ..tfutils.common import get_op_tensor_name\nfrom ..utils import logger\nfrom ..utils.argtools import call_only_once, memoized_method\nfrom ..compat import tfv1\n\n__all__ = ['InputSource', 'remap_input_source']\n\n\ndef build_or_reuse_placeholder(tensor_spec):\n \"\"\"\n Build a tf.placeholder from the metadata in the given tensor spec, or return an existing one.\n\n Args:\n tensor_spec (tf.TensorSpec):\n\n Returns:\n tf.Tensor:\n \"\"\"\n g = tfv1.get_default_graph()\n name = tensor_spec.name\n try:\n tensor = g.get_tensor_by_name(name + ':0')\n assert \"Placeholder\" in tensor.op.type, \"Tensor {} exists but is not a placeholder!\".format(name)\n assert tensor_spec.is_compatible_with(tensor), \\\n \"Tensor {} exists but is not compatible with the signature!\".format(tensor)\n if tensor.shape.as_list() == tensor_spec.shape.as_list():\n # It might be desirable to use a placeholder of a different shape in some tower\n # (e.g., a less specific shape)\n\n # Comparing `tensor.shape` directly doesn't work, because\n # tensorflow thinks `tf.Dimension(None)` and `tf.Dimension(None)` are not equal.\n return tensor\n except KeyError:\n pass\n with tfv1.name_scope(None): # clear any name scope it might get called in\n ret = tfv1.placeholder(\n tensor_spec.dtype, shape=tensor_spec.shape, name=tensor_spec.name)\n return ret\n\n\ndef get_tensors_inputs(placeholders, tensors, names):\n \"\"\"\n Args:\n placeholders (list[Tensor]):\n tensors (list[Tensor]): list of tf.Tensor\n names (list[str]): names matching the given tensors\n\n Returns:\n list[Tensor]: inputs to used for the tower function,\n with the corresponding placeholders replaced by tensors.\n \"\"\"\n assert len(tensors) == len(names), \\\n \"Input tensors {} and input names {} have different length!\".format(\n tensors, names)\n ret = copy.copy(placeholders)\n placeholder_names = [p.name for p in placeholders]\n for name, tensor in zip(names, tensors):\n tensorname = get_op_tensor_name(name)[1]\n try:\n idx = placeholder_names.index(tensorname)\n except ValueError:\n logger.error(\"Name {} is not a model input!\".format(tensorname))\n raise\n ret[idx] = tensor\n return ret\n\n\ndef get_sublist_by_names(lst, names):\n \"\"\"\n Args:\n lst (list): list of objects with \"name\" property.\n\n Returns:\n list: a sublist of objects, matching names\n \"\"\"\n orig_names = [p.name for p in lst]\n ret = []\n for name in names:\n try:\n idx = orig_names.index(name)\n except ValueError:\n logger.error(\"Name {} doesn't appear in lst {}!\".format(\n name, str(orig_names)))\n raise\n ret.append(lst[idx])\n return ret\n\n\n@six.add_metaclass(ABCMeta)\nclass InputSource(object):\n \"\"\" Base class for the abstract InputSource. 
\"\"\"\n\n _name_scope = None\n _setup_done = False\n\n def get_input_tensors(self):\n \"\"\"\n Returns:\n list[Tensor]: A list of tensors corresponding to the inputs of the model.\n Will be used as input for the tower function.\n This method should always create and return new tensors when called,\n unless it returns placeholders.\n \"\"\"\n return self._get_input_tensors()\n\n @abstractmethod\n def _get_input_tensors(self):\n pass\n\n @call_only_once\n def setup(self, input_signature):\n \"\"\"\n Args:\n input_signature (list[tf.TensorSpec]): list of specs for each input tensor\n\n Returns:\n list[Callback]: extra callbacks needed by this InputSource.\n callbacks of InputSource cannot use any `trigger*()` method.\n \"\"\"\n self._setup(input_signature)\n self._setup_done = True\n return self.get_callbacks()\n\n def _setup(self, input_signature):\n pass\n\n def setup_done(self):\n \"\"\"\n Returns:\n bool: whether :meth:`setup()` has been called.\n \"\"\"\n return self._setup_done\n\n @memoized_method\n def get_callbacks(self):\n \"\"\"\n An InputSource might need some extra maintenance during training,\n which is done also through the Callback interface.\n This method returns the callbacks and the return value will be memoized.\n\n All callbacks will be automatically marked as `chief_only=False`,\n so they will run on all nodes.\n\n Callbacks returned by :class:`InputSource` only supports a subset of callback's functionalities:\n\n 1. It cannot access the trainer, because an :class:`InputSource` can be used in pure inference.\n 2. It cannot use the following methods: `trigger_{step,epoch}, {before,after}_epoch`.\n\n In other words, these callbacks should only have the basic functionality of `tf.train.SessionRunHooks`.\n\n Returns:\n list[Callback]: extra callbacks needed by this InputSource.\n \"\"\"\n assert self.setup_done()\n ret = [CallbackFactory(\n before_train=lambda _: self.reset_state())] + self._get_callbacks()\n\n for r in ret:\n r.set_chief_only(False) # no input callbacks should be chief-only\n return ret\n\n def _get_callbacks(self):\n return []\n\n def reset_state(self):\n \"\"\"\n Initialize/reinitialize this InputSource.\n Must be called under a default session.\n\n For training, it will get called once by the trainer in `before_train` callbacks.\n For inference, the :class:`InferenceRunner` will call this method each time it is triggered.\n \"\"\"\n self._reset_state()\n\n def _reset_state(self):\n pass\n\n def size(self):\n \"\"\"\n Returns:\n int: epoch size of the InputSource\n \"\"\"\n return self._size()\n\n def _size(self):\n raise NotImplementedError()\n\n @contextmanager\n def cached_name_scope(self):\n \"\"\"\n Yield a context under a cached name scope, whose name is the name of\n this InputSource class.\n \"\"\"\n if self._name_scope:\n with tf.name_scope(self._name_scope):\n yield self._name_scope\n else:\n name = type(self).__name__\n with tf.name_scope(name) as ns:\n self._name_scope = ns\n yield ns\n\n\nclass ProxyInputSource(InputSource):\n \"\"\"\n An InputSource which proxy every method to ``self._input``.\n \"\"\"\n def __init__(self, input):\n assert isinstance(input, InputSource), input\n self._input = input\n\n def _get_input_tensors(self):\n return self._input.get_input_tensors()\n\n def _setup(self, input_signature):\n self._input.setup(input_signature)\n\n def _get_callbacks(self):\n return self._input.get_callbacks()\n\n def _size(self):\n return self._input.size()\n\n def _reset_state(self):\n self._input.reset_state()\n\n\ndef 
remap_input_source(input, names):\n \"\"\"\n When you have some :class:`InputSource` which doesn't match the inputs of\n your tower function, use `RemapInputSource`.\n It produces placeholders for all the inputs in your model,\n except that the corresponding ones are replaced with the tensor produced\n by the given :class:`InputSource`.\n\n Example:\n\n .. code-block:: python\n\n input1 = QueueInput(ds)\n # assume ds produces data that should be fed to 'image' and 'label',\n # but the graph takes more inputs for some reasons, or takes inputs\n # of a different order, for example like the following:\n\n # input_signature = [tf.TensorSpec((None,10), tf.float32, 'score'),\n # tf.TensorSpec((None,20,20,3), tf.float32, 'label'),\n # tf.TensorSpec((None,), tf.int32, 'image') ]\n\n input2 = remap_input_source(input1, ['image', 'label'])\n # now, if input2 is used with the above input_signature, it will return a\n # placeholder for 'score', plus the tensors returned by input1\n \"\"\"\n def __init__(self, input, names):\n \"\"\"\n Args:\n input(InputSource): a :class:`InputSource`, whose tensors will get mapped.\n names(list[str]): list of input names corresponding to the tensors\n produced by ``input``.\n\n Returns:\n InputSource:\n \"\"\"\n ProxyInputSource.__init__(self, input)\n assert isinstance(names, (list, tuple)), names\n self._names = tuple(names)\n\n def _setup(self, inputs):\n self._all_placehdrs = [build_or_reuse_placeholder(v) for v in inputs]\n inputs_subset = get_sublist_by_names(inputs, self._names)\n self._input.setup(inputs_subset)\n\n def _get_input_tensors(self):\n ret = self._input.get_input_tensors()\n assert len(ret) == len(self._names)\n return get_tensors_inputs(\n self._all_placehdrs, ret, self._names)\n\n oldcls = type(input)\n # inherit oldcls so that type check in various places would work\n cls = type('Remapped' + oldcls.__name__, (ProxyInputSource, oldcls), {\n '__init__': __init__,\n '_setup': _setup,\n '_get_input_tensors': _get_input_tensors})\n return cls(input, names)\n"
] |
[
[
"tensorflow.name_scope"
]
] |
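build_or_reuse_placeholder() in the row above looks a tensor up by name and only creates a new placeholder when the lookup fails. A minimal sketch of that build-or-reuse idea written against tf.compat.v1 directly (instead of tensorpack's tfv1 alias); the TensorSpec values are assumptions, and the compatibility-check asserts from the original are omitted.

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
spec = tf.TensorSpec(shape=(None, 28, 28, 3), dtype=tf.float32, name='image')

g = tf.compat.v1.get_default_graph()
try:
    tensor = g.get_tensor_by_name(spec.name + ':0')   # reuse an existing placeholder of that name
except KeyError:
    tensor = tf.compat.v1.placeholder(spec.dtype, shape=spec.shape, name=spec.name)

print(tensor)   # subsequent calls with the same spec return the same placeholder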
sharma-arjun/gym
|
[
"e689f93a425d97489e590bba0a7d4518de0dcc03"
] |
[
"gym/envs/classic_control/cartpole.py"
] |
[
"\"\"\"\nClassic cart-pole system implemented by Rich Sutton et al.\nCopied from http://incompleteideas.net/sutton/book/code/pole.c\npermalink: https://perma.cc/C9ZM-652R\n\"\"\"\n\nimport math\nimport gym\nfrom gym import spaces, logger\nfrom gym.utils import seeding\nimport numpy as np\n\nclass CartPoleEnv(gym.Env):\n \"\"\"\n Description:\n A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum starts upright, and the goal is to prevent it from falling over by increasing and reducing the cart's velocity.\n\n Source:\n This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson\n\n Observation: \n Type: Box(4)\n Num\tObservation Min Max\n 0\tCart Position -2.4 2.4\n 1\tCart Velocity -Inf Inf\n 2\tPole Angle ~-41.8° ~41.8°\n 3\tPole Velocity At Tip -Inf Inf\n \n Actions:\n Type: Discrete(2)\n Num\tAction\n 0\tPush cart to the left\n 1\tPush cart to the right\n \n Note: The amount the velocity is reduced or increased is not fixed as it depends on the angle the pole is pointing. This is because the center of gravity of the pole increases the amount of energy needed to move the cart underneath it\n\n Reward:\n Reward is 1 for every step taken, including the termination step\n\n Starting State:\n All observations are assigned a uniform random value between ±0.05\n\n Episode Termination:\n Pole Angle is more than ±12°\n Cart Position is more than ±2.4 (center of the cart reaches the edge of the display)\n Episode length is greater than 200\n Solved Requirements\n Considered solved when the average reward is greater than or equal to 195.0 over 100 consecutive trials.\n \"\"\"\n \n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second' : 50\n }\n\n def __init__(self):\n self.gravity = 9.8\n self.masscart = 1.0\n self.masspole = 0.1\n self.total_mass = (self.masspole + self.masscart)\n self.length = 0.5 # actually half the pole's length\n self.polemass_length = (self.masspole * self.length)\n self.force_mag = 10.0\n self.tau = 0.02 # seconds between state updates\n\n # Angle at which to fail the episode\n self.theta_threshold_radians = 12 * 2 * math.pi / 360\n self.x_threshold = 2.4\n\n # Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds\n high = np.array([\n self.x_threshold * 2,\n np.finfo(np.float32).max,\n self.theta_threshold_radians * 2,\n np.finfo(np.float32).max])\n\n self.action_space = spaces.Discrete(2)\n self.observation_space = spaces.Box(-high, high)\n\n self.seed()\n self.viewer = None\n self.state = None\n\n self.steps_beyond_done = None\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\"%(action, type(action))\n state = self.state\n x, x_dot, theta, theta_dot = state\n force = self.force_mag if action==1 else -self.force_mag\n costheta = math.cos(theta)\n sintheta = math.sin(theta)\n temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass\n thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))\n xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass\n x = x + self.tau * x_dot\n x_dot = x_dot + self.tau * xacc\n theta = theta + self.tau * theta_dot\n theta_dot = theta_dot + self.tau * thetaacc\n self.state = (x,x_dot,theta,theta_dot)\n done 
= x < -self.x_threshold \\\n or x > self.x_threshold \\\n or theta < -self.theta_threshold_radians \\\n or theta > self.theta_threshold_radians\n done = bool(done)\n\n if not done:\n reward = 1.0\n elif self.steps_beyond_done is None:\n # Pole just fell!\n self.steps_beyond_done = 0\n reward = 1.0\n else:\n if self.steps_beyond_done == 0:\n logger.warn(\"You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.\")\n self.steps_beyond_done += 1\n reward = 0.0\n\n return np.array(self.state), reward, done, {}\n\n def reset(self):\n self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))\n self.steps_beyond_done = None\n return np.array(self.state)\n\n def render(self, mode='human'):\n screen_width = 600\n screen_height = 400\n\n world_width = self.x_threshold*2\n scale = screen_width/world_width\n carty = 100 # TOP OF CART\n polewidth = 10.0\n polelen = scale * 1.0\n cartwidth = 50.0\n cartheight = 30.0\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(screen_width, screen_height)\n l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2\n axleoffset =cartheight/4.0\n cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\n self.carttrans = rendering.Transform()\n cart.add_attr(self.carttrans)\n self.viewer.add_geom(cart)\n l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2\n pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])\n pole.set_color(.8,.6,.4)\n self.poletrans = rendering.Transform(translation=(0, axleoffset))\n pole.add_attr(self.poletrans)\n pole.add_attr(self.carttrans)\n self.viewer.add_geom(pole)\n self.axle = rendering.make_circle(polewidth/2)\n self.axle.add_attr(self.poletrans)\n self.axle.add_attr(self.carttrans)\n self.axle.set_color(.5,.5,.8)\n self.viewer.add_geom(self.axle)\n self.track = rendering.Line((0,carty), (screen_width,carty))\n self.track.set_color(0,0,0)\n self.viewer.add_geom(self.track)\n\n if self.state is None: return None\n\n x = self.state\n cartx = x[0]*scale+screen_width/2.0 # MIDDLE OF CART\n self.carttrans.set_translation(cartx, carty)\n self.poletrans.set_rotation(-x[2])\n\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n def close(self):\n if self.viewer: self.viewer.close()\n"
] |
[
[
"numpy.finfo",
"numpy.array"
]
] |
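A minimal usage sketch for the CartPoleEnv defined above: reset, step with a random action until done, and accumulate the +1-per-step reward. The 'CartPole-v0' id assumes the environment is registered under gym's usual name, and the loop follows the old 4-tuple step API used in this fork.

import gym

env = gym.make('CartPole-v0')
obs = env.reset()                       # [cart position, cart velocity, pole angle, tip velocity]
total_reward = 0.0
done = False
while not done:
    action = env.action_space.sample()  # 0 = push cart left, 1 = push cart right
    obs, reward, done, info = env.step(action)
    total_reward += reward              # +1 per step until the pole falls or the cart leaves the track
env.close()
print(total_reward)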
airqj/maskrcnn-benchmark
|
[
"210d1abf24d0edf8ee43c3a49c3da9cde6dddad7",
"210d1abf24d0edf8ee43c3a49c3da9cde6dddad7"
] |
[
"tools/train_net.py",
"maskrcnn_benchmark/engine/MyInference.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nr\"\"\"\nBasic training script for PyTorch\n\"\"\"\n\n# Set up custom environment before nearly anything else is imported\n# NOTE: this should be the first import (no not reorder)\nfrom maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip\n\nimport argparse\nimport os\n\nimport torch\nfrom maskrcnn_benchmark.config import cfg\nfrom maskrcnn_benchmark.data import make_data_loader\nfrom maskrcnn_benchmark.solver import make_lr_scheduler\nfrom maskrcnn_benchmark.solver import make_optimizer\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.engine.trainer import do_train\nfrom maskrcnn_benchmark.modeling.detector import build_detection_model\nfrom maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer\nfrom maskrcnn_benchmark.utils.collect_env import collect_env_info\nfrom maskrcnn_benchmark.utils.comm import synchronize, get_rank\nfrom maskrcnn_benchmark.utils.imports import import_file\nfrom maskrcnn_benchmark.utils.logger import setup_logger\nfrom maskrcnn_benchmark.utils.miscellaneous import mkdir\n\n\ndef train(cfg, local_rank, distributed):\n model = build_detection_model(cfg)\n device = torch.device(cfg.MODEL.DEVICE)\n model.to(device)\n\n optimizer = make_optimizer(cfg, model)\n scheduler = make_lr_scheduler(cfg, optimizer)\n\n if distributed:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[local_rank], output_device=local_rank,\n # this should be removed if we update BatchNorm stats\n broadcast_buffers=False,\n )\n\n arguments = {}\n arguments[\"iteration\"] = 0\n\n output_dir = cfg.OUTPUT_DIR\n\n save_to_disk = get_rank() == 0\n checkpointer = DetectronCheckpointer(\n cfg, model, optimizer, scheduler, output_dir, save_to_disk\n )\n extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)\n arguments.update(extra_checkpoint_data)\n\n data_loader = make_data_loader(\n cfg,\n is_train=True,\n is_distributed=distributed,\n start_iter=arguments[\"iteration\"],\n )\n\n checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD\n\n do_train(\n model,\n data_loader,\n optimizer,\n scheduler,\n checkpointer,\n device,\n checkpoint_period,\n arguments,\n )\n\n return model\n\n\ndef test(cfg, model, distributed):\n if distributed:\n model = model.module\n torch.cuda.empty_cache() # TODO check if it helps\n iou_types = (\"bbox\",)\n if cfg.MODEL.MASK_ON:\n iou_types = iou_types + (\"segm\",)\n output_folders = [None] * len(cfg.DATASETS.TEST)\n dataset_names = cfg.DATASETS.TEST\n if cfg.OUTPUT_DIR:\n for idx, dataset_name in enumerate(dataset_names):\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\", dataset_name)\n mkdir(output_folder)\n output_folders[idx] = output_folder\n data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)\n for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):\n inference(\n model,\n data_loader_val,\n dataset_name=dataset_name,\n iou_types=iou_types,\n box_only=cfg.MODEL.RPN_ONLY,\n device=cfg.MODEL.DEVICE,\n expected_results=cfg.TEST.EXPECTED_RESULTS,\n expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n output_folder=output_folder,\n )\n synchronize()\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch Object Detection Training\")\n parser.add_argument(\n \"--config-file\",\n default=\"\",\n metavar=\"FILE\",\n help=\"path to config file\",\n type=str,\n )\n 
parser.add_argument(\"--local_rank\", type=int, default=0)\n parser.add_argument(\n \"--skip-test\",\n dest=\"skip_test\",\n help=\"Do not test the final model\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n args = parser.parse_args()\n\n num_gpus = int(os.environ[\"WORLD_SIZE\"]) if \"WORLD_SIZE\" in os.environ else 1\n args.distributed = num_gpus > 1\n\n if args.distributed:\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n backend=\"nccl\", init_method=\"env://\"\n )\n synchronize()\n\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n\n output_dir = cfg.OUTPUT_DIR\n if output_dir:\n mkdir(output_dir)\n\n logger = setup_logger(\"maskrcnn_benchmark\", output_dir, get_rank())\n logger.info(\"Using {} GPUs\".format(num_gpus))\n logger.info(args)\n\n logger.info(\"Collecting env info (might take some time)\")\n logger.info(\"\\n\" + collect_env_info())\n\n logger.info(\"Loaded configuration file {}\".format(args.config_file))\n with open(args.config_file, \"r\") as cf:\n config_str = \"\\n\" + cf.read()\n logger.info(config_str)\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n model = train(cfg, args.local_rank, args.distributed)\n\n if not args.skip_test:\n test(cfg, model, args.distributed)\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport datetime\nimport logging\nimport time\nimport os\n\nimport torch\nfrom tqdm import tqdm\n\nfrom maskrcnn_benchmark.data.datasets.evaluation import evaluate\nfrom ..utils.comm import is_main_process\nfrom ..utils.comm import all_gather\nfrom ..utils.comm import synchronize\n\n\ndef compute_on_dataset(model, data_loader, device):\n model.eval()\n results_dict = {}\n cpu_device = torch.device(\"cpu\")\n for i, batch in enumerate(tqdm(data_loader)):\n images, image_ids = batch\n images = images.to(device)\n with torch.no_grad():\n output = model(images)\n output = [o.to(cpu_device) for o in output]\n results_dict.update(\n {img_id: result for img_id, result in zip(image_ids, output)}\n )\n return results_dict\n\n\ndef _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):\n all_predictions = all_gather(predictions_per_gpu)\n if not is_main_process():\n return\n # merge the list of dicts\n predictions = {}\n for p in all_predictions:\n predictions.update(p)\n # convert a dict where the key is the index in a list\n image_ids = list(sorted(predictions.keys()))\n if len(image_ids) != image_ids[-1] + 1:\n logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n logger.warning(\n \"Number of images that were gathered from multiple processes is not \"\n \"a contiguous set. Some images might be missing from the evaluation\"\n )\n\n # convert to a list\n predictions = [predictions[i] for i in image_ids]\n return predictions\n\n\ndef inference(\n model,\n data_loader,\n dataset_name,\n iou_types=(\"bbox\",),\n box_only=False,\n device=\"cuda\",\n):\n # convert to a torch.device for efficiency\n device = torch.device(device)\n num_devices = (\n torch.distributed.get_world_size()\n if torch.distributed.is_initialized()\n else 1\n )\n logger = logging.getLogger(\"maskrcnn_benchmark.inference\")\n dataset = data_loader.dataset\n logger.info(\"Start evaluation on {} dataset({} images).\".format(dataset_name, len(dataset)))\n start_time = time.time()\n predictions = compute_on_dataset(model, data_loader, device)\n # wait for all processes to complete before measuring the time\n synchronize()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=total_time))\n logger.info(\n \"Total inference time: {} ({} s / img per device, on {} devices)\".format(\n total_time_str, total_time * num_devices / len(dataset), num_devices\n )\n )\n\n predictions = _accumulate_predictions_from_multiple_gpus(predictions)\n return predictions\n"
] |
[
[
"torch.device",
"torch.distributed.init_process_group",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.set_device",
"torch.cuda.empty_cache"
],
[
"torch.device",
"torch.no_grad",
"torch.distributed.is_initialized",
"torch.distributed.get_world_size"
]
] |
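The two API lists above summarize the distributed-training and inference utilities that the maskrcnn_benchmark scripts in this row rely on. As an illustration only (not code from the repository), here is a minimal sketch of the same DDP setup pattern, assuming a CUDA machine and the `WORLD_SIZE`/`RANK` environment variables exported by `torch.distributed.launch`; the model passed in is a stand-in.

```python
# Sketch of the DDP setup pattern used by the training script above.
# Assumes a CUDA machine and launcher-provided WORLD_SIZE/RANK env vars.
import argparse
import os

import torch


def setup_and_wrap(model):
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()

    distributed = int(os.environ.get("WORLD_SIZE", "1")) > 1
    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")

    device = torch.device("cuda", args.local_rank)
    model.to(device)
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank
        )
    return model, device


if __name__ == "__main__":
    wrapped_model, device = setup_and_wrap(torch.nn.Linear(10, 2))
```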
WolfNiu/AdversarialDialogue
|
[
"de406d3b624f9500e79bef7c5bc10e24376fefa8"
] |
[
"src/basic/ubuntu_data_generator.py"
] |
[
"\n# coding: utf-8\n\n# In[1]:\n\n\n\"\"\"\nTodo: combine read_lines, load_pickle, etc... to one single function load_file(),\n and use if statement to see which suffix the file has. Also keep an optional param\n suffix=None just in case we want to force it to load with a certain format\n\n\"\"\"\nfrom random import shuffle\nimport numpy as np\n\n\n# In[2]:\n\n\ndef to_str(batch_examples):\n str_batch_examples = [\n [\" \".join([str(token) for token in turn]) \n for turn in example]\n for example in batch_examples]\n return str_batch_examples\n\n\n# In[3]:\n\n\nclass DataGenerator(object):\n def __init__(self,\n norm_dialogues,\n adv_dialogues=None,\n feed_both_examples=False,\n is_training=True, \n batch_size=192,\n max_dialogue_length=3):\n self.norm_dialogues = norm_dialogues\n self.adv_dialogues = adv_dialogues\n self.feed_both_examples = feed_both_examples\n self.is_training = is_training\n \n if self.feed_both_examples: # if we are not feeding both examples\n assert batch_size % 2 == 0\n assert norm_dilogues is not None, \"Error: feeding both examples, need norm dialogues too.\"\n self.batch_size = batch_size // 2 # because we concatenate pos + neg examples in each batch\n else:\n self.batch_size = batch_size\n \n self.max_dialogue_length = max_dialogue_length\n \n def batch_generator(self): \n print(f\"There are {len(self.norm_dialogues)} dialogues.\")\n \n turn_lengths_lst = [\n len(turn) \n for dialogue in self.norm_dialogues\n for turn in dialogue]\n \n if not self.is_training:\n print(\"We are testing ==> no length threshold is applied.\")\n \n length_threshold = int(np.percentile(turn_lengths_lst, 90)) if self.is_training else max(turn_lengths_lst)\n print(\"Length threshold:\", length_threshold)\n print(\"All turns longer than this will be truncated to this length.\")\n \n # Truncate based on length threshold\n norm_dialogues = [\n [turn[(-length_threshold):] # only keeping the last length_threshold tokens\n for turn in dialogue] \n for dialogue in self.norm_dialogues\n if len(dialogue) >= self.max_dialogue_length]\n \n if self.norm_dialogues is not None:\n adv_dialogues = [\n [turn[(-length_threshold):] # only keeping the last length_threshold tokens\n for turn in dialogue] \n for dialogue in self.adv_dialogues\n if len(dialogue) >= self.max_dialogue_length]\n \n num_dialogues = len(norm_dialogues)\n print(f\"There are {num_dialogues} dialogues left.\")\n \n assert num_dialogues >= self.batch_size, \"Error: Number of dialogues less than batch_size\"\n \n if self.is_training: # only shuffle dataset if we are training\n if self.adv_dialogues is None:\n shuffle(norm_dialogues)\n else:\n zipped = list(zip(norm_dialogues, adv_dialogues))\n shuffle(zipped)\n norm_dialogues, adv_dialogues = list(zip(*zipped))\n \n dialogue_indices = list(range(self.batch_size)) # initialize dialogue indices\n next_dialogue_index = self.batch_size # initialize the index of the next dialogue\n start_turn_indices = [0] * self.batch_size # track which turn we are at\n while True:\n norm_batch_examples = [\n norm_dialogues[dialogue_index][start_turn_index: (start_turn_index + self.max_dialogue_length)] \n for (dialogue_index, start_turn_index) \n in zip(dialogue_indices, start_turn_indices)]\n \n if self.adv_dialogues is not None:\n # Avoid modifying target turn\n adv_batch_examples = [\n (adv_dialogues[dialogue_index][start_turn_index: (start_turn_index + self.max_dialogue_length - 1)] \n + norm_dialogues[dialogue_index][(start_turn_index + self.max_dialogue_length - 1): (start_turn_index + 
self.max_dialogue_length)])\n for (dialogue_index, start_turn_index) \n in zip(dialogue_indices, start_turn_indices)]\n \n if self.feed_both_examples:\n feed_dialogue_indices = dialogue_indices + dialogue_indices\n feed_start_turn_indices = start_turn_indices + start_turn_indices\n feed_batch_examples = norm_batch_examples + adv_batch_examples\n else:\n feed_dialogue_indices = dialogue_indices\n feed_start_turn_indices = start_turn_indices\n feed_batch_examples = adv_batch_examples\n \n turn_lengths_lst = [\n [len(turn) for turn in example]\n for example in feed_batch_examples]\n \n yield (feed_dialogue_indices,\n feed_start_turn_indices, \n to_str(feed_batch_examples), \n turn_lengths_lst)\n \n for i in range(self.batch_size):\n start_turn_indices[i] += 1 # move on to the next example\n # If we've finished the current dialogue\n if start_turn_indices[i] + self.max_dialogue_length > len(norm_dialogues[dialogue_indices[i]]):\n dialogue_indices[i] = next_dialogue_index # move on to the next dialogue\n start_turn_indices[i] = 0 # reset example index\n next_dialogue_index += 1\n if next_dialogue_index >= num_dialogues:\n \"\"\"todo: let the remaining dialgoues finish when out of new dialgoues\"\"\"\n yield None\n return\n\n"
] |
[
[
"numpy.percentile"
]
] |
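The single API entry above (`numpy.percentile`) is what the dialogue generator uses to pick its turn-length cutoff. A minimal sketch of that truncation step, on made-up token-id lists rather than the Ubuntu data:

```python
# Truncate every turn to the 90th-percentile turn length, as the generator
# above does; the nested lists are illustrative token-id sequences.
import numpy as np

dialogues = [
    [[1, 2, 3], [4, 5, 6, 7]],
    [[8, 9], [10, 11, 12, 13, 14]],
]

turn_lengths = [len(turn) for dialogue in dialogues for turn in dialogue]
length_threshold = int(np.percentile(turn_lengths, 90))

truncated = [
    [turn[-length_threshold:] for turn in dialogue]
    for dialogue in dialogues
]
print(length_threshold, truncated)
```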
vipermu/dgl
|
[
"c9ac6c9889423019977e431c8b74a7b6c70cdc01",
"c9ac6c9889423019977e431c8b74a7b6c70cdc01"
] |
[
"examples/pytorch/rgcn/link_predict.py",
"examples/pytorch/rgcn/entity_classify.py"
] |
[
"\"\"\"\nModeling Relational Data with Graph Convolutional Networks\nPaper: https://arxiv.org/abs/1703.06103\nCode: https://github.com/MichSchli/RelationPrediction\n\nDifference compared to MichSchli/RelationPrediction\n* Report raw metrics instead of filtered metrics.\n* By default, we use uniform edge sampling instead of neighbor-based edge\n sampling used in author's code. In practice, we find it achieves similar MRR\n probably because the model only uses one GNN layer so messages are propagated\n among immediate neighbors. User could specify \"--edge-sampler=neighbor\" to switch\n to neighbor-based edge sampling.\n\"\"\"\n\nimport argparse\nimport numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport random\nfrom dgl.contrib.data import load_data\nfrom dgl.nn.pytorch import RelGraphConv\n\nfrom model import BaseRGCN\n\nimport utils\n\nclass EmbeddingLayer(nn.Module):\n def __init__(self, num_nodes, h_dim):\n super(EmbeddingLayer, self).__init__()\n self.embedding = torch.nn.Embedding(num_nodes, h_dim)\n\n def forward(self, g, h, r, norm):\n return self.embedding(h.squeeze())\n\nclass RGCN(BaseRGCN):\n def build_input_layer(self):\n return EmbeddingLayer(self.num_nodes, self.h_dim)\n\n def build_hidden_layer(self, idx):\n act = F.relu if idx < self.num_hidden_layers - 1 else None\n return RelGraphConv(self.h_dim, self.h_dim, self.num_rels, \"bdd\",\n self.num_bases, activation=act, self_loop=True,\n dropout=self.dropout)\n\nclass LinkPredict(nn.Module):\n def __init__(self, in_dim, h_dim, num_rels, num_bases=-1,\n num_hidden_layers=1, dropout=0, use_cuda=False, reg_param=0):\n super(LinkPredict, self).__init__()\n self.rgcn = RGCN(in_dim, h_dim, h_dim, num_rels * 2, num_bases,\n num_hidden_layers, dropout, use_cuda)\n self.reg_param = reg_param\n self.w_relation = nn.Parameter(torch.Tensor(num_rels, h_dim))\n nn.init.xavier_uniform_(self.w_relation,\n gain=nn.init.calculate_gain('relu'))\n\n def calc_score(self, embedding, triplets):\n # DistMult\n s = embedding[triplets[:,0]]\n r = self.w_relation[triplets[:,1]]\n o = embedding[triplets[:,2]]\n score = torch.sum(s * r * o, dim=1)\n return score\n\n def forward(self, g, h, r, norm):\n return self.rgcn.forward(g, h, r, norm)\n\n def regularization_loss(self, embedding):\n return torch.mean(embedding.pow(2)) + torch.mean(self.w_relation.pow(2))\n\n def get_loss(self, g, embed, triplets, labels):\n # triplets is a list of data samples (positive and negative)\n # each row in the triplets is a 3-tuple of (source, relation, destination)\n score = self.calc_score(embed, triplets)\n predict_loss = F.binary_cross_entropy_with_logits(score, labels)\n reg_loss = self.regularization_loss(embed)\n return predict_loss + self.reg_param * reg_loss\n\ndef node_norm_to_edge_norm(g, node_norm):\n g = g.local_var()\n # convert to edge norm\n g.ndata['norm'] = node_norm\n g.apply_edges(lambda edges : {'norm' : edges.dst['norm']})\n return g.edata['norm']\n\ndef main(args):\n # load graph data\n data = load_data(args.dataset)\n num_nodes = data.num_nodes\n train_data = data.train\n valid_data = data.valid\n test_data = data.test\n num_rels = data.num_rels\n\n # check cuda\n use_cuda = args.gpu >= 0 and torch.cuda.is_available()\n if use_cuda:\n torch.cuda.set_device(args.gpu)\n\n # create model\n model = LinkPredict(num_nodes,\n args.n_hidden,\n num_rels,\n num_bases=args.n_bases,\n num_hidden_layers=args.n_layers,\n dropout=args.dropout,\n use_cuda=use_cuda,\n reg_param=args.regularization)\n\n # validation 
and testing triplets\n valid_data = torch.LongTensor(valid_data)\n test_data = torch.LongTensor(test_data)\n\n # build test graph\n test_graph, test_rel, test_norm = utils.build_test_graph(\n num_nodes, num_rels, train_data)\n test_deg = test_graph.in_degrees(\n range(test_graph.number_of_nodes())).float().view(-1,1)\n test_node_id = torch.arange(0, num_nodes, dtype=torch.long).view(-1, 1)\n test_rel = torch.from_numpy(test_rel)\n test_norm = node_norm_to_edge_norm(test_graph, torch.from_numpy(test_norm).view(-1, 1))\n\n if use_cuda:\n model.cuda()\n\n # build adj list and calculate degrees for sampling\n adj_list, degrees = utils.get_adj_and_degrees(num_nodes, train_data)\n\n # optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n\n model_state_file = 'model_state.pth'\n forward_time = []\n backward_time = []\n\n # training loop\n print(\"start training...\")\n\n epoch = 0\n best_mrr = 0\n while True:\n model.train()\n epoch += 1\n\n # perform edge neighborhood sampling to generate training graph and data\n g, node_id, edge_type, node_norm, data, labels = \\\n utils.generate_sampled_graph_and_labels(\n train_data, args.graph_batch_size, args.graph_split_size,\n num_rels, adj_list, degrees, args.negative_sample,\n args.edge_sampler)\n print(\"Done edge sampling\")\n\n # set node/edge feature\n node_id = torch.from_numpy(node_id).view(-1, 1).long()\n edge_type = torch.from_numpy(edge_type)\n edge_norm = node_norm_to_edge_norm(g, torch.from_numpy(node_norm).view(-1, 1))\n data, labels = torch.from_numpy(data), torch.from_numpy(labels)\n deg = g.in_degrees(range(g.number_of_nodes())).float().view(-1, 1)\n if use_cuda:\n node_id, deg = node_id.cuda(), deg.cuda()\n edge_type, edge_norm = edge_type.cuda(), edge_norm.cuda()\n data, labels = data.cuda(), labels.cuda()\n\n t0 = time.time()\n embed = model(g, node_id, edge_type, edge_norm)\n loss = model.get_loss(g, embed, data, labels)\n t1 = time.time()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm) # clip gradients\n optimizer.step()\n t2 = time.time()\n\n forward_time.append(t1 - t0)\n backward_time.append(t2 - t1)\n print(\"Epoch {:04d} | Loss {:.4f} | Best MRR {:.4f} | Forward {:.4f}s | Backward {:.4f}s\".\n format(epoch, loss.item(), best_mrr, forward_time[-1], backward_time[-1]))\n\n optimizer.zero_grad()\n\n # validation\n if epoch % args.evaluate_every == 0:\n # perform validation on CPU because full graph is too large\n if use_cuda:\n model.cpu()\n model.eval()\n print(\"start eval\")\n embed = model(test_graph, test_node_id, test_rel, test_norm)\n mrr = utils.calc_mrr(embed, model.w_relation, valid_data,\n hits=[1, 3, 10], eval_bz=args.eval_batch_size)\n # save best model\n if mrr < best_mrr:\n if epoch >= args.n_epochs:\n break\n else:\n best_mrr = mrr\n torch.save({'state_dict': model.state_dict(), 'epoch': epoch},\n model_state_file)\n if use_cuda:\n model.cuda()\n\n print(\"training done\")\n print(\"Mean forward time: {:4f}s\".format(np.mean(forward_time)))\n print(\"Mean Backward time: {:4f}s\".format(np.mean(backward_time)))\n\n print(\"\\nstart testing:\")\n # use best model checkpoint\n checkpoint = torch.load(model_state_file)\n if use_cuda:\n model.cpu() # test on CPU\n model.eval()\n model.load_state_dict(checkpoint['state_dict'])\n print(\"Using best epoch: {}\".format(checkpoint['epoch']))\n embed = model(test_graph, test_node_id, test_rel, test_norm)\n utils.calc_mrr(embed, model.w_relation, test_data,\n hits=[1, 3, 10], eval_bz=args.eval_batch_size)\n\nif 
__name__ == '__main__':\n parser = argparse.ArgumentParser(description='RGCN')\n parser.add_argument(\"--dropout\", type=float, default=0.2,\n help=\"dropout probability\")\n parser.add_argument(\"--n-hidden\", type=int, default=500,\n help=\"number of hidden units\")\n parser.add_argument(\"--gpu\", type=int, default=-1,\n help=\"gpu\")\n parser.add_argument(\"--lr\", type=float, default=1e-2,\n help=\"learning rate\")\n parser.add_argument(\"--n-bases\", type=int, default=100,\n help=\"number of weight blocks for each relation\")\n parser.add_argument(\"--n-layers\", type=int, default=2,\n help=\"number of propagation rounds\")\n parser.add_argument(\"--n-epochs\", type=int, default=6000,\n help=\"number of minimum training epochs\")\n parser.add_argument(\"-d\", \"--dataset\", type=str, required=True,\n help=\"dataset to use\")\n parser.add_argument(\"--eval-batch-size\", type=int, default=500,\n help=\"batch size when evaluating\")\n parser.add_argument(\"--regularization\", type=float, default=0.01,\n help=\"regularization weight\")\n parser.add_argument(\"--grad-norm\", type=float, default=1.0,\n help=\"norm to clip gradient to\")\n parser.add_argument(\"--graph-batch-size\", type=int, default=30000,\n help=\"number of edges to sample in each iteration\")\n parser.add_argument(\"--graph-split-size\", type=float, default=0.5,\n help=\"portion of edges used as positive sample\")\n parser.add_argument(\"--negative-sample\", type=int, default=10,\n help=\"number of negative samples per positive sample\")\n parser.add_argument(\"--evaluate-every\", type=int, default=500,\n help=\"perform evaluation every n epochs\")\n parser.add_argument(\"--edge-sampler\", type=str, default=\"uniform\",\n help=\"type of edge sampler: 'uniform' or 'neighbor'\")\n\n args = parser.parse_args()\n print(args)\n main(args)\n",
"\"\"\"\nModeling Relational Data with Graph Convolutional Networks\nPaper: https://arxiv.org/abs/1703.06103\nCode: https://github.com/tkipf/relational-gcn\n\nDifference compared to tkipf/relation-gcn\n* l2norm applied to all weights\n* remove nodes that won't be touched\n\"\"\"\n\nimport argparse\nimport numpy as np\nimport time\nimport torch\nimport torch.nn.functional as F\nfrom dgl import DGLGraph\nfrom dgl.nn.pytorch import RelGraphConv\nfrom dgl.contrib.data import load_data\nfrom functools import partial\n\nfrom model import BaseRGCN\n\nclass EntityClassify(BaseRGCN):\n def create_features(self):\n features = torch.arange(self.num_nodes)\n if self.use_cuda:\n features = features.cuda()\n return features\n\n def build_input_layer(self):\n return RelGraphConv(self.num_nodes, self.h_dim, self.num_rels, \"basis\",\n self.num_bases, activation=F.relu, self_loop=self.use_self_loop,\n dropout=self.dropout)\n\n def build_hidden_layer(self, idx):\n return RelGraphConv(self.h_dim, self.h_dim, self.num_rels, \"basis\",\n self.num_bases, activation=F.relu, self_loop=self.use_self_loop,\n dropout=self.dropout)\n\n def build_output_layer(self):\n return RelGraphConv(self.h_dim, self.out_dim, self.num_rels, \"basis\",\n self.num_bases, activation=partial(F.softmax, dim=1),\n self_loop=self.use_self_loop)\n\ndef main(args):\n # load graph data\n data = load_data(args.dataset, bfs_level=args.bfs_level, relabel=args.relabel)\n num_nodes = data.num_nodes\n num_rels = data.num_rels\n num_classes = data.num_classes\n labels = data.labels\n train_idx = data.train_idx\n test_idx = data.test_idx\n\n # split dataset into train, validate, test\n if args.validation:\n val_idx = train_idx[:len(train_idx) // 5]\n train_idx = train_idx[len(train_idx) // 5:]\n else:\n val_idx = train_idx\n\n # since the nodes are featureless, the input feature is then the node id.\n feats = torch.arange(num_nodes)\n\n # edge type and normalization factor\n edge_type = torch.from_numpy(data.edge_type)\n edge_norm = torch.from_numpy(data.edge_norm).unsqueeze(1)\n labels = torch.from_numpy(labels).view(-1)\n\n # check cuda\n use_cuda = args.gpu >= 0 and torch.cuda.is_available()\n if use_cuda:\n torch.cuda.set_device(args.gpu)\n feats = feats.cuda()\n edge_type = edge_type.cuda()\n edge_norm = edge_norm.cuda()\n labels = labels.cuda()\n\n # create graph\n g = DGLGraph()\n g.add_nodes(num_nodes)\n g.add_edges(data.edge_src, data.edge_dst)\n\n # create model\n model = EntityClassify(len(g),\n args.n_hidden,\n num_classes,\n num_rels,\n num_bases=args.n_bases,\n num_hidden_layers=args.n_layers - 2,\n dropout=args.dropout,\n use_self_loop=args.use_self_loop,\n use_cuda=use_cuda)\n\n if use_cuda:\n model.cuda()\n\n # optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2norm)\n\n # training loop\n print(\"start training...\")\n forward_time = []\n backward_time = []\n model.train()\n for epoch in range(args.n_epochs):\n optimizer.zero_grad()\n t0 = time.time()\n logits = model(g, feats, edge_type, edge_norm)\n loss = F.cross_entropy(logits[train_idx], labels[train_idx])\n t1 = time.time()\n loss.backward()\n optimizer.step()\n t2 = time.time()\n\n forward_time.append(t1 - t0)\n backward_time.append(t2 - t1)\n print(\"Epoch {:05d} | Train Forward Time(s) {:.4f} | Backward Time(s) {:.4f}\".\n format(epoch, forward_time[-1], backward_time[-1]))\n train_acc = torch.sum(logits[train_idx].argmax(dim=1) == labels[train_idx]).item() / len(train_idx)\n val_loss = F.cross_entropy(logits[val_idx], 
labels[val_idx])\n val_acc = torch.sum(logits[val_idx].argmax(dim=1) == labels[val_idx]).item() / len(val_idx)\n print(\"Train Accuracy: {:.4f} | Train Loss: {:.4f} | Validation Accuracy: {:.4f} | Validation loss: {:.4f}\".\n format(train_acc, loss.item(), val_acc, val_loss.item()))\n print()\n\n model.eval()\n logits = model.forward(g, feats, edge_type, edge_norm)\n test_loss = F.cross_entropy(logits[test_idx], labels[test_idx])\n test_acc = torch.sum(logits[test_idx].argmax(dim=1) == labels[test_idx]).item() / len(test_idx)\n print(\"Test Accuracy: {:.4f} | Test loss: {:.4f}\".format(test_acc, test_loss.item()))\n print()\n\n print(\"Mean forward time: {:4f}\".format(np.mean(forward_time[len(forward_time) // 4:])))\n print(\"Mean backward time: {:4f}\".format(np.mean(backward_time[len(backward_time) // 4:])))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='RGCN')\n parser.add_argument(\"--dropout\", type=float, default=0,\n help=\"dropout probability\")\n parser.add_argument(\"--n-hidden\", type=int, default=16,\n help=\"number of hidden units\")\n parser.add_argument(\"--gpu\", type=int, default=-1,\n help=\"gpu\")\n parser.add_argument(\"--lr\", type=float, default=1e-2,\n help=\"learning rate\")\n parser.add_argument(\"--n-bases\", type=int, default=-1,\n help=\"number of filter weight matrices, default: -1 [use all]\")\n parser.add_argument(\"--n-layers\", type=int, default=2,\n help=\"number of propagation rounds\")\n parser.add_argument(\"-e\", \"--n-epochs\", type=int, default=50,\n help=\"number of training epochs\")\n parser.add_argument(\"-d\", \"--dataset\", type=str, required=True,\n help=\"dataset to use\")\n parser.add_argument(\"--l2norm\", type=float, default=0,\n help=\"l2 norm coef\")\n parser.add_argument(\"--relabel\", default=False, action='store_true',\n help=\"remove untouched nodes and relabel\")\n parser.add_argument(\"--use-self-loop\", default=False, action='store_true',\n help=\"include self feature as a special relation\")\n fp = parser.add_mutually_exclusive_group(required=False)\n fp.add_argument('--validation', dest='validation', action='store_true')\n fp.add_argument('--testing', dest='validation', action='store_false')\n parser.set_defaults(validation=True)\n\n args = parser.parse_args()\n print(args)\n args.bfs_level = args.n_layers + 1 # pruning used nodes for memory\n main(args)\n"
] |
[
[
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.arange",
"numpy.mean",
"torch.from_numpy",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.load",
"torch.nn.init.calculate_gain",
"torch.Tensor",
"torch.nn.Embedding",
"torch.sum"
],
[
"torch.arange",
"torch.from_numpy",
"torch.cuda.set_device",
"torch.nn.functional.cross_entropy",
"torch.cuda.is_available"
]
] |
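Several of the torch calls listed above implement the DistMult scoring inside `LinkPredict.get_loss` in link_predict.py. A self-contained sketch with random stand-in embeddings (shapes and triplets are assumptions, not the trained model):

```python
# DistMult link-prediction score + binary cross-entropy loss, mirroring
# LinkPredict above; embeddings are random placeholders.
import torch
import torch.nn.functional as F

num_nodes, num_rels, h_dim = 100, 4, 16
embedding = torch.randn(num_nodes, h_dim)
w_relation = torch.randn(num_rels, h_dim)

# each row of triplets is (source, relation, destination)
triplets = torch.tensor([[0, 1, 2], [3, 0, 7], [5, 2, 9]])
labels = torch.tensor([1.0, 1.0, 0.0])   # positive / negative samples

s = embedding[triplets[:, 0]]
r = w_relation[triplets[:, 1]]
o = embedding[triplets[:, 2]]
score = torch.sum(s * r * o, dim=1)

loss = F.binary_cross_entropy_with_logits(score, labels)
print(score.shape, loss.item())
```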
Qin-J/Multi-site-transfer-classification-of-major-depressive-disorder
|
[
"f6af292388ec83a9851a2254f38e8d90adfe4e6c"
] |
[
"graph.py"
] |
[
"# Copyright (c) 2016 Michaël Defferrard\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n\nimport sklearn.metrics\nimport sklearn.neighbors\nimport scipy.sparse\nimport scipy.sparse.linalg\nimport scipy.spatial.distance\nimport numpy as np\n\n\ndef grid(m, dtype=np.float32):\n \"\"\"Return the embedding of a grid graph.\"\"\"\n M = m**2\n x = np.linspace(0, 1, m, dtype=dtype)\n y = np.linspace(0, 1, m, dtype=dtype)\n xx, yy = np.meshgrid(x, y)\n z = np.empty((M, 2), dtype)\n z[:, 0] = xx.reshape(M)\n z[:, 1] = yy.reshape(M)\n return z\n\n\ndef distance_scipy_spatial(z, k=4, metric='euclidean'):\n \"\"\"Compute exact pairwise distances.\"\"\"\n d = scipy.spatial.distance.pdist(z, metric)\n d = scipy.spatial.distance.squareform(d)\n # k-NN graph.\n idx = np.argsort(d)[:, 1:k+1]\n d.sort()\n d = d[:, 1:k+1]\n return d, idx\n\n\ndef distance_sklearn_metrics(z, k=4, metric='euclidean'):\n \"\"\"Compute exact pairwise distances.\"\"\"\n d = sklearn.metrics.pairwise.pairwise_distances(\n z, metric=metric, n_jobs=-2)\n # k-NN graph.\n idx = np.argsort(d)[:, 1:k+1]\n d.sort()\n d = d[:, 1:k+1]\n return d, idx\n\n\ndef distance_lshforest(z, k=4, metric='cosine'):\n \"\"\"Return an approximation of the k-nearest cosine distances.\"\"\"\n assert metric is 'cosine'\n # lshf = sklearn.neighbors.LSHForest()\n # lshf.fit(z)\n # dist, idx = lshf.kneighbors(z, n_neighbors=k+1)\n nbrs = sklearn.neighbors.NearestNeighbors(n_neighbors=k+1, algorithm='ball_tree').fit(z)\n dist, idx = nbrs.kneighbors(z)\n assert dist.min() < 1e-10\n dist[dist < 0] = 0\n return dist, idx\n\n\ndef adjacency(dist, idx):\n \"\"\"Return the adjacency matrix of a kNN graph.\"\"\"\n M, k = dist.shape\n assert M, k == idx.shape\n assert dist.min() >= 0\n\n # Weights.\n sigma2 = np.mean(dist[:, -1])**2\n dist = np.exp(- dist**2 / sigma2)\n\n # Weight matrix.\n I = np.arange(0, M).repeat(k)\n J = idx.reshape(M*k)\n V = dist.reshape(M*k)\n W = scipy.sparse.coo_matrix((V, (I, J)), shape=(M, M))\n\n # No self-connections.\n W.setdiag(0)\n\n # Non-directed graph.\n bigger = W.T > W\n W = W - W.multiply(bigger) + W.T.multiply(bigger)\n\n assert W.nnz % 2 == 0\n assert np.abs(W - W.T).mean() < 1e-10\n assert type(W) is scipy.sparse.csr.csr_matrix\n return W\n\n\ndef replace_random_edges(A, noise_level):\n \"\"\"Replace randomly chosen edges by random edges.\"\"\"\n M, M = A.shape\n n = int(noise_level * A.nnz // 2)\n\n indices = np.random.permutation(A.nnz//2)[:n]\n rows = np.random.randint(0, M, n)\n cols = np.random.randint(0, M, n)\n vals = np.random.uniform(0, 1, n)\n assert len(indices) == len(rows) == len(cols) == len(vals)\n\n A_coo = scipy.sparse.triu(A, format='coo')\n assert A_coo.nnz == A.nnz // 2\n assert A_coo.nnz >= n\n A = A.tolil()\n\n for idx, row, col, val in zip(indices, rows, cols, vals):\n old_row = A_coo.row[idx]\n old_col = A_coo.col[idx]\n\n A[old_row, old_col] = 0\n A[old_col, old_row] = 0\n A[row, col] = 1\n A[col, row] = 1\n\n A.setdiag(0)\n A = A.tocsr()\n A.eliminate_zeros()\n 
return A\n\n\ndef laplacian(W, normalized=True):\n \"\"\"Return the Laplacian of the weigth matrix.\"\"\"\n\n # Degree matrix.\n d = W.sum(axis=0)\n\n # Laplacian matrix.\n if not normalized:\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n L = D - W\n else:\n d += np.spacing(np.array(0, W.dtype))\n d = 1 / np.sqrt(d)\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n I = scipy.sparse.identity(d.size, dtype=W.dtype)\n L = I - D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(L) is scipy.sparse.csr.csr_matrix\n return L\n\n\ndef lmax(L, normalized=True):\n \"\"\"Upper-bound on the spectrum.\"\"\"\n if normalized:\n return 2\n else:\n return scipy.sparse.linalg.eigsh(\n L, k=1, which='LM', return_eigenvectors=False)[0]\n\n\ndef fourier(L, algo='eigh', k=1):\n \"\"\"Return the Fourier basis, i.e. the EVD of the Laplacian.\"\"\"\n\n def sort(lamb, U):\n idx = lamb.argsort()\n return lamb[idx], U[:, idx]\n\n if algo is 'eig':\n lamb, U = np.linalg.eig(L.toarray())\n lamb, U = sort(lamb, U)\n elif algo is 'eigh':\n lamb, U = np.linalg.eigh(L.toarray())\n elif algo is 'eigs':\n lamb, U = scipy.sparse.linalg.eigs(L, k=k, which='SM')\n lamb, U = sort(lamb, U)\n elif algo is 'eigsh':\n lamb, U = scipy.sparse.linalg.eigsh(L, k=k, which='SM')\n\n return lamb, U\n\n\ndef rescale_L(L, lmax=2):\n \"\"\"Rescale the Laplacian eigenvalues in [-1,1].\"\"\"\n M, M = L.shape\n I = scipy.sparse.identity(M, format='csr', dtype=L.dtype)\n L /= lmax / 2\n L -= I\n return L\n\ndef rescale_adj(W):\n \n # self connected\n W.setdiag(1)\n \n # Degree matrix.\n d = W.sum(axis=0)\n\n d += np.spacing(np.array(0, W.dtype))\n d = 1 / np.sqrt(d)\n D = scipy.sparse.diags(d.A.squeeze(), 0)\n W = D * W * D\n\n # assert np.abs(L - L.T).mean() < 1e-9\n assert type(W) is scipy.sparse.csr.csr_matrix\n return W\n \n\n\ndef chebyshev(L, X, K):\n \"\"\"Return T_k X where T_k are the Chebyshev polynomials of order up to K.\n Complexity is O(KMN).\"\"\"\n M, N = X.shape\n assert L.dtype == X.dtype\n\n # L = rescale_L(L, lmax)\n # Xt = T @ X: MxM @ MxN.\n Xt = np.empty((K, M, N), L.dtype)\n # Xt_0 = T_0 X = I X = X.\n Xt[0, ...] = X\n # Xt_1 = T_1 X = L X.\n if K > 1:\n Xt[1, ...] = L.dot(X)\n # Xt_k = 2 L Xt_k-1 - Xt_k-2.\n for k in range(2, K):\n Xt[k, ...] = 2 * L.dot(Xt[k-1, ...]) - Xt[k-2, ...]\n return Xt\n"
] |
[
[
"numpy.array",
"numpy.empty",
"numpy.random.permutation",
"numpy.exp",
"numpy.mean",
"numpy.random.uniform",
"numpy.random.randint",
"numpy.arange",
"numpy.argsort",
"numpy.sqrt",
"numpy.abs",
"numpy.linspace",
"numpy.meshgrid"
]
] |
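The numpy calls listed above form the core of graph.py's k-NN graph construction (`distance_scipy_spatial` plus `adjacency`). A minimal sketch of that pipeline on a small random point cloud; the point count and `k` are illustrative:

```python
# Build a sparse, symmetric k-NN adjacency with Gaussian-kernel weights,
# following the distance_* / adjacency steps of graph.py above.
import numpy as np
import scipy.sparse
import scipy.spatial.distance

z = np.random.rand(10, 2).astype(np.float32)   # 10 points in the plane
k = 3

d = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(z))
idx = np.argsort(d)[:, 1:k + 1]   # skip column 0: the point itself
d.sort()
d = d[:, 1:k + 1]

sigma2 = np.mean(d[:, -1]) ** 2
weights = np.exp(-d ** 2 / sigma2)

M = z.shape[0]
I = np.arange(0, M).repeat(k)
J = idx.reshape(M * k)
V = weights.reshape(M * k)
W = scipy.sparse.coo_matrix((V, (I, J)), shape=(M, M)).tocsr()

W = W.maximum(W.T)   # make the graph undirected
```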
happy-jihye/alias-free-gan-encoder
|
[
"d54df2fc1ee57b2d3259e7f64a79b7fbdc6a4631"
] |
[
"model.py"
] |
[
"import math\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\n\r\nfrom stylegan2.model import PixelNorm, EqualLinear, EqualConv2d, ConvLayer, ResBlock\r\nfrom stylegan2.op import conv2d_gradfix, upfirdn2d, fused_leaky_relu\r\n\r\n\r\ndef polyval(coef, x):\r\n res = 0\r\n\r\n for i, c in enumerate(coef):\r\n res += c * x ** (len(coef) - i - 1)\r\n\r\n return res\r\n\r\n\r\ndef bessel_j1(x):\r\n rp = [\r\n -8.99971225705559398224e8,\r\n 4.52228297998194034323e11,\r\n -7.27494245221818276015e13,\r\n 3.68295732863852883286e15,\r\n ]\r\n rq = [\r\n 1.00000000000000000000e0,\r\n 6.20836478118054335476e2,\r\n 2.56987256757748830383e5,\r\n 8.35146791431949253037e7,\r\n 2.21511595479792499675e10,\r\n 4.74914122079991414898e12,\r\n 7.84369607876235854894e14,\r\n 8.95222336184627338078e16,\r\n 5.32278620332680085395e18,\r\n ]\r\n z1 = 1.46819706421238932572e1\r\n z2 = 4.92184563216946036703e1\r\n\r\n pp = [\r\n 7.62125616208173112003e-4,\r\n 7.31397056940917570436e-2,\r\n 1.12719608129684925192e0,\r\n 5.11207951146807644818e0,\r\n 8.42404590141772420927e0,\r\n 5.21451598682361504063e0,\r\n 1.00000000000000000254e0,\r\n ]\r\n pq = [\r\n 5.71323128072548699714e-4,\r\n 6.88455908754495404082e-2,\r\n 1.10514232634061696926e0,\r\n 5.07386386128601488557e0,\r\n 8.39985554327604159757e0,\r\n 5.20982848682361821619e0,\r\n 9.99999999999999997461e-1,\r\n ]\r\n qp = [\r\n 5.10862594750176621635e-2,\r\n 4.98213872951233449420e0,\r\n 7.58238284132545283818e1,\r\n 3.66779609360150777800e2,\r\n 7.10856304998926107277e2,\r\n 5.97489612400613639965e2,\r\n 2.11688757100572135698e2,\r\n 2.52070205858023719784e1,\r\n ]\r\n qq = [\r\n 1.00000000000000000000e0,\r\n 7.42373277035675149943e1,\r\n 1.05644886038262816351e3,\r\n 4.98641058337653607651e3,\r\n 9.56231892404756170795e3,\r\n 7.99704160447350683650e3,\r\n 2.82619278517639096600e3,\r\n 3.36093607810698293419e2,\r\n ]\r\n\r\n x = torch.as_tensor(x, dtype=torch.float64)\r\n\r\n z = x * x\r\n less5 = polyval(rp, z) / polyval(rq, z)\r\n less5 = less5 * x * (z - z1) * (z - z2)\r\n\r\n w = 5 / x\r\n z = w * w\r\n p = polyval(pp, z) / polyval(pq, z)\r\n q = polyval(qp, z) / polyval(qq, z)\r\n xn = x - (3 / 4 * math.pi)\r\n p = p * torch.cos(xn) - w * q * torch.sin(xn)\r\n more5 = p * math.sqrt(2 / math.pi) / torch.sqrt(x)\r\n\r\n y = torch.empty_like(x)\r\n flag = torch.abs(x) < 5\r\n y[flag] = less5[flag]\r\n y[~flag] = more5[~flag]\r\n\r\n return y\r\n\r\n\r\ndef jinc(x):\r\n pix = math.pi * x\r\n return 2 * bessel_j1(pix) / pix\r\n\r\n\r\ndef kaiser_attenuation(n_taps, f_h, sr):\r\n df = (2 * f_h) / (sr / 2)\r\n\r\n return 2.285 * (n_taps - 1) * math.pi * df + 7.95\r\n\r\n\r\ndef kaiser_beta(n_taps, f_h, sr):\r\n atten = kaiser_attenuation(n_taps, f_h, sr)\r\n\r\n if atten > 50:\r\n return 0.1102 * (atten - 8.7)\r\n\r\n elif 50 >= atten >= 21:\r\n return 0.5842 * (atten - 21) ** 0.4 + 0.07886 * (atten - 21)\r\n\r\n else:\r\n return 0.0\r\n\r\n\r\ndef kaiser_window(n_taps, f_h, sr):\r\n beta = kaiser_beta(n_taps, f_h, sr)\r\n ind = torch.arange(n_taps) - (n_taps - 1) / 2\r\n\r\n return torch.i0(beta * torch.sqrt(1 - ((2 * ind) / (n_taps - 1)) ** 2)) / torch.i0(\r\n torch.tensor(beta)\r\n )\r\n\r\n\r\ndef lowpass_filter(n_taps, cutoff, band_half, sr, use_jinc=False):\r\n window = kaiser_window(n_taps, band_half, sr)\r\n ind = torch.arange(n_taps) - (n_taps - 1) / 2\r\n\r\n if use_jinc:\r\n ind_sq = ind.unsqueeze(1) ** 2\r\n window = window.unsqueeze(1)\r\n coeff = jinc(torch.sqrt(ind_sq + ind_sq.T))\r\n lowpass = (2 * cutoff / sr) ** 
2 * coeff * window * window.T\r\n lowpass = lowpass.to(torch.float32)\r\n\r\n else:\r\n lowpass = 2 * cutoff / sr * torch.sinc(2 * cutoff / sr * ind) * window\r\n\r\n return lowpass\r\n\r\n\r\ndef filter_parameters(\r\n n_layer,\r\n n_critical,\r\n sr_max,\r\n cutoff_0,\r\n cutoff_n,\r\n stopband_0,\r\n stopband_n,\r\n channel_max,\r\n channel_base,\r\n):\r\n cutoffs = []\r\n stopbands = []\r\n srs = []\r\n band_halfs = []\r\n channels = []\r\n\r\n for i in range(n_layer):\r\n f_c = cutoff_0 * (cutoff_n / cutoff_0) ** min(i / (n_layer - n_critical), 1)\r\n f_t = stopband_0 * (stopband_n / stopband_0) ** min(\r\n i / (n_layer - n_critical), 1\r\n )\r\n s_i = 2 ** math.ceil(math.log(min(2 * f_t, sr_max), 2))\r\n f_h = max(f_t, s_i / 2) - f_c\r\n c_i = min(round(channel_base / s_i), channel_max)\r\n\r\n cutoffs.append(f_c)\r\n stopbands.append(f_t)\r\n srs.append(s_i)\r\n band_halfs.append(f_h)\r\n channels.append(c_i)\r\n\r\n return {\r\n \"cutoffs\": cutoffs,\r\n \"stopbands\": stopbands,\r\n \"srs\": srs,\r\n \"band_halfs\": band_halfs,\r\n \"channels\": channels,\r\n }\r\n\r\n\r\nclass FourierFeature(nn.Module):\r\n def __init__(self, size, dim, cutoff, eps=1e-8):\r\n super().__init__()\r\n\r\n coords = torch.linspace(-0.5, 0.5, size + 1)[:-1]\r\n freqs = torch.linspace(0, cutoff, dim // 4)\r\n\r\n self.register_buffer(\"coords\", coords)\r\n self.register_buffer(\"freqs\", freqs)\r\n self.register_buffer(\r\n \"lf\", freqs.view(1, dim // 4, 1, 1) * 2 * math.pi * 2 / size\r\n )\r\n self.eps = eps\r\n\r\n def forward(self, batch_size, affine=None):\r\n coord_map = torch.ger(self.freqs, self.coords)\r\n coord_map = 2 * math.pi * coord_map\r\n size = self.coords.shape[0]\r\n coord_h = coord_map.view(self.freqs.shape[0], 1, size)\r\n coord_w = coord_h.transpose(1, 2)\r\n\r\n if affine is not None:\r\n norm = torch.norm(affine[:, :2], dim=-1, keepdim=True)\r\n affine = affine / (norm + self.eps)\r\n\r\n r_c, r_s, t_x, t_y = affine.view(\r\n affine.shape[0], 1, 1, 1, affine.shape[-1]\r\n ).unbind(-1)\r\n\r\n coord_h_orig = coord_h.unsqueeze(0)\r\n coord_w_orig = coord_w.unsqueeze(0)\r\n\r\n coord_h = -coord_w_orig * r_s + coord_h_orig * r_c - t_y * self.lf\r\n coord_w = coord_w_orig * r_c + coord_h_orig * r_s - t_x * self.lf\r\n\r\n coord_h = torch.cat((torch.sin(coord_h), torch.cos(coord_h)), 1)\r\n coord_w = torch.cat((torch.sin(coord_w), torch.cos(coord_w)), 1)\r\n\r\n coord_h = coord_h.expand(-1, -1, size, -1)\r\n coord_w = coord_w.expand(-1, -1, -1, size)\r\n coords = torch.cat((coord_h, coord_w), 1)\r\n\r\n return coords\r\n\r\n else:\r\n coord_h = torch.cat((torch.sin(coord_h), torch.cos(coord_h)), 0)\r\n coord_w = torch.cat((torch.sin(coord_w), torch.cos(coord_w)), 0)\r\n\r\n coord_h = coord_h.expand(-1, size, -1)\r\n coord_w = coord_w.expand(-1, -1, size)\r\n coords = torch.cat((coord_h, coord_w), 0)\r\n\r\n return coords.unsqueeze(0).expand(batch_size, -1, -1, -1)\r\n\r\n\r\nclass ModulatedConv2d(nn.Module):\r\n def __init__(\r\n self,\r\n in_channel,\r\n out_channel,\r\n kernel_size,\r\n style_dim,\r\n demodulate=True,\r\n decay=0.9989,\r\n padding=True,\r\n ):\r\n super().__init__()\r\n\r\n self.eps = 1e-8\r\n self.kernel_size = kernel_size\r\n self.in_channel = in_channel\r\n self.out_channel = out_channel\r\n\r\n fan_in = in_channel * kernel_size ** 2\r\n self.scale = 1 / math.sqrt(fan_in)\r\n\r\n if padding:\r\n self.padding = kernel_size // 2\r\n\r\n else:\r\n self.padding = 0\r\n\r\n self.weight = nn.Parameter(\r\n torch.randn(1, out_channel, in_channel, kernel_size, 
kernel_size)\r\n )\r\n self.register_buffer(\"ema_var\", torch.tensor(1.0))\r\n self.decay = decay\r\n\r\n self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)\r\n\r\n self.demodulate = demodulate\r\n\r\n def __repr__(self):\r\n return f\"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size})\"\r\n\r\n def forward(self, input, style):\r\n batch, in_channel, height, width = input.shape\r\n\r\n style = self.modulation(style).view(batch, 1, in_channel, 1, 1)\r\n weight = self.scale * self.weight * style\r\n\r\n if self.demodulate:\r\n demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)\r\n weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)\r\n\r\n weight = weight.view(\r\n batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size\r\n )\r\n\r\n if self.training:\r\n var = input.pow(2).mean((0, 1, 2, 3))\r\n self.ema_var.mul_(self.decay).add_(var.detach(), alpha=1 - self.decay)\r\n\r\n weight = weight / (torch.sqrt(self.ema_var) + 1e-8)\r\n\r\n input = input.view(1, batch * in_channel, height, width)\r\n out = conv2d_gradfix.conv2d(input, weight, padding=self.padding, groups=batch)\r\n _, _, height, width = out.shape\r\n out = out.view(batch, self.out_channel, height, width)\r\n\r\n return out\r\n\r\n\r\ndef upsample(x, kernel, factor, pad=(0, 0)):\r\n if kernel.ndim == 2:\r\n x = upfirdn2d(x, kernel, up=(factor, factor), pad=(*pad, *pad))\r\n\r\n else:\r\n x = upfirdn2d(x, kernel.unsqueeze(0), up=(factor, 1), pad=(*pad, 0, 0))\r\n x = upfirdn2d(x, kernel.unsqueeze(1), up=(1, factor), pad=(0, 0, *pad))\r\n\r\n return x\r\n\r\n\r\ndef downsample(x, kernel, factor, pad=(0, 0)):\r\n if kernel.ndim == 2:\r\n x = upfirdn2d(x, kernel, down=(factor, factor), pad=(*pad, *pad))\r\n\r\n else:\r\n x = upfirdn2d(x, kernel.unsqueeze(0), down=(factor, 1), pad=(*pad, 0, 0))\r\n x = upfirdn2d(x, kernel.unsqueeze(1), down=(1, factor), pad=(0, 0, *pad))\r\n\r\n return x\r\n\r\n\r\nclass AliasFreeActivation(nn.Module):\r\n def __init__(\r\n self,\r\n out_channel,\r\n negative_slope,\r\n upsample_filter,\r\n downsample_filter,\r\n upsample,\r\n downsample,\r\n margin,\r\n padding,\r\n ):\r\n super().__init__()\r\n\r\n self.bias = nn.Parameter(torch.zeros(out_channel))\r\n\r\n if upsample_filter.ndim > 1:\r\n upsample_filter = upsample_filter * (upsample ** 2)\r\n\r\n else:\r\n upsample_filter = upsample_filter * upsample\r\n\r\n self.register_buffer(\"upsample_filter\", upsample_filter)\r\n self.register_buffer(\"downsample_filter\", downsample_filter)\r\n\r\n self.negative_slope = negative_slope\r\n self.upsample = upsample\r\n self.downsample = downsample\r\n self.margin = margin\r\n\r\n p = upsample_filter.shape[0] - upsample\r\n\r\n if padding:\r\n self.up_pad = ((p + 1) // 2 + upsample - 1, p // 2)\r\n\r\n else:\r\n self.up_pad = ((p + 1) // 2 + upsample * 2 - 1, p // 2 + upsample)\r\n\r\n p = downsample_filter.shape[0] - downsample\r\n self.down_pad = ((p + 1) // 2, p // 2)\r\n\r\n def forward(self, input):\r\n out = input + self.bias.view(1, -1, 1, 1)\r\n out = upsample(out, self.upsample_filter, self.upsample, pad=self.up_pad)\r\n out = fused_leaky_relu(out, negative_slope=self.negative_slope)\r\n out = downsample(\r\n out, self.downsample_filter, self.downsample, pad=self.down_pad\r\n )\r\n m = self.margin\r\n m = m * self.upsample - m * self.downsample\r\n m //= 2\r\n\r\n if m > 0:\r\n out = out[:, :, m:-m, m:-m]\r\n\r\n return out\r\n\r\n\r\nclass AliasFreeConv(nn.Module):\r\n def __init__(\r\n self,\r\n in_channel,\r\n 
out_channel,\r\n kernel_size,\r\n style_dim,\r\n upsample_filter,\r\n downsample_filter,\r\n upsample=1,\r\n demodulate=True,\r\n margin=10,\r\n ):\r\n super().__init__()\r\n\r\n self.conv = ModulatedConv2d(\r\n in_channel,\r\n out_channel,\r\n kernel_size,\r\n style_dim,\r\n demodulate=demodulate,\r\n padding=False,\r\n )\r\n\r\n self.activation = AliasFreeActivation(\r\n out_channel,\r\n 0.2,\r\n upsample_filter,\r\n downsample_filter,\r\n upsample * 2,\r\n 2,\r\n margin=margin,\r\n padding=kernel_size != 3,\r\n )\r\n\r\n def forward(self, input, style):\r\n out = self.conv(input, style)\r\n out = self.activation(out)\r\n\r\n return out\r\n\r\n\r\nclass ToRGB(nn.Module):\r\n def __init__(self, in_channel, style_dim):\r\n super().__init__()\r\n\r\n self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)\r\n self.bias = nn.Parameter(torch.zeros(3))\r\n\r\n def forward(self, input, style):\r\n out = self.conv(input, style)\r\n out = out + self.bias.view(1, -1, 1, 1)\r\n\r\n return out\r\n\r\n\r\nclass Generator(nn.Module):\r\n def __init__(\r\n self,\r\n style_dim,\r\n n_mlp,\r\n kernel_size,\r\n n_taps,\r\n filter_parameters,\r\n margin=10,\r\n lr_mlp=0.01,\r\n use_jinc=False,\r\n ):\r\n super().__init__()\r\n\r\n self.style_dim = style_dim\r\n self.margin = margin\r\n\r\n layers = [PixelNorm()]\r\n\r\n for i in range(n_mlp):\r\n layers.append(\r\n EqualLinear(\r\n style_dim, style_dim, lr_mul=lr_mlp, activation=\"fused_lrelu\"\r\n )\r\n )\r\n\r\n self.style = nn.Sequential(*layers)\r\n\r\n cutoffs = filter_parameters[\"cutoffs\"]\r\n stopbands = filter_parameters[\"stopbands\"]\r\n srs = filter_parameters[\"srs\"]\r\n band_halfs = filter_parameters[\"band_halfs\"]\r\n channels = filter_parameters[\"channels\"]\r\n\r\n self.input = FourierFeature(srs[0] + margin * 2, channels[0], cutoff=cutoffs[0])\r\n self.affine_fourier = EqualLinear(style_dim, 4)\r\n self.affine_fourier.weight.detach().zero_()\r\n self.affine_fourier.bias.detach().copy_(\r\n torch.tensor([1, 0, 0, 0], dtype=torch.float32)\r\n )\r\n self.conv1 = EqualConv2d(channels[0], channels[0], 1)\r\n\r\n self.convs = nn.ModuleList()\r\n for i in range(len(srs)):\r\n prev = max(i - 1, 0)\r\n sr = srs[i]\r\n\r\n up = 1\r\n if srs[prev] < sr:\r\n up = 2\r\n\r\n up_filter = lowpass_filter(\r\n n_taps * up * 2,\r\n cutoffs[prev],\r\n band_halfs[prev],\r\n srs[i] * up * 2,\r\n use_jinc=use_jinc,\r\n )\r\n down_filter = lowpass_filter(\r\n n_taps * up,\r\n cutoffs[i],\r\n band_halfs[i],\r\n srs[i] * up * 2,\r\n use_jinc=use_jinc,\r\n )\r\n\r\n self.convs.append(\r\n AliasFreeConv(\r\n channels[prev],\r\n channels[i],\r\n kernel_size,\r\n style_dim,\r\n up_filter / up_filter.sum(),\r\n down_filter / down_filter.sum(),\r\n up,\r\n margin=margin,\r\n )\r\n )\r\n\r\n self.to_rgb = ToRGB(channels[-1], style_dim)\r\n \r\n \r\n def mean_latent(self, n_latent):\r\n latent_in = torch.randn(\r\n n_latent, self.style_dim, device=self.conv1.weight.device\r\n )\r\n latent = self.style(latent_in).mean(0, keepdim=True)\r\n\r\n return latent\r\n\r\n def get_latent(self, style, truncation=1, truncation_latent=None):\r\n latent = self.style(style)\r\n\r\n if truncation < 1:\r\n latent = truncation_latent + truncation * (latent - truncation_latent)\r\n\r\n return latent\r\n\r\n def get_transform(self, style, truncation=1, truncation_latent=None):\r\n latent = self.style(style)\r\n\r\n if truncation < 1:\r\n latent = truncation_latent + truncation * (latent - truncation_latent)\r\n\r\n return self.affine_fourier(latent)\r\n\r\n def 
forward(self, style, truncation=1, truncation_latent=None, transform=None, random_rgb=False, input_is_latent=False):\r\n \r\n if not input_is_latent:\r\n latent = self.style(style)\r\n else:\r\n latent = style\r\n\r\n if truncation < 1:\r\n latent = truncation_latent + truncation * (latent - truncation_latent)\r\n \r\n if transform is None:\r\n transform = self.affine_fourier(latent)\r\n\r\n out = self.input(latent.shape[0], transform)\r\n out = self.conv1(out)\r\n\r\n for conv in self.convs:\r\n out = conv(out, latent)\r\n\r\n out = out[:, :, self.margin : -self.margin, self.margin : -self.margin]\r\n if random_rgb:\r\n random_z = torch.randn(1, 512, device=self.conv1.weight.device)\r\n rgb_latent = self.style(random_z) \r\n out = self.to_rgb(out, rgb_latent) / 4\r\n else:\r\n out = self.to_rgb(out, latent) / 4\r\n\r\n\r\n return out\r\n\r\n"
] |
[
[
"torch.ger",
"torch.cos",
"torch.cat",
"torch.sqrt",
"torch.zeros",
"torch.sinc",
"torch.arange",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.sin",
"torch.norm",
"torch.linspace",
"torch.abs",
"torch.tensor",
"torch.as_tensor",
"torch.randn",
"torch.empty_like"
]
] |
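The torch calls above (`torch.i0`, `torch.sinc`, `torch.arange`, ...) build the Kaiser-windowed sinc low-pass filters that the alias-free layers in model.py use. A condensed sketch of that filter construction, collapsing `kaiser_attenuation`, `kaiser_beta`, `kaiser_window`, and `lowpass_filter` into one function; the cutoff and sampling-rate values are illustrative:

```python
# Kaiser-windowed sinc low-pass filter, condensed from the helpers above.
import math

import torch


def kaiser_lowpass(n_taps, cutoff, band_half, sr):
    # Kaiser beta from the required attenuation (same formulas as in model.py)
    df = (2 * band_half) / (sr / 2)
    atten = 2.285 * (n_taps - 1) * math.pi * df + 7.95
    if atten > 50:
        beta = 0.1102 * (atten - 8.7)
    elif atten >= 21:
        beta = 0.5842 * (atten - 21) ** 0.4 + 0.07886 * (atten - 21)
    else:
        beta = 0.0

    ind = torch.arange(n_taps) - (n_taps - 1) / 2
    window = torch.i0(beta * torch.sqrt(1 - ((2 * ind) / (n_taps - 1)) ** 2)) \
        / torch.i0(torch.tensor(beta))
    return 2 * cutoff / sr * torch.sinc(2 * cutoff / sr * ind) * window


taps = kaiser_lowpass(n_taps=6, cutoff=2.0, band_half=1.0, sr=16)
print(taps / taps.sum())   # normalised, as the Generator does before use
```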
cutz-j/AR-project
|
[
"50d4f407a4f2c42e12bf2bcd54c436df6fa3c9fa",
"50d4f407a4f2c42e12bf2bcd54c436df6fa3c9fa"
] |
[
"MobileNetSSD/realsense_ssd.py",
"RealSenseSDK/API_test.py"
] |
[
"### TENSORFLOW API: MobilnetV2_SSD ###\n## Import packages ##\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tensorflow as tf\nfrom distutils.version import StrictVersion\nfrom collections import defaultdict\nfrom io import StringIO\n## 시각화 Tool ##\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimport cv2\n## 통신 LIB ##\nimport mmap as mp\n## API Tool ##\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\nfrom object_detection.utils import ops as utils_ops\n\n################ tensorflow version check ####################\nsys.path.append(\"..\")\nif StrictVersion(tf.__version__) < StrictVersion('1.12.0'):\n raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\n\n## share memory part ##\nwidth = 640\nheight = 480\nimg_size = width * height * 3\ndepth_size = width * height\nrow = 40\ncol = 7\ninfo_size = row * col\nmemory = np.zeros(shape=[row, col], dtype=float)\n\n################# object detection part #####################\n# Name of the directory containing the object detection module we're using\nMODEL_NAME = 'ssd_mobilenet_v2_coco_2018_03_29'\nMODEL_FILE = MODEL_NAME + '.tar.gz'\nPATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'\nPATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n\n## Grab path to current working directory ##\nCWD_PATH = os.getcwd()\nNUM_CLASSES = 80\n\n## Load the label map. ##\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)\n\n## Load the Tensorflow model into memory ##\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n sess = tf.Session(graph=detection_graph)\n\n# Input tensor is the image #\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0') # confidence\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0') # 80classes\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\nwhile(True):\n # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\n # i.e. 
a single-column array, where each item in the column has the pixel RGB value\n readMap_image = mp.mmap(0, img_size*4, \"IMAGE\") # IMAGE READ\n readMap_info = mp.mmap(0, info_size*4, \"INFO\") # Detection Info WRITE\n readData = readMap_image.read(img_size * 4) # share memory read\n image = np.frombuffer(readData, dtype=np.int, count=img_size)\n image = image.copy().reshape(height, width, 3)\n color_image = image.astype('uint8')\n res_image = color_image.copy()\n # cv image 변환 bgr --> rgb 변환 #\n res_image[:,:,0] = color_image[:,:,2]\n res_image[:,:,2] = color_image[:,:,0]\n frame_expanded = np.expand_dims(res_image, axis=0)\n \n # signal memory --> point cloud stage: not detecting #\n readMap_signal = mp.mmap(0, 8, \"SIGNAL\")\n signal = np.zeros(shape=[1,1], dtype=np.int32)\n readMap_signal.write(signal)\n memory = np.zeros_like(memory) # memory 초기화 #\n\n # Perform the actual detection by running the model with the image as input\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: frame_expanded})\n\n # Draw the results of the detection (aka 'visulaize the results')\n vis_util.visualize_boxes_and_labels_on_image_array(\n res_image,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8,\n min_score_thresh=0.60)\n \n # memory write --> detect num이 없을 경우 0 전송 #\n if int(num[0]) == 0:\n memory = np.zeros_like(memory)\n \n else:\n # detect이 됐을 경우, memory에 저장 후 공유메모리 저장 #\n for i in range(int(num[0])):\n memory[i, 0], memory[i, 1], memory[i, 2], memory[i, 3] = boxes[0, i, 0] * height, boxes[0, i, 1] * width, boxes[0, i, 2] * height, boxes[0, i, 3] * width\n memory[i, 4] = scores[0, i]\n memory[i, 5] = float(classes[0, i])\n memory[i, 6] = float(num[0])\n np_arr = memory.ravel().astype(np.float32) \n readMap_info.write(np_arr) # 공유메모리 저장\n \n ## detect 시각화 ##\n cv2.imshow('Object detector', res_image)\n\n############## POINT CLOUD STAGE ######################\n # 파이썬 cv에서 q를 눌렀을 경우 stage 전환 #\n if cv2.waitKey(1) == ord('q'):\n signal[0] = 1 # signal 변환\n cv2.destroyAllWindows() # 시각화 종료\n readMap_signal = mp.mmap(0, 8, \"SIGNAL\")\n readMap_signal.write(signal) # signal 전송\n while(signal[0] == 1):\n ## signal이 1인 동안에는 공유메모리 읽기만을 반복: NOT detecting ##\n readMap_signal = mp.mmap(0, 8, \"SIGNAL\")\n read_signal = readMap_signal.read(4)\n r_signal = np.frombuffer(read_signal, dtype=np.int, count=1)\n if (r_signal[0] == 0):\n signal[0] = 0\n\n # Press 't' to transform\n\n# Clean up\n\nreadMap_image.close()\nreadMap_info.close()\ncv2.destroyAllWindows()\n\n",
"### pyrealsense2 INSTRUCTION ###\nimport pyrealsense2 as rs\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\npipeline = rs.pipeline()\npipeline.start()\n\n#try:\n# while True:\n# frames = pipeline.wait_for_frames()\n# depth = frames.get_depth_frame()\n# \n# if not depth:\n# continue\n# \n# coverage = [0] * 64\n# for y in range(480):\n# for x in range(640):\n# dist = depth.get_distance(x, y)\n# if 0 < dist and dist < 1:\n# coverage[x//10] += 1\n# \n# if y % 20 == 19:\n# line = \"\"\n# for c in coverage:\n# line += \" .:nhBXWW\"[c//25]\n# coverage = [0]*64\n# print(line)\n#\n#finally:\n# pipeline.stop()\n \n### numpy INSTRUCTION ###\nframes = pipeline.wait_for_frames()\ndepth = frames.get_depth_frame()\nimg_data = frames.get_color_frame().as_frame().get_data()\ndepth_data = depth.as_frame().get_data()\nnp_image = np.asanyarray(img_data)\nnp_depth = np.asanyarray(depth_data)\nplt.imshow(np_image)\nplt.imshow(np_depth)"
] |
[
[
"numpy.zeros_like",
"numpy.zeros",
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"numpy.frombuffer",
"numpy.squeeze",
"numpy.expand_dims"
],
[
"numpy.asanyarray",
"matplotlib.pyplot.imshow"
]
] |
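The TensorFlow calls listed above are the frozen-graph loading and inference pattern in realsense_ssd.py. A minimal sketch of that pattern, assuming TensorFlow 1.x and a `frozen_inference_graph.pb` exported by the TF Object Detection API; the path and the all-zero frame are placeholders:

```python
# Load a TF1 frozen detection graph and run the detection tensors once.
# Assumes TensorFlow 1.x; the model path is a placeholder.
import numpy as np
import tensorflow as tf

PATH_TO_FROZEN_GRAPH = "ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb"

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, "rb") as fid:
        od_graph_def.ParseFromString(fid.read())
    tf.import_graph_def(od_graph_def, name="")

sess = tf.Session(graph=detection_graph)

image_tensor = detection_graph.get_tensor_by_name("image_tensor:0")
boxes_t = detection_graph.get_tensor_by_name("detection_boxes:0")
scores_t = detection_graph.get_tensor_by_name("detection_scores:0")

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in camera frame
boxes, scores = sess.run(
    [boxes_t, scores_t],
    feed_dict={image_tensor: np.expand_dims(frame, axis=0)},
)
print(boxes.shape, scores.shape)
```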
beAWARE-project/crisis-classification
|
[
"2061a2ee57fd502bd973fdfcffc6d7098049b5ed"
] |
[
"main/src/CRCL/FloodCRisisCLassification/CRCL_FLOOD_Forecast_v15.py"
] |
[
"# Created Date: 11/09/2018\n# Modified Date: 12/09/2018\n#\n# Implements the 1st algorithm of Crisis Classification module\n# based on the predicted water levels from AMICO for particular 60\n# river sections in the next 54h starting at a specific date/time or\n# the last execution of AMICO module.\n#\n# Groups of River Sections are inserted by a csv/xlsx file\n#\n# CRCL_from_Forecast calculates the scale (1-4) for each river section and the\n# Overall Crisis Level index for each river's group of sections and\n# whole Vicenza city.\n#\n#----------------------------------------------------------------------------------------------------------\n# Inputs: a) Time series of predicted water levels from AMICO for each one of the\n# interest river section in the next 54h starting a specific date/time or\n# for the lastRun of AMICO's program\n# b) Thresholds for each one of the river section\n#\n# Outputs: TOP104_METRIC_REPORT which contains the maximum predicted crisis level in the next 54h for\n# the particular river section (pre-alert visualization)\n#\n# Algorithm 1 from Crisis Classification (based on AAWA)\n#----------------------------------------------------------------------------------------------------------\n#\n\nfrom bus.bus_producer import BusProducer\nimport json, time, re\nimport os, errno\nfrom pathlib import Path\nfrom pandas import read_csv, DataFrame, concat, ExcelWriter\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\nfrom math import pow, ceil\nfrom collections import OrderedDict\n\nfrom CRCL.FloodCRisisCLassification.Topic104_Metric_Report import Top104_Metric_Report\nfrom CRCL.FloodCRisisCLassification.Create_Queries import extract_forecasts, extract_river_sections_loc\nfrom CRCL.FloodCRisisCLassification.Auxiliary_functions import *\nfrom CRCL.FloodCRisisCLassification.topic104Flood import *\n\ndef CrisisClassificationFlood_PreEmerg():\n\n ver = 'Ver15_2nd_Period'\n\n # Create a directory to store the output files and TOPICS\n # Create a path\n current_dirs_parent = os.getcwd()\n root_path_dir = current_dirs_parent + \"/\" + \"CRCL/FloodCRisisCLassification\" + \"/\"\n\n now = datetime.now()\n\n directory = root_path_dir + \"TOPICS_for_2010\" + \"_\" + ver + \"_\" + str(now.year) + \"_\" + str(now.month) + \"_\" + str(now.day)\n os.makedirs(directory, exist_ok=True)\n\n #-----------------------------------------------------------------------------------\n # Fetch data from the OGC SensorThings API\n #\n # User defined values in order to formulate the query\n #\n service_root_URI = 'https://beaware.server.de/SensorThingsService/v1.0/'\n\n SensorThingEntities = ['Things', 'Locations', 'HistoricalLocations',\n 'Datastreams', 'Sensor', 'Observations',\n 'ObservedProperties', 'FeaturesOfInterest', 'MultiDatastreams']\n\n #------------------------------------------------------------------------------------------------\n # STEP 1: Extract the ids, the names, the properties and the location of all river sections\n #\n # https://beaware.server.de/SensorThingsService/v1.0/Things\n # ? 
$filter=properties/type%20eq%20%27riverSection%27\n # & $select=id,name,properties\n # & $expand=Locations($select=description,location)\n # & $count=true\n # & $top=1000\n\n # Start Timing Step 1\n start_step1 = time.time()\n\n # Store the time steps\n time_duration_step = []\n\n SensorThings = [SensorThingEntities[0], SensorThingEntities[1]]\n\n filt_vals = 'riverSection'\n sel_vals = {'thing_sel': ['id', 'name', 'description','properties'],\n 'loc_sel': ['description', 'location']}\n\n riverSections = extract_river_sections_loc(service_root_URI, SensorThings, filt_vals, sel_vals)\n\n # write json (data) to output file\n flname = directory + \"/\" + 'response_riverSections.txt'\n with open(flname, 'w') as outfile:\n json.dump(OrderedDict(riverSections), outfile)\n\n # count: number of river sections to be examined. Total river sections is 304.\n count = riverSections[\"@iot.count\"]\n\n #------------------------------------------\n # Mapping\n\n mapRS_df = mappingRS(riverSections)\n\n # excel\n # Store FWI dataframe to excel file\n xls = pd.ExcelWriter(directory + \"/\" + \"mappingRS.xlsx\")\n mapRS_df.to_excel(xls, 'Sheet1', index=False)\n xls.save()\n\n\n # End Timing Step 1\n end_step1 = time.time()\n time_duration_step.append( end_step1 - start_step1 )\n\n\n #----------------------------------------------------------------------------------------------------\n # STEP 2: Extract predicted water levels from AMICO for each one of the interest river sections\n # in the next 54h and find the maximum value, compares it with predefined thresholds.\n # If this max value exceeds the thresholds and a new scale (metric) with values 0 to 3\n # is calculated based on the result of comparison. The Overall Crisis Classification Index\n # is calculated per group of river sections and whole region. The appropriate messages\n # are created and sent them to logger.\n #----------------------------------------------------------------------------------------------------\n # 2.1 Extract one measurement (forecast for water river level) from one station at specific date/time\n #\n # ex. Things id 390 -> River section Astico m .00\n # Date -> 2018-01-26T08:00:00.000Z\n\n # Start Timing Step 2\n start_step2 = time.time()\n\n # Import interesting river sections from file (csv)\n csv_fname = root_path_dir + 'Amico_RS_in_Vicenza_v5.csv'\n IntRS = read_csv(csv_fname, sep=\",\")\n\n # Set constant variables which are utilised to create the query to extract Observations of each River Section\n #\n SensorThings = [SensorThingEntities[0], SensorThingEntities[1], SensorThingEntities[3], SensorThingEntities[5]]\n sel_vals = {'dstr_sel': ['id', 'name', 'properties'], 'obs_sel': ['result', 'phenomenonTime', 'id', 'parameters']}\n filt_args={'obs_filt': ['phenomenonTime']}\n\n # Define the Date/Time interval\n dates = ['2010-10-31T00:00:00.000Z', '2010-11-03T00:00:00.000Z']\n\n filt_vals={'obs_filt_vals': dates}\n ord_vals = ['phenomenonTime']\n\n #flag_last_run = True\n flag_last_run = False\n\n #----------------------------------------------------------------------------------------\n # Create new Producer instance using provided configuration message (dict data).\n\n producer = BusProducer()\n\n # Decorate terminal\n print('\\033[95m' + \"\\n***********************\")\n print(\"*** CRCL SERVICE v1.0 ***\")\n print(\"***********************\\n\" + '\\033[0m')\n\n total_irs_names = 0\n total_top104 = 0\n\n # Initialize the list of dictionaries. 
Each one contains the name of the group of River Sections\n # and a vector of the scale cardinality (count = [n1, n2, n3, n4] for each group)\n # The river sections that belong to each of the groups are defined by the column RS_Group\n\n group_names = IntRS['RS_GroupName'].unique()\n group_ids = IntRS['RS_Group'].unique()\n group_descr = IntRS['RS_GroupDescr'].unique()\n\n # Initialization process to the list of dictionaries for RiverSect_CountScale\n RiverSect_CountScale = []\n for gr in range(len(group_names)):\n item = {'id': group_ids[gr], 'name': group_names[gr], 'descr':group_descr[gr],\n 'count': [0,0,0,0], 'group_center_pos': [] }\n RiverSect_CountScale.append(item)\n\n #---------------------------------------------------------\n # Store forecast values to data.frame\n dfRSF = DataFrame([])\n\n TOTAL_TOPICS104_LIST = []\n\n flag_critical_rs = False\n\n for counter in range(0, count):\n\n print(\"\\n Counter = \", counter)\n print(\"River Section Name: \", riverSections[\"value\"][counter]['name'], \", ID: \", riverSections[\"value\"][counter]['@iot.id'])\n\n if len( IntRS[ IntRS.ix[:,'Name'].str.contains(riverSections[\"value\"][counter]['name']) ] ) != 0:\n\n total_irs_names = total_irs_names + 1\n\n # Find the position which RS has in the IntRS and determine whether it is a critical RS.\n # If it is, then flag_critical_rs will be equal to True.\n pos_rs = IntRS.loc[ IntRS.ix[:,'Name'].str.contains(riverSections[\"value\"][counter]['name']) ].index.values\n\n if IntRS.ix[pos_rs[0], 'RS_Critical'] == 1:\n flag_critical_rs = True\n print(\"\\n This RS is critical:\", flag_critical_rs)\n else:\n print(\"\\n This RS is NOT critical:\", flag_critical_rs)\n\n # find the position of RiverSect_CountScale in which the river section name matches\n # with the name of group of river sections\n group_IntRS = int(IntRS[ IntRS['RS_ID_SensorThingServer'] == riverSections[\"value\"][counter]['@iot.id'] ]['RS_Group'].iloc[0])\n\n # Take the name and the description of the group from the list RiverSect_CountScale\n for rscs_it in range(len(RiverSect_CountScale)):\n name_group = RiverSect_CountScale[RiverSect_CountScale[rscs_it]['id'] == group_IntRS]['name']\n\n # Arrays to store values from the TOP104 (initialize for each river section)\n max_yValues = []\n meas_color = []\n meas_note = []\n max_measurementID = []\n max_measurementTimeStamp = []\n dataSeriesID = []\n dataSeriesName = []\n xVals = []\n dataStreamName = []\n dataStreamID = []\n dataStreamDescript = []\n\n ids = {'th_id': str(riverSections[\"value\"][counter]['@iot.id']) }\n\n if flag_last_run == False:\n response_forecast = extract_forecasts(service_root_URI, SensorThings, ids, sel_vals, ord_vals, filter_args=filt_args, filter_vals=filt_vals)\n else:\n response_forecast = extract_forecasts(service_root_URI, SensorThings, ids, sel_vals, ord_vals, last_run=flag_last_run)\n\n # write json (data) to output file\n #flname = directory + \"/\" + 'response_forecast_' + riverSections[\"value\"][counter]['name'].replace(\" \", \"\") + \".txt\"\n #with open(flname, 'w') as outfile:\n # json.dump(OrderedDict(response_forecast), outfile)\n\n #-----------------------------------------------\n # Update the data frame with new Observations\n Obs = response_forecast['Datastreams'][0]['Observations']\n Obs_df = DataFrame.from_dict(Obs)\n len_Obs = len(Obs_df)\n RS_name = DataFrame( [ riverSections[\"value\"][counter]['name'] ]*len_Obs )\n RS_id = DataFrame( [ riverSections[\"value\"][counter]['@iot.id'] ]*len_Obs )\n\n loc_riverSection = 
riverSections[\"value\"][counter]['Locations'][0]['location']['coordinates']\n\n temp_df = concat( [ RS_name, RS_id ], axis=1)\n temp_df = concat( [ temp_df, Obs_df['phenomenonTime'], Obs_df['result'] ], axis=1)\n\n Obs_lat = DataFrame( [ loc_riverSection[1] ]*len_Obs )\n Obs_long = DataFrame( [ loc_riverSection[0] ]*len_Obs )\n temp_df = concat( [ temp_df, Obs_lat, Obs_long ], axis=1)\n\n dfRSF = concat( [dfRSF, temp_df] )\n\n #--------------------------------------------\n\n # Extract the thresholds of the response of riverSections query correspond to the specific river section\n thresh = [riverSections[\"value\"][counter]['properties']['treshold1'],\n riverSections[\"value\"][counter]['properties']['treshold2'],\n riverSections[\"value\"][counter]['properties']['treshold3']]\n\n # Extract the observations WL forecasted values and stored in the array yValues\n Obs_yV_length = len(response_forecast['Datastreams'][0]['Observations'])\n\n Obs_yv = []\n for iter in range(0, Obs_yV_length):\n Obs_yv += [response_forecast['Datastreams'][0]['Observations'][iter]['result']]\n\n # Find all the maximum of the Obs_yv and its positions\n Obs_yv_max = max(Obs_yv)\n maxIndexList = [i for i,j in enumerate(Obs_yv) if j == Obs_yv_max]\n first_max_pos = [min(maxIndexList)] # considers only the first maximum value\n\n # Calculate the Crisis Classification Level for each River Section\n # If the maximum value exceeds one of the predefined thresholds then\n # it stored in the topic (flag_extreme=True), otherwise it ignores (flag_extreme = False)\n #\n resp_comparison = compare_forecast_new_scale_thresholds(Obs_yv_max, thresh)\n\n flag_extreme = resp_comparison[len(resp_comparison) - 1]\n\n print(\"**** resp_comparison = \", resp_comparison)\n\n # Update the count in position equal with the scale adding one\n # for the particular group river section defined by name_group or group_IntRS\n RiverSect_CountScale[group_IntRS-1]['count'][ resp_comparison[2] - 1] += 1\n\n # Update the position of the group's center\n if len( RiverSect_CountScale[group_IntRS-1]['group_center_pos']) == 0:\n RiverSect_CountScale[group_IntRS-1]['group_center_pos'] = loc_riverSection\n #else:\n # RiverSect_CountScale[group_IntRS-1]['group_center_pos'][0] += loc_riverSection[0]\n # RiverSect_CountScale[group_IntRS-1]['group_center_pos'][1] += loc_riverSection[1]\n\n # For the cases which exceed one of the alarm thresholds do:\n if flag_extreme == True and resp_comparison[0][0] != '#00FF00':\n max_yValues += [Obs_yv_max] # for forecast\n max_yValues += [resp_comparison[2]] # for scale\n\n meas_color.append( resp_comparison[0][0] ) # for forecast\n meas_color.append(\"\") # for scale\n\n meas_note.append( resp_comparison[1][0] ) # for forecast\n meas_note.append( resp_comparison[3][0] ) # for scale\n\n dataSeriesID += [riverSections[\"value\"][counter]['@iot.id']]*len(max_yValues) # counter + 1\n dataSeriesName += [riverSections[\"value\"][counter]['name']]*len(max_yValues)\n\n # Find details regarding the maximum observation and stored them in the corresponding arrays\n item = response_forecast['Datastreams'][0]['Observations'][first_max_pos[0]]\n max_measurementID += [ str(item['@iot.id']) + '_1' ] # for forecast\n max_measurementID += [ str(item['@iot.id']) + '_2' ] # for scale\n max_measurementTimeStamp += [datetime.utcnow().replace(microsecond=0).isoformat() + 'Z']*len(max_yValues)\n xVals += [ item['phenomenonTime'].replace('.000Z', \"Z\") ]*len(max_yValues)\n\n # 
--------------------------------------------------------------------------------------------\n # STEP 2.2: Creates the TOPIC_104_METRIC_REPORT\n # ------------------------------------------------------------------------------------------\n #\n # Create the TOPIC 104 (json format) for the maximum value of predicted water levels\n # in the time interval defined by the 'dates' or for the lastRun of AMICO's program\n # for the specific river section.\n #\n # run and count Topic104index\n\n topics104 = topic104FloodIndex_VER14(directory, flag_last_run, response_forecast, max_yValues,\n meas_color, meas_note, max_measurementID, max_measurementTimeStamp,\n dataSeriesID, dataSeriesName, xVals, dataStreamName, dataStreamID,\n dataStreamDescript, dates, thresh, riverSections,\n RiverSect_CountScale, counter, mapRS_df)\n\n TOTAL_TOPICS104_LIST.append( topics104 )\n total_top104 = total_top104 + len(topics104)\n\n if flag_critical_rs == True:\n\n topics104_critical = topic104FloodIndex_critical(directory, flag_last_run, response_forecast, max_yValues,\n meas_color, meas_note, max_measurementID, max_measurementTimeStamp,\n dataSeriesID, dataSeriesName, xVals, dataStreamName, dataStreamID,\n dataStreamDescript, dates, thresh, riverSections,\n RiverSect_CountScale, counter, mapRS_df)\n\n # Append critical topis to the list\n TOTAL_TOPICS104_LIST.append( topics104_critical )\n total_top104 = total_top104 + len(topics104_critical)\n\n\n # Before consider the new RS, turn the flag_critical_rs to false again\n flag_critical_rs = False\n\n\n # End Timing Step 2\n end_step2 = time.time()\n time_duration_step.append( end_step2 - start_step2 )\n\n\n #-----------------------------------------------------\n print(\"\\n len = \", len(TOTAL_TOPICS104_LIST))\n print(\"counter = \", total_top104)\n\n #==========================================================================\n # Start Timing Step 3 - Sending\n start_step3 = time.time()\n\n # Send messages to PSAP\n if len(TOTAL_TOPICS104_LIST) != 0:\n print(\n 'Send message: Max Predicted Water Level value and its Category have been forwarded to logger into 2 separate messages!')\n\n for it in range(len(TOTAL_TOPICS104_LIST)):\n producer.send(\"TOP104_METRIC_REPORT\", TOTAL_TOPICS104_LIST[it])\n #\n # print(\"\\n ***** TOPIC: \")\n # print(top104_forecast[it])\n # print(\"*******\\n\")\n else:\n print('No messages will be forward to logger!!!')\n\n\n # End Timing Step 3 - Sending\n end_step3 = time.time()\n time_duration_step.append( end_step3 - start_step3 )\n\n\n #----------------------------------------------------------------------------------\n # write Data.Frame with forecasts to xlsx output file\n dfRSF.columns = ['RS_Name', 'RS_ID', 'phenomenonTime', 'result', 'Lat', 'Long' ]\n dfxls = ExcelWriter(directory + \"/\" + \"DataFrame_forecasts.xlsx\")\n dfRSF.to_excel(dfxls,'Sheet1', index=False)\n dfxls.save()\n\n #print(\"\\n----- DATA FRAME ----- \\n\")\n #len(dfRSF)\n #print(dfRSF)\n #print(\"\\n-------------------\\n\")\n\n # Update the center (position) of each group\n # for grid in range(len(RiverSect_CountScale)):\n #\n # total_group_counts = sum(RiverSect_CountScale[grid]['count'])\n #\n # if total_group_counts != 0:\n # RiverSect_CountScale[grid]['group_center_pos'][0] = RiverSect_CountScale[grid]['group_center_pos'][0]/total_group_counts\n # RiverSect_CountScale[grid]['group_center_pos'][1] = RiverSect_CountScale[grid]['group_center_pos'][1]/total_group_counts\n\n\n\n 
#-------------------------------------------------------------------------------------\n # STEP 4: Calculate the Overall Crisis Classification Index & Overall Crisis Level\n\n # Start Timing Step 4\n start_step4 = time.time()\n\n print(\"\\n ======= RiverSect_CountScale BEFORE Calculate the OCCI \\n\")\n print(RiverSect_CountScale)\n\n # flag_scale = TRUE -> new scale is used {1,2,3,4}, otherwise the old scale is used {0,1,2,3}\n\n flag_scale = True\n over_crisis_class_indx = Overall_Crisis_Classification_Index(RiverSect_CountScale, flag_scale)\n\n print(\"\\n***************************\")\n\n print(\"\\n ======= AFTER Calculate the OCCI \\n\")\n print(\"\\n Groups River Sections Pre-Alert Overall Crisis Classification Index = \", over_crisis_class_indx)\n\n #------ OBSOLETE\n # OCL = Overall_Crisis_Level(RiverSect_CountScale, over_crisis_class_indx)\n #-----------------\n\n # Calculate the OCL per group and the OCL for all RS.\n # Use Weighted Average of the Overall Crisis Level over all groups\n\n weights = [1]*len(RiverSect_CountScale)\n\n OCL = Group_Overall_Crisis_Level(RiverSect_CountScale, over_crisis_class_indx, weights)\n\n print(\"\\n***************************\")\n\n print(\"\\n Pre-Alert Overall Crisis Lever = \", OCL)\n print(\"\\n***************************\\n\")\n\n\n # ----------------------------------------------------------------------------------------------\n # Creates TOP104 for the Overall_Crisis_Level index per group of river sections\n # and the whole region of interest\n\n total_top104_index = 0\n\n total_topic104_overall = topic104FloodOverall(directory, RiverSect_CountScale, OCL, total_top104_index, producer)\n\n total_top104 = total_top104 + total_topic104_overall\n\n\n # End Timing Step 4\n end_step4 = time.time()\n time_duration_step.append( end_step4 - start_step4 )\n\n\n #---------------------------------------------------------------------------\n total_time = np.array(time_duration_step).sum()\n\n print(\"\\n ****** EXECUTION TIME: **** \")\n print(\" Time for Step 1. Data Acquisition: \", round(time_duration_step[0], 3), \" seconds\")\n print(\" Time for Step 2. Calculate WL scale create Topics 104 for River Sections: \", round(time_duration_step[1], 3), \" seconds\")\n print(\" Time for Step 3. Sending messages: \", round(time_duration_step[2], 3), \" seconds\")\n print(\" Time for Step 4. Calculate OCCI & PFLCL: \", round(time_duration_step[3], 3), \" seconds\")\n print(\" Total Execution Time: \", round(total_time/60.0, 3), \" minutes\")\n\n print(\"\\n Total interested River Sections = \", total_irs_names)\n print(\" Number of TOP104 which were sent to PSAP is: \", total_top104)\n print(\" ************************** \\n\")\n\n"
] |
[
[
"numpy.array",
"pandas.ExcelWriter",
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"pandas.concat",
"pandas.read_csv"
]
] |
yanxu55/sasoptpy
|
[
"17006707212c87fdf90ee18b9d23a2c35eae421c",
"17006707212c87fdf90ee18b9d23a2c35eae421c"
] |
[
"sasoptpy/utils.py",
"examples/mining_optimization.py"
] |
[
"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom collections import Iterable\nimport inspect\nimport random\nimport string\n\nimport numpy as np\nimport pandas as pd\n\nimport sasoptpy.model\nimport sasoptpy.components\n\n\n# Constant values\nMIN = 'MIN'\nMAX = 'MAX'\nCONT = 'CONT'\nINT = 'INT'\nBIN = 'BIN'\n\n# Global dictionary\n__namedict = {}\n\n# Counters\n__ctr = {'obj': [0], 'var': [0], 'con': [0], 'expr': [0], 'model': [0],\n 'i': [0], 'set': [0], 'param': [0], 'impvar': [0], 'table': [0]}\n\n__objcnt = 0\n\n\ndef check_name(name, ctype=None):\n '''\n Checks if a name is valid and returns a random string if not\n\n Parameters\n ----------\n name : str\n Name to be checked if unique\n\n Returns\n -------\n str : The given name if valid, a random string otherwise\n '''\n if name and type(name) != str:\n name = ctype + '_' + str(name) if ctype else str(name)\n if name is None or name == '':\n if ctype is None:\n name = 'TMP_' + ''.join(random.choice(string.ascii_uppercase) for\n _ in range(5))\n else:\n name = '{}_{}'.format(ctype, get_counter(ctype))\n else:\n if name in __namedict:\n if ctype is None:\n name = ''.join(random.choice(string.ascii_lowercase) for\n _ in range(5))\n else:\n name = '{}_{}'.format(ctype, get_counter(ctype))\n else:\n name = name.replace(\" \", \"_\")\n while name in __namedict:\n if ctype is None:\n name = ''.join(random.choice(string.ascii_lowercase) for\n _ in range(5))\n else:\n name = '{}_{}'.format(ctype, get_counter(ctype))\n return name\n\n\ndef _is_generated(expr):\n if isinstance(expr, sasoptpy.components.Variable):\n return\n caller = inspect.stack()[2][3]\n if caller == '<genexpr>':\n return True\n\n\ndef exp_range(start, stop, step=1):\n '''\n Creates a set within given range\n\n Parameters\n ----------\n start : :class:`Expression`\n First value of the range\n stop : :class:`Expression`\n Last value of the range\n step : :class:`Expression`, optional\n Step size of the range\n\n Returns\n -------\n :class:`Set`\n Set that represents the range\n\n Examples\n --------\n\n >>> N = so.Parameter(name='N')\n >>> p = so.exp_range(1, N)\n >>> print(p._defn())\n set 1..N;\n\n '''\n regular = isinstance(start, int) and isinstance(stop, int) and\\\n isinstance(step, int)\n if regular:\n return range(start, stop, step)\n stname = start._expr() if hasattr(start, '_expr') else str(start)\n enname = stop._expr() if hasattr(stop, '_expr') else str(stop)\n setname = stname + '..' 
+ enname\n setname = setname.replace(' ', '')\n exset = get_obj_by_name(setname)\n if exset:\n return exset\n return sasoptpy.data.Set(name=setname)\n\n\ndef register_name(name, obj):\n '''\n Adds the name and order of a component into the global reference list\n\n Parameters\n ----------\n name : string\n Name of the object\n obj : object\n Object to be registered to the global name dictionary\n\n Returns\n -------\n int\n Unique object number to represent creation order\n '''\n global __objcnt\n __objcnt += 1\n __namedict[name] = {'ref': obj, 'order': __objcnt}\n return __objcnt\n\n\ndef recursive_walk(obj, func, attr=None, alt=None):\n '''\n Calls a given method recursively for given objects\n\n\n Parameters\n ----------\n func : string\n Name of the method / function be called\n attr : string, optional\n An attribute which triggers an alternative method to be called if\\\n exists\n alt : string, optional\n Name of the alternative method / function to be called if passed attr\\\n exists for given objects\n\n Notes\n -----\n - This function is for internal consumption.\n\n '''\n result = []\n for i in list(obj):\n if isinstance(i, list):\n result.append(recursive_walk(i, func))\n else:\n if attr is None:\n m_call = getattr(i, func)\n result.append(m_call())\n else:\n m_attr = getattr(i, attr)\n if m_attr:\n m_call = getattr(i, alt)\n else:\n m_call = getattr(i, func)\n result.append(m_call())\n return result\n\n\ndef quick_sum(argv):\n '''\n Quick summation function for :class:`Expression` objects\n\n Returns\n -------\n :class:`Expression` object\n Sum of given arguments\n\n Examples\n --------\n\n >>> x = so.VariableGroup(10000, name='x')\n >>> y = so.quick_sum(2*x[i] for i in range(10000))\n\n Notes\n -----\n\n This function is faster for expressions compared to Python's native sum()\n function.\n\n '''\n clocals = argv.gi_frame.f_locals.copy()\n exp = sasoptpy.components.Expression(temp=True)\n iterators = []\n for i in argv:\n exp = exp + i\n if isinstance(i, sasoptpy.components.Expression):\n if i._abstract:\n newlocals = argv.gi_frame.f_locals\n for nl in newlocals.keys():\n if nl not in clocals and\\\n type(newlocals[nl]) == sasoptpy.data.SetIterator:\n iterators.append((nl, newlocals[nl])) # Tuple: nm ref\n if iterators:\n # First pass: make set iterators uniform\n for i in iterators:\n for j in iterators:\n if isinstance(i, sasoptpy.data.SetIterator) and\\\n isinstance(j, sasoptpy.data.SetIterator):\n if i[0] == j[0]:\n j[1]._name = i[1]._name\n it_names = []\n for i in iterators:\n unique = True\n for j in it_names:\n if i[0] == j[0]:\n unique = False\n break\n if unique:\n it_names.append(i)\n # Second pass: check for iterators\n iterators = [p[1] for p in it_names]\n exp = _check_iterator(exp, 'sum', iterators)\n exp._temp = False\n return exp\n\n\ndef _check_iterator(exp, operator, iterators):\n if isinstance(exp, sasoptpy.components.Variable):\n r = exp.copy()\n else:\n r = exp\n if r._name is None:\n r._name = check_name(None, 'expr')\n if r._operator is None:\n r._operator = operator\n for i in iterators:\n if isinstance(i, sasoptpy.data.SetIterator):\n r._iterkey.append(i)\n wrapper = sasoptpy.components.Expression()\n wrapper._linCoef[r._name] = {'ref': r, 'val': 1.0}\n wrapper._abstract = True\n return wrapper\n\n\ndef get_obj_by_name(name):\n '''\n Returns the reference to an object by using the unique name\n\n Returns\n -------\n object\n Reference to the object that has the name\n\n Notes\n -----\n\n If there is a conflict in the namespace, you might not get the 
object\n you request. Clear the namespace using\n :func:`reset_globals` when needed.\n\n See Also\n --------\n :func:`reset_globals`\n\n Examples\n --------\n\n >>> m.add_variable(name='var_x', lb=0)\n >>> m.add_variables(2, name='var_y', vartype=so.INT)\n >>> x = so.get_obj_by_name('var_x')\n >>> y = so.get_obj_by_name('var_y')\n >>> print(x)\n >>> print(y)\n >>> m.add_constraint(x + y[0] <= 3, name='con_1')\n >>> c1 = so.get_obj_by_name('con_1')\n >>> print(c1)\n var_x\n Variable Group var_y\n [(0,): Variable [ var_y_0 | INT ]]\n [(1,): Variable [ var_y_1 | INT ]]\n var_x + var_y_0 <= 3\n\n '''\n if name in __namedict:\n return __namedict[name]['ref']\n else:\n return None\n\n\ndef dict_to_frame(dictobj, cols=None):\n '''\n Converts dictionaries to DataFrame objects for pretty printing\n\n Parameters\n ----------\n dictobj : dict\n Dictionary to be converted\n cols : list, optional\n Column names\n\n Returns\n -------\n :class:`DataFrame` object\n DataFrame representation of the dictionary\n\n Examples\n --------\n\n >>> d = {'coal': {'period1': 1, 'period2': 5, 'period3': 7},\n >>> 'steel': {'period1': 8, 'period2': 4, 'period3': 3},\n >>> 'copper': {'period1': 5, 'period2': 7, 'period3': 9}}\n >>> df = so.dict_to_frame(d)\n >>> print(df)\n period1 period2 period3\n coal 1 5 7\n copper 5 7 9\n steel 8 4 3\n\n '''\n frobj = pd.DataFrame.from_dict(dictobj, orient='index')\n if isinstance(cols, list):\n frobj.columns = cols\n if isinstance(frobj.index[0], tuple):\n frobj.index = pd.MultiIndex.from_tuples(frobj.index)\n return frobj\n\n\ndef extract_argument_as_list(inp):\n\n if isinstance(inp, int):\n thelist = list(range(0, inp))\n elif isinstance(inp, range):\n thelist = list(inp)\n elif isinstance(inp, tuple):\n thelist = inp[0]\n elif isinstance(inp, list):\n thelist = inp\n elif isinstance(inp, sasoptpy.data.Set):\n thelist = [inp]\n else:\n thelist = list(inp)\n return thelist\n\n\ndef extract_list_value(tuplist, listname):\n '''\n Extracts values inside various object types\n\n Parameters\n ----------\n tuplist : tuple\n Key combination to be extracted\n listname : dict or list or int or float or DataFrame or Series object\n List where the value will be extracted\n\n Returns\n -------\n object\n Corresponding value inside listname\n '''\n if listname is None:\n v = None\n elif isinstance(listname, dict):\n v = listname[tuple_unpack(tuplist)]\n elif np.issubdtype(type(listname), np.number):\n v = listname\n elif isinstance(listname, pd.DataFrame):\n if isinstance(listname.index, pd.MultiIndex):\n v = listname.loc[tuplist[:-1]][tuplist[-1]]\n else:\n v = listname.loc[tuplist]\n elif isinstance(listname, pd.Series):\n v = listname.loc[tuplist]\n else:\n v = listname\n for k in tuplist:\n v = v[k]\n return v\n\n\ndef list_length(listobj):\n '''\n Returns the length of an object if it is a list, tuple or dict\n\n Parameters\n ----------\n listobj : list, tuple or dict\n Object whose length will be returned\n\n Returns\n -------\n int\n Length of the list, tuple or dict\n '''\n if (isinstance(listobj, list) or isinstance(listobj, tuple) or\n isinstance(listobj, dict)):\n return len(listobj)\n else:\n return 1\n\n\ndef get_counter(ctrtype):\n '''\n Returns and increments the list counter for naming\n\n Parameters\n ----------\n ctrtype : string\n Type of the counter, 'obj', 'var', 'con' or 'expr'\n\n Returns\n -------\n int\n Current value of the counter\n '''\n ctr = __ctr[ctrtype]\n ctr[0] = ctr[0] + 1\n return ctr[0]\n\n\ndef _to_optmodel_loop(keys):\n s = ''\n subindex = []\n for key 
in keys:\n if isinstance(key, tuple):\n for i in flatten_tuple(key):\n subindex.append(str(i))\n elif not isinstance(key, sasoptpy.data.SetIterator):\n subindex.append(str(key))\n if subindex:\n s += '_' + '_'.join(subindex)\n iters = get_iterators(keys)\n conds = get_conditions(keys)\n if len(iters) > 0:\n s += ' {'\n s += ', '.join(iters)\n if len(conds) > 0:\n s += ': '\n s += ' and '.join(conds)\n s += '}'\n return s\n\n\ndef get_iterators(keys):\n '''\n Returns a list of definition strings for a given list of SetIterators\n '''\n iterators = []\n groups = {}\n for key in keys:\n if isinstance(key, sasoptpy.data.SetIterator):\n iterators.append(key._defn())\n elif isinstance(key, tuple):\n for subkey in key:\n if hasattr(subkey, '_group'):\n g = groups.setdefault(subkey._group, [])\n g.append(subkey)\n if groups:\n for kg in groups.values():\n s = '<' + ','.join([i._name for i in kg]) + '> in ' +\\\n kg[0]._set._name\n iterators.append(s)\n return iterators\n\n\ndef get_conditions(keys):\n conditions = []\n for key in keys:\n if isinstance(key, sasoptpy.data.SetIterator):\n if len(key._conditions) > 0:\n conditions.append(key._to_conditions())\n return conditions\n\n\ndef tuple_unpack(tp):\n '''\n Grabs the first element in a tuple, if a tuple is given as argument\n\n Parameters\n ----------\n tp : tuple\n\n Returns\n -------\n object\n The first object inside the tuple.\n '''\n if isinstance(tp, tuple):\n if len(tp) == 1:\n return tp[0]\n return tp\n\n\ndef tuple_pack(obj):\n '''\n Converts a given object to a tuple object\n\n If the object is a tuple, the function returns the input,\n otherwise creates a single dimensional tuple\n\n Parameters\n ----------\n obj : Object\n Object that is converted to a tuple\n\n Returns\n -------\n tuple\n Tuple that includes the original object\n '''\n if isinstance(obj, tuple):\n return obj\n elif isinstance(obj, str):\n return (obj,)\n return (obj,)\n\n\ndef list_pack(obj):\n '''\n Converts a given object to a list\n\n If the object is already a list, the function returns the input,\n otherwise creates a list\n\n Parameters\n ----------\n obj : Object\n Object that is converted to a list\n\n Returns\n -------\n list\n List that includes the original object\n '''\n if isinstance(obj, list):\n return obj\n return [obj]\n\n\ndef reset_globals():\n '''\n Deletes the references inside the global dictionary and restarts counters\n\n Examples\n --------\n\n >>> import sasoptpy as so\n >>> m = so.Model(name='my_model')\n >>> print(so.get_namespace())\n Global namespace:\n Model\n 0 my_model <class 'sasoptpy.model.Model'>,\\\n sasoptpy.Model(name='my_model', session=None)\n VariableGroup\n ConstraintGroup\n Expression\n Variable\n Constraint\n >>> so.reset_globals()\n >>> print(so.get_namespace())\n Global namespace:\n Model\n VariableGroup\n ConstraintGroup\n Expression\n Variable\n Constraint\n\n See also\n --------\n :func:`get_namespace`\n\n '''\n __namedict.clear()\n for i in __ctr:\n __ctr[i] = [0]\n\n\ndef read_frame(df, cols=None):\n '''\n Reads each column in :class:`pandas.DataFrame` into a list of\\\n :class:`pandas.Series` objects\n\n Parameters\n ----------\n df : :class:`pandas.DataFrame` object\n DataFrame to be read\n cols : list of strings, optional\n Column names to be read. 
By default, it reads all columns\n\n Returns\n -------\n list\n List of :class:`pandas.Series` objects\n\n Examples\n --------\n\n >>> price = pd.DataFrame([\n >>> [1, 5, 7],\n >>> [8, 4, 3],\n >>> [5, 7, 9]], columns=['period1', 'period2', 'period3']).\\\\\n >>> set_index([['coal', 'steel', 'copper']])\n >>> [period2, period3] = so.read_frame(price, ['period2', 'period3'])\n >>> print(period2)\n coal 5\n steel 4\n copper 7\n Name: period2, dtype: int64\n\n '''\n series = []\n if cols is None:\n cols = df.columns\n for col in cols:\n if col in df.columns:\n series.append(df[col])\n else:\n print('WARNING: Column name {} does not exist.'.format(col))\n return series\n\n\ndef read_data(table, key_set, key_cols=None, option='', params=None):\n '''\n (Experimental) Reads a CASTable into PROC OPTMODEL sets\n\n Parameters\n ----------\n table : CASTable\n The CAS table to be read to sets and parameters\n key_set : :class:`sasoptpy.data.Set`\n Set object to be read as the key (index)\n key_cols : list or string, optional\n Column names of the key columns\n option : string, optional\n Additional options for read data command\n params : list, optional\n A list of dictionaries where each dictionary represent parameters\n\n Notes\n -----\n - `key_set` and `key_cols` parameters should be a list. When passing\n a single item, string type can be used instead.\n '''\n\n if key_cols is None:\n key_cols = []\n if params is None:\n params = []\n\n # Reading key\n if key_set is not None and key_cols:\n key_set._colname = key_cols\n\n # Reading parameters\n for p in params:\n p.setdefault('column', None)\n p.setdefault('index', None)\n p['param']._set_loop(table, key_set, p['column'], p['index'])\n\n # Beginning\n if type(table).__name__ == 'CASTable':\n s = 'read data {}'.format(table.name)\n elif type(table).__name__ == 'SASdata':\n s = 'read data {}'.format(table.table)\n else:\n s = 'read data {}'.format(table)\n if option:\n s += ' {}'.format(option)\n s += ' into '\n # Key part\n if key_set is not None:\n s += '{}=[{}] '.format(key_set._name, ' '.join(key_set._colname))\n else:\n s += '[{}] '.format(' '.join(key_set._colname))\n # Parameter list\n parlist = []\n for p in params:\n parlist.append(p['param']._to_read_data())\n s += ' '.join(parlist)\n s += ';'\n return sasoptpy.data.Statement(s)\n\n\ndef read_table(table, session=None, key=['_N_'], columns=None, \n key_type=['num'], col_types=None,\n upload=False, casout=None, ref=True):\n '''\n Reads a CAS Table or pandas DataFrame\n\n Parameters\n ----------\n table : :class:`swat.cas.table.CASTable`, :class:`pandas.DataFrame`\\\n object or string\n Pointer to CAS Table (server data, CASTable),\\\n DataFrame (local data) or\\\n the name of the table at execution (server data, string)\n session : :class:`swat.CAS` or :class:`saspy.SASsession` object\n Session object if the table will be uploaded\n key : list, optional\n List of key columns (for CASTable) or index columns (for DataFrame)\n columns : list, optional\n List of columns to read into parameters\n key_type : list or string, optional\n A list of column types consists of 'num' or 'str' values\n col_types : dict, optional\n Dictionary of column types\n upload : boolean, optional\n Option for uploading a local data to CAS server first\n casout : string or dict, optional\n Casout options if data is uploaded\n ref : boolean, optional\n Switch for returning the read data statement generated by the function\n\n Returns\n -------\n tuple\n A tuple where first element is the key (index), second 
element\\\n is a list of requested columns and the last element is reference to\\\n the original\n\n See also\n --------\n :func:`Model.read_table`\n :func:`Model.read_data`\n\n '''\n\n if col_types is None:\n col_types = {}\n\n # Type of the given table and the session\n t_type = type(table).__name__\n s_type = type(session).__name__\n\n if (upload and t_type == 'DataFrame' and s_type == 'CAS'):\n table = session.upload_frame(table, casout=casout)\n elif (upload and t_type == 'Series' and s_type == 'CAS'):\n table = pd.DataFrame(table)\n table = session.upload_frame(table, casout=casout)\n elif (upload and t_type == 'DataFrame' and s_type == 'SAS'):\n req_name = casout if isinstance(casout, str) else None\n upname = sasoptpy.utils.check_name(req_name, 'table')\n sasoptpy.utils.register_name(upname, table)\n table = session.df2sd(table, table=upname)\n\n t_type = type(table).__name__\n\n if type(table).__name__ == 'CASTable':\n tname = table.name\n elif type(table).__name__ == 'SASdata':\n tname = table.table\n elif type(table) == str:\n tname = table\n else:\n tname = str(table)\n\n pars = []\n dat = None\n\n if t_type == 'CASTable' or t_type == 'SASdata' or t_type == 'str':\n if not key or key == [None]:\n key = ['_N_']\n keyset = sasoptpy.data.Set(\n name='set_' + ('_'.join([str(i) for i in key])\n if key != ['_N_'] else tname + '_N'),\n settype=key_type)\n pars = []\n if columns is None:\n columns = table.columns.tolist()\n for col in columns:\n coltype = col_types.get(col, 'num')\n pars.append(sasoptpy.data.Parameter(name=col, keys=[keyset],\n p_type=coltype))\n\n dat = read_data(table, key_set=keyset, key_cols=key, params=[\n {'param': pars[i], 'column': columns[i]}\n for i in range(len(pars))])\n elif t_type == 'DataFrame':\n if key and key != [None] and key != ['_N_']:\n table = table.set_index(key)\n keyset = table.index.tolist()\n pars = []\n if columns is None:\n columns = list(table)\n for col in columns:\n pars.append(table[col])\n elif t_type == 'Series':\n keyset = table.index.tolist()\n pars = [table]\n else:\n print('ERROR: Data type is not recognized in read_table: {} ({})'\n .format(table, type(table)))\n return None\n\n if ref:\n return (keyset, pars, dat)\n elif not pars:\n return (keyset, pars)\n else:\n return keyset\n\n\ndef flatten_frame(df, swap=False):\n '''\n Converts a :class:`pandas.DataFrame` object into :class:`pandas.Series`\n\n Parameters\n ----------\n df : :class:`pandas.DataFrame` object\n DataFrame object to be flattened\n swap : boolean, optional\n Option to use columns as first index\n\n Returns\n -------\n :class:`pandas.DataFrame` object\n A new DataFrame where indices consist of index and columns names as\n tuples\n\n Examples\n --------\n\n >>> price = pd.DataFrame([\n >>> [1, 5, 7],\n >>> [8, 4, 3],\n >>> [5, 7, 9]], columns=[\\'period1\\', \\'period2\\', \\'period3\\']).\\\\\n >>> set_index([[\\'coal\\', \\'steel\\', \\'copper\\']])\n >>> print(\\'Price data: \\\\n{}\\'.format(price))\n >>> price_f = so.flatten_frame(price)\n >>> print(\\'Price data: \\\\n{}\\'.format(price_f))\n Price data:\n period1 period2 period3\n coal 1 5 7\n steel 8 4 3\n copper 5 7 9\n Price data:\n (coal, period1) 1\n (coal, period2) 5\n (coal, period3) 7\n (steel, period1) 8\n (steel, period2) 4\n (steel, period3) 3\n (copper, period1) 5\n (copper, period2) 7\n (copper, period3) 9\n dtype: int64\n\n '''\n new_frame = df.stack()\n if swap:\n new_frame = new_frame.swaplevel()\n new_frame.index = new_frame.index.to_series()\n return new_frame\n\n\ndef 
flatten_tuple(tp):\n '''\n Flattens nested tuples\n\n Parameters\n ----------\n\n tp : tuple\n Nested tuple to be flattened\n\n Returns\n -------\n Generator\n A generator object representing the flat tuple\n\n Examples\n --------\n >>> tp = (3, 4, (5, (1, 0), 2))\n >>> print(list(so.flatten_tuple(tp)))\n [3, 4, 5, 1, 0, 2]\n\n '''\n for elem in tp:\n if isinstance(elem, tuple):\n yield from flatten_tuple(elem)\n else:\n yield elem\n\n\ndef is_equal(a, b):\n '''\n Compares various sasoptpy object types\n '''\n return a == b\n\n\ndef print_model_mps(model):\n '''\n Prints the MPS representation of the model\n\n Parameters\n ----------\n model : :class:`Model`\n Model whose MPS format will be printed\n\n Examples\n --------\n >>> m = so.Model(name='print_example', session=s)\n >>> x = m.add_variable(lb=1, name='x')\n >>> y = m.add_variables(2, name='y', ub=3, vartype=so.INT)\n >>> m.add_constraint(x + y.sum('*') <= 9, name='c1')\n >>> m.add_constraints((x + y[i] >= 2 for i in [0, 1]), name='c2')\n >>> m.set_objective(x+3*y[0], sense=so.MAX, name='obj')\n >>> so.print_model_mps(m)\n NOTE: Initialized model print_example\n Field1 Field2 Field3 Field4 Field5 Field6 _id_\n 0 NAME print_example 0 0 1\n 1 ROWS 2\n 2 MAX obj 3\n 3 L c1 4\n 4 G c2_0 5\n 5 G c2_1 6\n 6 COLUMNS 7\n 7 x obj 1 8\n 8 x c1 1 9\n 9 x c2_0 1 10\n 10 x c2_1 1 11\n 11 MARK0000 'MARKER' 'INTORG' 12\n 12 y_0 obj 3 13\n 13 y_0 c1 1 14\n 14 y_0 c2_0 1 15\n 15 y_1 c1 1 16\n 16 y_1 c2_1 1 17\n 17 MARK0001 'MARKER' 'INTEND' 18\n 18 RHS 19\n 19 RHS c1 9 20\n 20 RHS c2_0 2 21\n 21 RHS c2_1 2 22\n 22 RANGES 23\n 23 BOUNDS 24\n 24 LO BND x 1 25\n 25 UP BND y_0 3 26\n 26 LO BND y_0 0 27\n 27 UP BND y_1 3 28\n 28 LO BND y_1 0 29\n 29 ENDATA 0 0 30\n\n See also\n --------\n :func:`sasoptpy.Model.to_frame`\n\n '''\n with pd.option_context('display.max_rows', None):\n print(model.to_frame())\n\n\ndef get_namespace():\n '''\n Prints details of components registered to the global name dictionary\n\n The list includes models, variables, constraints and expressions\n\n Returns\n -------\n string\n A string representation of the namespace\n '''\n s = 'Global namespace:'\n for c in [sasoptpy.model.Model, sasoptpy.components.VariableGroup,\n sasoptpy.components.ConstraintGroup,\n sasoptpy.components.Expression, sasoptpy.components.Variable,\n sasoptpy.components.Constraint]:\n s += '\\n\\t{}'.format(c.__name__)\n for i, k in enumerate(__namedict):\n if type(__namedict[k]['ref']) is c:\n s += '\\n\\t\\t{:4d} {:{width}} {}, {}'.format(\n i, k, type(__namedict[k]['ref']),\n repr(__namedict[k]['ref']),\n width=len(max(__namedict, key=len)))\n return s\n\n\ndef get_namedict():\n return __namedict\n\n\ndef set_namedict(ss):\n for i in ss:\n register_name(i, ss[i])\n\n\ndef get_len(i):\n '''\n Safe wrapper of len() function\n\n Returns\n -------\n int\n len(i) if parameter i has len() function defined, othwerwise 1\n '''\n try:\n return len(i)\n except TypeError:\n return 1\n\n\ndef _list_item(i):\n it = type(i)\n if it == list:\n return i\n else:\n return [i]\n\n\ndef _to_bracket(prefix, keys):\n if keys is None:\n return prefix\n else:\n s = prefix + '['\n k = tuple_pack(keys)\n s += ', '.join(_to_iterator_expression(k))\n s += ']'\n return s\n\n\ndef _to_quoted_string(item):\n if isinstance(item, int):\n return str(item)\n elif isinstance(item, str):\n return \"'{}'\".format(item)\n elif isinstance(item, tuple):\n return '<' + ','.join(_to_quoted_string(j) for j in item) + '>'\n else:\n return str(item)\n\n\ndef _set_abstract_values(row):\n '''\n 
Searches for the missing/abstract variable names and set their values\n '''\n orname = row['var'].split('[')[0]\n group = get_obj_by_name(orname)\n if group:\n keys = row['var'].split('[')[1].split(']')[0]\n keys = keys.split(',')\n keys = tuple(int(k) if k.isdigit() else k\n for k in keys)\n if keys in group._vardict:\n group[keys]._value = row['value']\n else:\n group.add_member(keys)._value = row['value']\n return True\n else:\n return False\n\n\ndef _sort_tuple(i):\n i = sasoptpy.utils.tuple_pack(i)\n key = (len(i),)\n for s in i:\n if isinstance(s, str):\n key += (0,)\n elif np.issubdtype(type(s), np.number):\n key += (1,)\n elif isinstance(s, tuple):\n key += (2,)\n key += i\n return(key)\n\n\ndef get_mutable(exp):\n '''\n Returns a mutable copy of the given expression if it is immutable\n\n Parameters\n ----------\n exp : :class:`Variable` or :class:`Expression`\n Object to be wrapped\n\n Returns\n -------\n :class:`Expression`\n Mutable copy of the expression, if the original is immutable\n '''\n if isinstance(exp, sasoptpy.components.Variable):\n r = sasoptpy.components.Expression(exp)\n else:\n r = exp\n r._abstract = exp._abstract\n return r\n\n\ndef get_solution_table(*argv, key=None, sort=True, rhs=False):\n '''\n Returns the requested variable names as a DataFrame table\n\n Parameters\n ----------\n key : list, optional\n Keys for objects\n sort : bool, optional\n Option for sorting the keys\n rhs : bool, optional\n Option for including constant values\n\n Returns\n -------\n :class:`pandas.DataFrame`\n DataFrame object that holds keys and values\n '''\n soltable = []\n listofkeys = []\n keylengths = []\n # Get dimension from first argv\n if(len(argv) == 0):\n return None\n\n if key is None:\n for i, _ in enumerate(argv):\n if isinstance(argv[i], Iterable):\n if isinstance(argv[i], sasoptpy.components.VariableGroup):\n currentkeylist = list(argv[i]._vardict.keys())\n for m in argv[i]._vardict:\n if argv[i]._vardict[m]._abstract:\n continue\n m = sasoptpy.utils.tuple_unpack(m)\n if m not in listofkeys:\n listofkeys.append(m)\n keylengths.append(sasoptpy.utils.list_length(\n currentkeylist[0]))\n elif isinstance(argv[i], sasoptpy.components.ConstraintGroup):\n currentkeylist = list(argv[i]._condict.keys())\n for m in argv[i]._condict:\n m = sasoptpy.utils.tuple_unpack(m)\n if m not in listofkeys:\n listofkeys.append(m)\n keylengths.append(sasoptpy.utils.list_length(\n currentkeylist[0]))\n elif (isinstance(argv[i], pd.Series) or\n (isinstance(argv[i], pd.DataFrame) and\n len(argv[i].columns) == 1)):\n # optinal method: converting to series, argv[i].iloc[0]\n currentkeylist = argv[i].index.values\n for m in currentkeylist:\n m = sasoptpy.utils.tuple_unpack(m)\n if m not in listofkeys:\n listofkeys.append(m)\n keylengths.append(sasoptpy.utils.list_length(\n currentkeylist[0]))\n elif isinstance(argv[i], pd.DataFrame):\n index_list = argv[i].index.tolist()\n col_list = argv[i].columns.tolist()\n for m in index_list:\n for n in col_list:\n current_key = sasoptpy.utils.tuple_pack(m)\n + sasoptpy.utils.tuple_pack(n)\n if current_key not in listofkeys:\n listofkeys.append(current_key)\n keylengths.append(sasoptpy.utils.list_length(\n current_key))\n elif isinstance(argv[i], dict):\n currentkeylist = list(argv[i].keys())\n for m in currentkeylist:\n m = sasoptpy.utils.tuple_unpack(m)\n if m not in listofkeys:\n listofkeys.append(m)\n keylengths.append(sasoptpy.utils.list_length(\n currentkeylist[0]))\n elif isinstance(argv[i], sasoptpy.components.Expression):\n if ('',) not in 
listofkeys:\n listofkeys.append(('',))\n keylengths.append(1)\n else:\n print('Unknown type: {} {}'.format(type(argv[i]), argv[i]))\n else:\n if ('',) not in listofkeys:\n listofkeys.append(('',))\n keylengths.append(1)\n\n if(sort):\n try:\n listofkeys = sorted(listofkeys,\n key=_sort_tuple)\n except TypeError:\n listofkeys = listofkeys\n\n maxk = max(keylengths)\n else:\n maxk = max(len(i) if isinstance(i, tuple) else 1 for i in key)\n listofkeys = key\n\n for k in listofkeys:\n if isinstance(k, tuple):\n row = list(k)\n else:\n row = [k]\n if sasoptpy.utils.list_length(k) < maxk:\n row.extend(['-']*(maxk-sasoptpy.utils.list_length(k)))\n for i, _ in enumerate(argv):\n if type(argv[i]) == sasoptpy.components.VariableGroup:\n tk = sasoptpy.utils.tuple_pack(k)\n if tk not in argv[i]._vardict or argv[i][tk]._abstract:\n val = '-'\n else:\n val = argv[i][tk].get_value()\n row.append(val)\n elif type(argv[i]) == sasoptpy.components.Variable:\n val = argv[i].get_value() if k == ('',) else '-'\n row.append(val)\n elif type(argv[i]) == sasoptpy.components.Constraint:\n val = argv[i].get_value(rhs=rhs) if k == ('',) else '-'\n row.append(val)\n elif type(argv[i]) == sasoptpy.components.ConstraintGroup:\n tk = sasoptpy.utils.tuple_pack(k)\n val = argv[i][tk].get_value()\\\n if tk in argv[i]._condict else '-'\n row.append(val)\n elif type(argv[i]) == sasoptpy.components.Expression:\n val = argv[i].get_value() if k == ('',) else '-'\n row.append(val)\n elif type(argv[i]) == pd.Series:\n if k in argv[i].index.tolist():\n if type(argv[i][k]) == sasoptpy.components.Expression:\n val = argv[i][k].get_value()\n else:\n val = argv[i][k]\n else:\n val = '-'\n row.append(val)\n elif (type(argv[i]) == pd.DataFrame and\n len(argv[i].columns) == 1):\n for j in argv[i]:\n if k in argv[i].index.tolist():\n cellv = argv[i].loc[k, j]\n if type(cellv) == pd.Series:\n cellv = cellv.iloc[0]\n if type(cellv) == sasoptpy.components.Expression:\n row.append(cellv.get_value())\n else:\n row.append(argv[i].ix[k, j])\n elif sasoptpy.tuple_pack(k) in argv[i].index.tolist():\n tk = sasoptpy.tuple_pack(k)\n cellv = argv[i].loc[tk, j]\n if type(cellv) == pd.Series:\n cellv = cellv.iloc[0]\n if type(cellv) == sasoptpy.components.Expression:\n row.append(cellv.get_value())\n else:\n row.append(argv[i].ix[tk, j])\n else:\n row.append('-')\n elif type(argv[i]) == pd.DataFrame:\n arg_series = argv[i].stack()\n arg_series.index = arg_series.index.to_series()\n if k in arg_series.index.values.tolist():\n if type(arg_series[k]) == sasoptpy.components.Expression:\n val = arg_series[k].get_value()\n else:\n val = arg_series[k]\n else:\n val = '-'\n row.append(val)\n elif type(argv[i]) == sasoptpy.data.ImplicitVar:\n tk = sasoptpy.utils.tuple_pack(k)\n if tk in argv[i]._dict:\n row.append(argv[i][tk].get_value())\n else:\n row.append('-')\n elif isinstance(argv[i], dict):\n if k in argv[i]:\n tk = sasoptpy.utils.tuple_pack(k)\n if type(argv[i][tk]) == sasoptpy.components.Expression:\n row.append(argv[i][tk].get_value())\n elif np.issubdtype(type(argv[i][tk]), np.number):\n row.append(argv[i][tk])\n else:\n row.append('-')\n else:\n row.append('-')\n else:\n try:\n row.append(str(argv[i][k]))\n except TypeError:\n row.append('-')\n soltable.append(row)\n indexlen = len(soltable[0])-len(argv)\n indexcols = [i+1 for i in range(indexlen)]\n inputcols = []\n for a in argv:\n if isinstance(a, pd.DataFrame) and len(a.columns.tolist()) == 1:\n inputcols.extend(a.columns.values.tolist())\n else:\n try:\n inputcols.append(a._name)\n except 
AttributeError:\n if isinstance(a, pd.DataFrame):\n inputcols.append('DataFrame')\n elif isinstance(a, dict):\n inputcols.append('dict')\n else:\n inputcols.append('arg: {}'.format(a))\n colnames = indexcols + inputcols\n soltablep = pd.DataFrame(soltable, columns=colnames)\n soltablep2 = soltablep.set_index(indexcols)\n pd.set_option('display.multi_sparse', False)\n return soltablep2\n\n\ndef union(*args):\n '''\n Returns a union of :class:`Set`, list or set objects\n '''\n type0 = type(args[0])\n for i in args:\n if type(i) != type0:\n print('ERROR: Cannot perform union on {} {} objects'.format(\n type0, type(i)))\n return None\n if type0 == sasoptpy.data.Set:\n r = sasoptpy.components.Expression()\n names = tuple(i._name for i in args)\n refs = [i for i in args]\n r._linCoef[names] = {\n 'ref': refs,\n 'val': 1.0,\n 'op': 'union'\n }\n r._abstract = True\n return r\n elif type0 == list:\n r = []\n for i in args:\n r += i\n return r\n elif type0 == set:\n r = set()\n for i in args:\n r += i\n return r\n\n\ndef wrap(e, abstract=False):\n '''\n Wraps expression inside another expression\n '''\n wrapper = sasoptpy.components.Expression()\n if hasattr(e, '_name'):\n name = e._name\n else:\n name = check_name(None, 'expr')\n if isinstance(e, sasoptpy.components.Expression):\n wrapper._linCoef[name] = {'ref': e, 'val': 1.0}\n wrapper._abstract = e._abstract or abstract\n elif isinstance(e, dict):\n wrapper._linCoef[name] = {**e}\n return wrapper\n\n\ndef _py_symbol(symbol):\n if symbol == '^':\n return '**'\n else:\n return symbol\n\n\ndef _to_iterator_expression(itlist):\n strlist = []\n for i in itlist:\n if isinstance(i, sasoptpy.components.Expression):\n strlist.append(i._expr())\n elif isinstance(i, str):\n strlist.append(\"'{}'\".format(i))\n else:\n strlist.append(str(i))\n return strlist\n",
"import sasoptpy as so\nimport pandas as pd\n\n\ndef test(cas_conn):\n\n m = so.Model(name='mining_optimization', session=cas_conn)\n\n mine_data = pd.DataFrame([\n ['mine1', 5, 2, 1.0],\n ['mine2', 4, 2.5, 0.7],\n ['mine3', 4, 1.3, 1.5],\n ['mine4', 5, 3, 0.5],\n ], columns=['mine', 'cost', 'extract_ub', 'quality']).\\\n set_index(['mine'])\n\n year_data = pd.DataFrame([\n [1, 0.9],\n [2, 0.8],\n [3, 1.2],\n [4, 0.6],\n [5, 1.0],\n ], columns=['year', 'quality_required']).set_index(['year'])\n\n max_num_worked_per_year = 3\n revenue_per_ton = 10\n discount_rate = 0.10\n\n MINES = mine_data.index.tolist()\n cost = mine_data['cost']\n extract_ub = mine_data['extract_ub']\n quality = mine_data['quality']\n YEARS = year_data.index.tolist()\n quality_required = year_data['quality_required']\n\n isOpen = m.add_variables(MINES, YEARS, vartype=so.BIN, name='isOpen')\n isWorked = m.add_variables(MINES, YEARS, vartype=so.BIN, name='isWorked')\n extract = m.add_variables(MINES, YEARS, lb=0, name='extract')\n [extract[i, j].set_bounds(ub=extract_ub[i]) for i in MINES for j in YEARS]\n\n extractedPerYear = {j: extract.sum('*', j) for j in YEARS}\n discount = {j: 1 / (1+discount_rate) ** (j-1) for j in YEARS}\n\n totalRevenue = revenue_per_ton *\\\n so.quick_sum(discount[j] * extractedPerYear[j] for j in YEARS)\n totalCost = so.quick_sum(discount[j] * cost[i] * isOpen[i, j]\n for i in MINES for j in YEARS)\n m.set_objective(totalRevenue-totalCost, sense=so.MAX, name='totalProfit')\n\n m.add_constraints((extract[i, j] <= extract[i, j]._ub * isWorked[i, j]\n for i in MINES for j in YEARS), name='link')\n\n m.add_constraints((isWorked.sum('*', j) <= max_num_worked_per_year\n for j in YEARS), name='cardinality')\n\n m.add_constraints((isWorked[i, j] <= isOpen[i, j] for i in MINES\n for j in YEARS), name='worked_implies_open')\n\n m.add_constraints((isOpen[i, j] <= isOpen[i, j-1] for i in MINES\n for j in YEARS if j != 1), name='continuity')\n\n m.add_constraints((so.quick_sum(quality[i] * extract[i, j] for i in MINES)\n == quality_required[j] * extractedPerYear[j]\n for j in YEARS), name='quality_con')\n\n res = m.solve()\n if res is not None:\n print(so.get_solution_table(isOpen, isWorked, extract))\n quality_sol = {j: so.quick_sum(quality[i] * extract[i, j].get_value()\n for i in MINES)\n / extractedPerYear[j].get_value() for j in YEARS}\n qs = so.dict_to_frame(quality_sol, ['quality_sol'])\n epy = so.dict_to_frame(extractedPerYear, ['extracted_per_year'])\n print(so.get_solution_table(epy, qs, quality_required))\n\n return m.get_objective_value()\n"
] |
[
[
"pandas.set_option",
"pandas.DataFrame.from_dict",
"pandas.DataFrame",
"pandas.MultiIndex.from_tuples",
"pandas.option_context"
],
[
"pandas.DataFrame"
]
] |
Sha-Lab/SynPo
|
[
"8ac35a01d2c810187b9c14b914bcb792ed73caa9"
] |
[
"train_gridworld.py"
] |
[
"import logging\nimport argparse\nimport ipdb\nimport random\nfrom datetime import datetime\nfrom itertools import product\nfrom tqdm import tqdm\nimport numpy as np\nimport pickle\nfrom IPython import embed\nfrom ipdb import slaunch_ipdb_on_exception\n\nfrom synpo.agent import *\nfrom synpo.component import *\nfrom synpo.utils import *\nimport synpo.gridworld as gridworld\n\nfrom synpo.utils import mkdir, set_seed\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu_id', default=0, type=int)\nparser.add_argument('--batch_size', default=128, type=int)\nparser.add_argument('--weight', default=None, type=str)\nparser.add_argument('--scene', default=None, type=int)\nparser.add_argument('--task', default=None, type=int)\nparser.add_argument('--embedding_dim', default=128, type=int)\nparser.add_argument('--scene_embedding_dim', default=128, type=int)\nparser.add_argument('--task_embedding_dim', default=128, type=int)\nparser.add_argument('--num_obj_types', default=5, type=int)\nparser.add_argument('--task_length', default=2, type=int)\nparser.add_argument('--update_interval', default=1, type=int)\nparser.add_argument('--scene_num', default=5, type=int)\nparser.add_argument('--task_num', default=5, type=int)\nparser.add_argument('--reward_prediction', default=1, type=int,\n help=\"loss weight of reward prediction objective\")\nparser.add_argument('--scene_disentanglement', default=0.1, type=float, \n help=\"loss weight of scene disentanglement prediction objective\")\nparser.add_argument('--task_disentanglement', default=0.1, type=float,\n help=\"loss weight of task disentanglement prediction objective\")\nparser.add_argument('--split_filepath', default=None, type=str, required=True,\n help=\"train/test split filepath\")\nparser.add_argument('--lr', default=0.001, type=float,\n help=\"base learning rate\")\nparser.add_argument('--wd', action='store_true', \n help=\"enable weight decay\")\nparser.add_argument('--mode', default='cloning', choices=['cloning'],\n help=\"training mode [only behavior cloing available for now]\")\nparser.add_argument('--network', default='synpo', choices=['mlp', 'mtl', 'synpo'],\n help=\"select model architecture\")\nparser.add_argument('--postfix', default='', type=str,\n help=\"postfix to the log file\")\nparser.add_argument('--repeat', default=10, type=int,\n help=\"number of test run\")\nparser.add_argument('--evaluate', action='store_true',\n help=\"evaluation mode\")\nparser.add_argument('--visualize', action='store_true',\n help=\"visualize policy [only in evaluation mode]\")\nparser.add_argument('--random_seed', default=0, type=int,\n help=\"random seed value\")\nparser.add_argument('--logger_name', default='log/synpo_{}_{}_{}_{}.log', type=str,\n help=\"logger name format [must have for slots to fill]\")\nparser.add_argument('--norm', action='store_true',\n help=\"whether normalize the scene/task embedding\")\nparser.add_argument('--extend_mode', action='store_true',\n help=\"train on the first (10 ENV, 10 TASK) combinations.\")\nargs = parser.parse_args()\n\ndef get_network(task):\n arg_dim = task.env.observation_space.spaces[1].shape[0]\n grid_dim = task.env.observation_space.spaces[0].shape[0]\n action_dim = task.env.action_space.n\n if args.network == 'mlp':\n network = GridWorldMLP(grid_dim, action_dim, arg_dim, \n scene_num=args.scene_num,\n task_num=args.task_num,\n embed_dim=args.embedding_dim, \n scene_dim=args.scene_embedding_dim, \n task_dim=args.task_embedding_dim,\n gpu=args.gpu_id, \n scene_disentanglement=args.scene_disentanglement, 
\n task_disentanglement=args.task_disentanglement,\n norm=args.norm)\n elif args.network == 'mtl':\n network = GridWorldMTL(grid_dim, action_dim, arg_dim, \n scene_num=args.scene_num,\n task_num=args.task_num,\n embed_dim=args.embedding_dim,\n scene_dim=args.scene_embedding_dim, \n task_dim=args.task_embedding_dim,\n gpu=args.gpu_id, \n scene_disentanglement=args.scene_disentanglement, \n task_disentanglement=args.task_disentanglement,\n norm=args.norm)\n elif args.network == 'synpo':\n network = GridWorldSynPo(grid_dim, action_dim, arg_dim, \n scene_num=args.scene_num,\n task_num=args.task_num,\n embed_dim=args.embedding_dim,\n scene_dim=args.scene_embedding_dim,\n task_dim=args.task_embedding_dim,\n gpu=args.gpu_id,\n norm=args.norm)\n else:\n raise ValueError('Non-supported Network')\n return network\n\ndef gridworld_behaviour_cloning(args, layouts, train_combos, test_combos):\n config = Config()\n grid_world_task = GridWorldTask(layouts,\n num_obj_types=args.num_obj_types,\n task_length=args.task_length,\n history_length= config.history_length,\n train_combos=train_combos,\n test_combos=test_combos)\n config.task_fn = lambda: grid_world_task\n if args.wd: \n print('with weight decay!')\n config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=args.lr, weight_decay=10e-5)\n else:\n print('without weight decay!')\n config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=args.lr)\n\n network = get_network(grid_world_task)\n if args.weight is not None: network.load_state_dict(torch.load(args.weight)['best_model_weight'])\n \n print(network)\n\n config.network_fn = lambda: network\n config.replay_fn = lambda: TrajectoryReplay(memory_size=20000,\n max_length=200,\n batch_size=64) # number of trajectory per batch\n config.policy_fn = lambda: GreedyPolicy(epsilon=0.1,\n final_step=500000,\n min_epsilon=0.0)\n config.logger = Logger('./log', logger)\n config.test_interval = 2000\n config.exploration_steps = 50000\n config.postfix = args.postfix\n config.tag = network.__class__.__name__\n config.update_interval = 1 # preset\n config.scene_disentanglement_coeff = args.scene_disentanglement\n config.task_disentanglement_coeff = args.task_disentanglement\n return GridBehaviourCloning(config)\n\nif __name__ == '__main__':\n mkdir('data')\n mkdir('log')\n os.system('export OMP_NUM_THREADS=1')\n\n if args.extend_mode: # Hardcoding numbers of scenes and tasks for training\n args.scene_num = 10\n args.task_num = 10\n\n set_seed(args.random_seed, c=args.random_seed)\n if args.split_filepath is None: # Default Multi-task Setting\n layouts = ['map{}'.format(i) for i in range(0, 20) ]\n train_combos = [(i, j) for i, j in product(range(args.scene_num), range(args.task_num))]\n test_combos = [(i, j) for i, j in product(range(args.scene_num), range(args.task_num))]\n else:\n with open(args.split_filepath, 'rb') as handle:\n data = pickle.load(handle)\n args.task_num = data['task_num']\n args.scene_num = data['scene_num']\n train_combos = data['train_combos']\n test_combos = data['test_combos']\n layouts = data['layouts']\n print('num train:', len(train_combos), 'num test:', len(test_combos))\n\n if args.mode == 'cloning':\n print('Loading Episodic Behavior Cloning')\n agent = gridworld_behaviour_cloning(args, layouts, train_combos, test_combos)\n\n agent.reward_prediction = args.reward_prediction\n if args.split_filepath is None: # Default Multi-task Setting\n agent.split_name = 'MTL'\n else:\n agent.split_name = \"-\".join(args.split_filepath.split('/')[-2:])\n if args.evaluate:\n with 
slaunch_ipdb_on_exception():\n traj_length = []\n if args.scene is not None or args.task is not None:\n if args.scene is not None and args.task is None:\n index_scene = args.scene\n index_task = random.sample([x[1] for x in train_combos if x[0] == args.scene], 1)[0]\n else:\n index_scene = args.scene if args.scene is not None else np.random.randint(args.scene_num)\n index_task = args.task if args.task is not None else np.random.randint(args.task_num)\n for _ in tqdm(range(args.repeat)):\n success, traj_len, _, _ = agent.evaluate(visualize=args.visualize, \n index=(index_scene, index_task)) # main program\n if success: \n traj_length.append(traj_len)\n print('mean length:', np.mean(traj_length))\n else:\n rates = []\n for combo in train_combos:\n success_list = []\n trajectory_list = []\n for _ in tqdm(range(args.repeat)):\n success, traj_len, _ = agent.evaluate(visualize=args.visualize, index=combo) # main program\n success_list.append(success)\n trajectory_list.append(traj_len)\n success_rate = sum(success_list) / len(success_list)\n rates.append(success_rate)\n print('* [Task={}, # of Tests={}] Average success rate: {:.4f}, Average trajectory length: {}'.format( combo, args.repeat,\n success_rate, sum(trajectory_list) / len(trajectory_list) ))\n print('average success rate: {:.4f}'.format(np.mean(rates)))\n else:\n logger.setLevel(logging.INFO)\n handler = logging.FileHandler(args.logger_name.format(agent.__class__.__name__,\n agent.learning_network.__class__.__name__,\n datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\"),\n args.postfix))\n logger.addHandler(handler)\n with slaunch_ipdb_on_exception():\n train_agent(agent) # main program\n\n"
] |
[
[
"numpy.random.randint",
"numpy.mean"
]
] |
Highroad-Consulting/keras-yolo3
|
[
"52323cfd21040e96e53ae106a979841c2252a05b"
] |
[
"yolo3/model.py"
] |
[
"\"\"\"YOLO_v3 Model Defined in Keras.\"\"\"\n\nfrom functools import wraps\n\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model\nfrom keras.regularizers import l2\n\nfrom yolo3.utils import compose\n\n\n@wraps(Conv2D)\ndef DarknetConv2D(*args, **kwargs):\n \"\"\"Wrapper to set Darknet parameters for Convolution2D.\"\"\"\n darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}\n darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'\n darknet_conv_kwargs.update(kwargs)\n return Conv2D(*args, **darknet_conv_kwargs)\n\ndef DarknetConv2D_BN_Leaky(*args, **kwargs):\n \"\"\"Darknet Convolution2D followed by BatchNormalization and LeakyReLU.\"\"\"\n no_bias_kwargs = {'use_bias': False}\n no_bias_kwargs.update(kwargs)\n return compose(\n DarknetConv2D(*args, **no_bias_kwargs),\n BatchNormalization(),\n LeakyReLU(alpha=0.1))\n\ndef resblock_body(x, num_filters, num_blocks):\n '''A series of resblocks starting with a downsampling Convolution2D'''\n # Darknet uses left and top padding instead of 'same' mode\n x = ZeroPadding2D(((1,0),(1,0)))(x)\n x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)\n for i in range(num_blocks):\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)\n x = Add()([x,y])\n return x\n\ndef darknet_body(x):\n '''Darknent body having 52 Convolution2D layers'''\n x = DarknetConv2D_BN_Leaky(32, (3,3))(x)\n x = resblock_body(x, 64, 1)\n x = resblock_body(x, 128, 2)\n x = resblock_body(x, 256, 8)\n x = resblock_body(x, 512, 8)\n x = resblock_body(x, 1024, 4)\n return x\n\ndef make_last_layers(x, num_filters, out_filters):\n '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''\n x = compose(\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)),\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)\n y = compose(\n DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),\n DarknetConv2D(out_filters, (1,1)))(x)\n return x, y\n\n\ndef yolo_body(inputs, num_anchors, num_classes):\n \"\"\"\n Create YOLO_V3 model CNN body in Keras.\n Fine-Grained Feature\n \"\"\"\n darknet = Model(inputs, darknet_body(inputs))\n x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5)) # Here should be num_anchors*5 + num_classes?\n\n x = compose(\n DarknetConv2D_BN_Leaky(256, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[152].output])\n x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))\n\n x = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x)\n x = Concatenate()([x,darknet.layers[92].output])\n x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))\n\n return Model(inputs, [y1,y2,y3])\n\ndef tiny_yolo_body(inputs, num_anchors, num_classes):\n '''Create Tiny YOLO_v3 model CNN body in keras.'''\n x1 = compose(\n DarknetConv2D_BN_Leaky(16, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(32, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(64, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(128, 
(3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(256, (3,3)))(inputs)\n x2 = compose(\n MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),\n DarknetConv2D_BN_Leaky(512, (3,3)),\n MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),\n DarknetConv2D_BN_Leaky(1024, (3,3)),\n DarknetConv2D_BN_Leaky(256, (1,1)))(x1)\n y1 = compose(\n DarknetConv2D_BN_Leaky(512, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)\n\n x2 = compose(\n DarknetConv2D_BN_Leaky(128, (1,1)),\n UpSampling2D(2))(x2)\n y2 = compose(\n Concatenate(),\n DarknetConv2D_BN_Leaky(256, (3,3)),\n DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])\n\n return Model(inputs, [y1,y2])\n\n\ndef yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):\n \"\"\"Convert final layer features to bounding box parameters.\"\"\"\n num_anchors = len(anchors)\n # Reshape to batch, height, width, num_anchors, box_params.\n anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])\n\n grid_shape = K.shape(feats)[1:3] # height, width\n grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),\n [1, grid_shape[1], 1, 1])\n grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),\n [grid_shape[0], 1, 1, 1])\n grid = K.concatenate([grid_x, grid_y])\n grid = K.cast(grid, K.dtype(feats))\n\n feats = K.reshape(\n feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])\n\n # Adjust preditions to each spatial grid point and anchor size.\n box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))\n box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))\n\n if calc_loss == True:\n return grid, feats, box_xy, box_wh\n\n box_confidence = K.sigmoid(feats[..., 4:5])\n box_class_probs = K.sigmoid(feats[..., 5:])\n\n return box_xy, box_wh, box_confidence, box_class_probs\n\n\ndef yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):\n '''Get corrected boxes'''\n box_yx = box_xy[..., ::-1]\n box_hw = box_wh[..., ::-1]\n input_shape = K.cast(input_shape, K.dtype(box_yx))\n image_shape = K.cast(image_shape, K.dtype(box_yx))\n new_shape = K.round(image_shape * K.min(input_shape/image_shape))\n offset = (input_shape-new_shape)/2./input_shape\n scale = input_shape/new_shape\n box_yx = (box_yx - offset) * scale\n box_hw *= scale\n\n box_mins = box_yx - (box_hw / 2.)\n box_maxes = box_yx + (box_hw / 2.)\n boxes = K.concatenate([\n box_mins[..., 0:1], # y_min\n box_mins[..., 1:2], # x_min\n box_maxes[..., 0:1], # y_max\n box_maxes[..., 1:2] # x_max\n ])\n\n # Scale boxes back to original image shape.\n boxes *= K.concatenate([image_shape, image_shape])\n return boxes\n\n\ndef yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):\n '''Process Conv layer output'''\n box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,\n anchors, num_classes, input_shape)\n boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)\n boxes = K.reshape(boxes, [-1, 4])\n box_scores = box_confidence * box_class_probs\n box_scores = K.reshape(box_scores, [-1, num_classes])\n return boxes, box_scores\n\n\ndef yolo_eval(yolo_outputs,\n anchors,\n num_classes,\n image_shape,\n max_boxes=20,\n score_threshold=.6,\n iou_threshold=.5):\n \"\"\"Evaluate YOLO model on given input and return filtered boxes.\"\"\"\n num_layers = len(yolo_outputs)\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else 
[[3,4,5], [1,2,3]] # default setting\n input_shape = K.shape(yolo_outputs[0])[1:3] * 32\n boxes = []\n box_scores = []\n for l in range(num_layers):\n _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, image_shape)\n boxes.append(_boxes)\n box_scores.append(_box_scores)\n boxes = K.concatenate(boxes, axis=0)\n box_scores = K.concatenate(box_scores, axis=0)\n\n mask = box_scores >= score_threshold\n max_boxes_tensor = K.constant(max_boxes, dtype='int32')\n boxes_ = []\n scores_ = []\n classes_ = []\n for c in range(num_classes):\n # TODO: use keras backend instead of tf.\n class_boxes = tf.boolean_mask(boxes, mask[:, c])\n class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])\n nms_index = tf.image.non_max_suppression(\n class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)\n class_boxes = K.gather(class_boxes, nms_index)\n class_box_scores = K.gather(class_box_scores, nms_index)\n classes = K.ones_like(class_box_scores, 'int32') * c\n boxes_.append(class_boxes)\n scores_.append(class_box_scores)\n classes_.append(classes)\n boxes_ = K.concatenate(boxes_, axis=0)\n scores_ = K.concatenate(scores_, axis=0)\n classes_ = K.concatenate(classes_, axis=0)\n\n return boxes_, scores_, classes_\n\n\ndef preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):\n '''Preprocess true boxes to training input format\n\n Parameters\n ----------\n true_boxes: array, shape=(m, T, 5)\n Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.\n input_shape: array-like, hw, multiples of 32\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n\n Returns\n -------\n y_true: list of array, shape like yolo_outputs, xywh are reletive value\n\n '''\n assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'\n num_layers = len(anchors)//3 # default setting\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n\n true_boxes = np.array(true_boxes, dtype='float32')\n input_shape = np.array(input_shape, dtype='int32')\n boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2\n boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]\n true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]\n true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]\n\n m = true_boxes.shape[0]\n grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]\n y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),\n dtype='float32') for l in range(num_layers)]\n\n # Expand dim to apply broadcasting.\n anchors = np.expand_dims(anchors, 0)\n anchor_maxes = anchors / 2.\n anchor_mins = -anchor_maxes\n valid_mask = boxes_wh[..., 0]>0\n\n for b in range(m): # loop of true boxes\n # Discard zero rows.\n wh = boxes_wh[b, valid_mask[b]]\n if len(wh)==0: continue\n # Expand dim to apply broadcasting.\n wh = np.expand_dims(wh, -2)\n box_maxes = wh / 2.\n box_mins = -box_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n box_area = wh[..., 0] * wh[..., 1]\n anchor_area = anchors[..., 0] * anchors[..., 1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n\n # Find best anchor for each true box\n best_anchor = np.argmax(iou, axis=-1)\n\n for t, n in enumerate(best_anchor): # loop of best anchors\n for l in 
range(num_layers): # loop of 3 output layers\n if n in anchor_mask[l]: # put y_true from true_box info\n i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')\n j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')\n k = anchor_mask[l].index(n)\n c = true_boxes[b,t, 4].astype('int32')\n y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4] # put x, y, w, h\n y_true[l][b, j, i, k, 4] = 1 # put 1 if an object eixsts.\n y_true[l][b, j, i, k, 5+c] = 1 # put 1 if an class matches\n\n return y_true\n\n\ndef box_iou(b1, b2):\n '''Return iou tensor\n\n Parameters\n ----------\n b1: tensor, shape=(i1,...,iN, 4), xywh\n b2: tensor, shape=(j, 4), xywh\n\n Returns\n -------\n iou: tensor, shape=(i1,...,iN, j)\n\n '''\n\n # Expand dim to apply broadcasting.\n b1 = K.expand_dims(b1, -2)\n b1_xy = b1[..., :2]\n b1_wh = b1[..., 2:4]\n b1_wh_half = b1_wh/2.\n b1_mins = b1_xy - b1_wh_half\n b1_maxes = b1_xy + b1_wh_half\n\n # Expand dim to apply broadcasting.\n b2 = K.expand_dims(b2, 0)\n b2_xy = b2[..., :2]\n b2_wh = b2[..., 2:4]\n b2_wh_half = b2_wh/2.\n b2_mins = b2_xy - b2_wh_half\n b2_maxes = b2_xy + b2_wh_half\n\n intersect_mins = K.maximum(b1_mins, b2_mins)\n intersect_maxes = K.minimum(b1_maxes, b2_maxes)\n intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n iou = intersect_area / (b1_area + b2_area - intersect_area)\n\n return iou\n\n\ndef yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):\n '''Return yolo_loss tensor\n\n Parameters\n ----------\n yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body\n y_true: list of array, the output of preprocess_true_boxes\n anchors: array, shape=(N, 2), wh\n num_classes: integer\n ignore_thresh: float, the iou threshold whether to ignore object confidence loss\n\n Returns\n -------\n loss: tensor, shape=(1,)\n\n '''\n num_layers = len(anchors)//3 # default setting, basically 3 for yolo or 2 for tiny yolo.\n yolo_outputs = args[:num_layers]\n y_true = args[num_layers:]\n anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]\n input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))\n grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]\n loss = 0\n m = K.shape(yolo_outputs[0])[0] # batch size, tensor\n mf = K.cast(m, K.dtype(yolo_outputs[0]))\n\n for l in range(num_layers): # predicts x, y, w, h, box confidence, and class confidence in EACH LAYERS\n object_mask = y_true[l][..., 4:5]\n true_class_probs = y_true[l][..., 5:]\n\n grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],\n anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True) # use 3 boundary boxes from bigger ones.\n pred_box = K.concatenate([pred_xy, pred_wh])\n\n # Darknet raw box to calculate loss.\n raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid\n raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])\n raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf\n box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]\n\n # Find ignore mask, iterate over each of batch.\n ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)\n object_mask_bool = K.cast(object_mask, 'bool')\n def loop_body(b, ignore_mask):\n true_box = tf.boolean_mask(y_true[l][b,...,0:4], 
object_mask_bool[b,...,0])\n iou = box_iou(pred_box[b], true_box)\n best_iou = K.max(iou, axis=-1)\n ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))\n return b+1, ignore_mask\n _, ignore_mask = K.control_flow_ops.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])\n ignore_mask = ignore_mask.stack()\n ignore_mask = K.expand_dims(ignore_mask, -1)\n\n # K.binary_crossentropy is helpful to avoid exp overflow.\n xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)\n wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])\n # Remarks: confidence_loss, the former term: object exists, the later term : object doesn't exist\n confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \\\n (1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask\n class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)\n\n xy_loss = K.sum(xy_loss) / mf\n wh_loss = K.sum(wh_loss) / mf\n confidence_loss = K.sum(confidence_loss) / mf\n class_loss = K.sum(class_loss) / mf\n loss += xy_loss + wh_loss + confidence_loss + class_loss\n if print_loss:\n loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')\n return loss\n"
] |
[
[
"numpy.array",
"tensorflow.image.non_max_suppression",
"numpy.minimum",
"numpy.argmax",
"numpy.floor",
"tensorflow.boolean_mask",
"numpy.expand_dims",
"numpy.maximum"
]
] |
ayan-iiitd/scikit-learn
|
[
"02dcedf9ad553a85f43570e78ccbdbb1cea46ef8"
] |
[
"sklearn/utils/estimator_checks.py"
] |
[
"import types\nimport warnings\nimport pickle\nimport re\nfrom copy import deepcopy\nfrom functools import partial, wraps\nfrom inspect import signature\n\nimport numpy as np\nfrom scipy import sparse\nfrom scipy.stats import rankdata\nimport joblib\n\nfrom . import IS_PYPY\nfrom .. import config_context\nfrom ._testing import _get_args\nfrom ._testing import assert_raise_message\nfrom ._testing import assert_array_equal\nfrom ._testing import assert_array_almost_equal\nfrom ._testing import assert_allclose\nfrom ._testing import assert_allclose_dense_sparse\nfrom ._testing import set_random_state\nfrom ._testing import SkipTest\nfrom ._testing import ignore_warnings\nfrom ._testing import create_memmap_backed_data\nfrom ._testing import raises\nfrom . import is_scalar_nan\n\nfrom ..linear_model import LogisticRegression\nfrom ..linear_model import Ridge\n\nfrom ..base import (\n clone,\n ClusterMixin,\n is_classifier,\n is_regressor,\n is_outlier_detector,\n RegressorMixin,\n _is_pairwise,\n)\n\nfrom ..metrics import accuracy_score, adjusted_rand_score, f1_score\nfrom ..random_projection import BaseRandomProjection\nfrom ..feature_selection import SelectKBest\nfrom ..pipeline import make_pipeline\nfrom ..exceptions import DataConversionWarning\nfrom ..exceptions import NotFittedError\nfrom ..exceptions import SkipTestWarning\nfrom ..model_selection import train_test_split\nfrom ..model_selection import ShuffleSplit\nfrom ..model_selection._validation import _safe_split\nfrom ..metrics.pairwise import (rbf_kernel, linear_kernel, pairwise_distances)\n\nfrom .import shuffle\nfrom ._tags import (\n _DEFAULT_TAGS,\n _safe_tags,\n)\nfrom .validation import has_fit_parameter, _num_samples\nfrom ..preprocessing import StandardScaler\nfrom ..preprocessing import scale\nfrom ..datasets import (\n load_iris,\n make_blobs,\n make_multilabel_classification,\n make_regression,\n)\n\nREGRESSION_DATASET = None\nCROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']\n\n\ndef _yield_checks(estimator):\n name = estimator.__class__.__name__\n tags = _safe_tags(estimator)\n pairwise = _is_pairwise(estimator)\n\n yield check_no_attributes_set_in_init\n yield check_estimators_dtypes\n yield check_fit_score_takes_y\n yield check_sample_weights_pandas_series\n yield check_sample_weights_not_an_array\n yield check_sample_weights_list\n yield check_sample_weights_shape\n if has_fit_parameter(estimator, \"sample_weight\") and not pairwise:\n # We skip pairwise because the data is not pairwise\n yield partial(check_sample_weights_invariance, kind='ones')\n yield partial(check_sample_weights_invariance, kind='zeros')\n yield check_estimators_fit_returns_self\n yield partial(check_estimators_fit_returns_self, readonly_memmap=True)\n\n # Check that all estimator yield informative messages when\n # trained on empty datasets\n if not tags[\"no_validation\"]:\n yield check_complex_data\n yield check_dtype_object\n yield check_estimators_empty_data_messages\n\n if name not in CROSS_DECOMPOSITION:\n # cross-decomposition's \"transform\" returns X and Y\n yield check_pipeline_consistency\n\n if not tags[\"allow_nan\"] and not tags[\"no_validation\"]:\n # Test that all estimators check their input for NaN's and infs\n yield check_estimators_nan_inf\n\n if pairwise:\n # Check that pairwise estimator throws error on non-square input\n yield check_nonsquare_error\n\n yield check_estimators_overwrite_params\n if hasattr(estimator, 'sparsify'):\n yield check_sparsify_coefficients\n\n yield 
check_estimator_sparse_data\n\n # Test that estimators can be pickled, and once pickled\n # give the same answer as before.\n yield check_estimators_pickle\n\n yield check_estimator_get_tags_default_keys\n\ndef _yield_classifier_checks(classifier):\n tags = _safe_tags(classifier)\n\n # test classifiers can handle non-array data and pandas objects\n yield check_classifier_data_not_an_array\n # test classifiers trained on a single label always return this label\n yield check_classifiers_one_label\n yield check_classifiers_classes\n yield check_estimators_partial_fit_n_features\n if tags[\"multioutput\"]:\n yield check_classifier_multioutput\n # basic consistency testing\n yield check_classifiers_train\n yield partial(check_classifiers_train, readonly_memmap=True)\n yield partial(check_classifiers_train, readonly_memmap=True,\n X_dtype='float32')\n yield check_classifiers_regression_target\n if tags[\"multilabel\"]:\n yield check_classifiers_multilabel_representation_invariance\n if not tags[\"no_validation\"]:\n yield check_supervised_y_no_nan\n if not tags['multioutput_only']:\n yield check_supervised_y_2d\n if tags[\"requires_fit\"]:\n yield check_estimators_unfitted\n if 'class_weight' in classifier.get_params().keys():\n yield check_class_weight_classifiers\n\n yield check_non_transformer_estimators_n_iter\n # test if predict_proba is a monotonic transformation of decision_function\n yield check_decision_proba_consistency\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_supervised_y_no_nan(name, estimator_orig):\n # Checks that the Estimator targets are not NaN.\n estimator = clone(estimator_orig)\n rng = np.random.RandomState(888)\n X = rng.randn(10, 5)\n y = np.full(10, np.inf)\n y = _enforce_estimator_tags_y(estimator, y)\n\n match = (\n \"Input contains NaN, infinity or a value too large for \"\n r\"dtype\\('float64'\\).\"\n )\n err_msg = (\n f\"Estimator {name} should have raised error on fitting \"\n \"array y with NaN value.\"\n )\n with raises(ValueError, match=match, err_msg=err_msg):\n estimator.fit(X, y)\n\n\ndef _yield_regressor_checks(regressor):\n tags = _safe_tags(regressor)\n # TODO: test with intercept\n # TODO: test with multiple responses\n # basic testing\n yield check_regressors_train\n yield partial(check_regressors_train, readonly_memmap=True)\n yield partial(check_regressors_train, readonly_memmap=True,\n X_dtype='float32')\n yield check_regressor_data_not_an_array\n yield check_estimators_partial_fit_n_features\n if tags[\"multioutput\"]:\n yield check_regressor_multioutput\n yield check_regressors_no_decision_function\n if not tags[\"no_validation\"] and not tags['multioutput_only']:\n yield check_supervised_y_2d\n yield check_supervised_y_no_nan\n name = regressor.__class__.__name__\n if name != 'CCA':\n # check that the regressor handles int input\n yield check_regressors_int\n if tags[\"requires_fit\"]:\n yield check_estimators_unfitted\n yield check_non_transformer_estimators_n_iter\n\n\ndef _yield_transformer_checks(transformer):\n tags = _safe_tags(transformer)\n # All transformers should either deal with sparse data or raise an\n # exception with type TypeError and an intelligible error message\n if not tags[\"no_validation\"]:\n yield check_transformer_data_not_an_array\n # these don't actually fit the data, so don't raise errors\n yield check_transformer_general\n if tags[\"preserves_dtype\"]:\n yield check_transformer_preserve_dtypes\n yield partial(check_transformer_general, readonly_memmap=True)\n if not _safe_tags(transformer, 
key=\"stateless\"):\n yield check_transformers_unfitted\n # Dependent on external solvers and hence accessing the iter\n # param is non-trivial.\n external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',\n 'RandomizedLasso', 'LogisticRegressionCV']\n\n name = transformer.__class__.__name__\n if name not in external_solver:\n yield check_transformer_n_iter\n\n\ndef _yield_clustering_checks(clusterer):\n yield check_clusterer_compute_labels_predict\n name = clusterer.__class__.__name__\n if name not in ('WardAgglomeration', \"FeatureAgglomeration\"):\n # this is clustering on the features\n # let's not test that here.\n yield check_clustering\n yield partial(check_clustering, readonly_memmap=True)\n yield check_estimators_partial_fit_n_features\n yield check_non_transformer_estimators_n_iter\n\n\ndef _yield_outliers_checks(estimator):\n\n # checks for outlier detectors that have a fit_predict method\n if hasattr(estimator, 'fit_predict'):\n yield check_outliers_fit_predict\n\n # checks for estimators that can be used on a test set\n if hasattr(estimator, 'predict'):\n yield check_outliers_train\n yield partial(check_outliers_train, readonly_memmap=True)\n # test outlier detectors can handle non-array data\n yield check_classifier_data_not_an_array\n # test if NotFittedError is raised\n if _safe_tags(estimator, key=\"requires_fit\"):\n yield check_estimators_unfitted\n\n\ndef _yield_all_checks(estimator):\n name = estimator.__class__.__name__\n tags = _safe_tags(estimator)\n if \"2darray\" not in tags[\"X_types\"]:\n warnings.warn(\"Can't test estimator {} which requires input \"\n \" of type {}\".format(name, tags[\"X_types\"]),\n SkipTestWarning)\n return\n if tags[\"_skip_test\"]:\n warnings.warn(\"Explicit SKIP via _skip_test tag for estimator \"\n \"{}.\".format(name),\n SkipTestWarning)\n return\n\n for check in _yield_checks(estimator):\n yield check\n if is_classifier(estimator):\n for check in _yield_classifier_checks(estimator):\n yield check\n if is_regressor(estimator):\n for check in _yield_regressor_checks(estimator):\n yield check\n if hasattr(estimator, 'transform'):\n for check in _yield_transformer_checks(estimator):\n yield check\n if isinstance(estimator, ClusterMixin):\n for check in _yield_clustering_checks(estimator):\n yield check\n if is_outlier_detector(estimator):\n for check in _yield_outliers_checks(estimator):\n yield check\n yield check_parameters_default_constructible\n yield check_methods_sample_order_invariance\n yield check_methods_subset_invariance\n yield check_fit2d_1sample\n yield check_fit2d_1feature\n yield check_get_params_invariance\n yield check_set_params\n yield check_dict_unchanged\n yield check_dont_overwrite_parameters\n yield check_fit_idempotent\n if not tags[\"no_validation\"]:\n yield check_n_features_in\n yield check_fit1d\n yield check_fit2d_predict1d\n if tags[\"requires_y\"]:\n yield check_requires_y_none\n if tags[\"requires_positive_X\"]:\n yield check_fit_non_negative\n\n\ndef _get_check_estimator_ids(obj):\n \"\"\"Create pytest ids for checks.\n\n When `obj` is an estimator, this returns the pprint version of the\n estimator (with `print_changed_only=True`). 
When `obj` is a function, the\n name of the function is returned with its keyword arguments.\n\n `_get_check_estimator_ids` is designed to be used as the `id` in\n `pytest.mark.parametrize` where `check_estimator(..., generate_only=True)`\n is yielding estimators and checks.\n\n Parameters\n ----------\n obj : estimator or function\n Items generated by `check_estimator`.\n\n Returns\n -------\n id : str or None\n\n See Also\n --------\n check_estimator\n \"\"\"\n if callable(obj):\n if not isinstance(obj, partial):\n return obj.__name__\n\n if not obj.keywords:\n return obj.func.__name__\n\n kwstring = \",\".join([\"{}={}\".format(k, v)\n for k, v in obj.keywords.items()])\n return \"{}({})\".format(obj.func.__name__, kwstring)\n if hasattr(obj, \"get_params\"):\n with config_context(print_changed_only=True):\n return re.sub(r\"\\s\", \"\", str(obj))\n\n\ndef _construct_instance(Estimator):\n \"\"\"Construct Estimator instance if possible.\"\"\"\n required_parameters = getattr(Estimator, \"_required_parameters\", [])\n if len(required_parameters):\n if required_parameters in ([\"estimator\"], [\"base_estimator\"]):\n if issubclass(Estimator, RegressorMixin):\n estimator = Estimator(Ridge())\n else:\n estimator = Estimator(LogisticRegression(C=1))\n elif required_parameters in (['estimators'],):\n # Heterogeneous ensemble classes (i.e. stacking, voting)\n if issubclass(Estimator, RegressorMixin):\n estimator = Estimator(estimators=[\n (\"est1\", Ridge(alpha=0.1)),\n (\"est2\", Ridge(alpha=1))\n ])\n else:\n estimator = Estimator(estimators=[\n (\"est1\", LogisticRegression(C=0.1)),\n (\"est2\", LogisticRegression(C=1))\n ])\n else:\n msg = (f\"Can't instantiate estimator {Estimator.__name__} \"\n f\"parameters {required_parameters}\")\n # raise additional warning to be shown by pytest\n warnings.warn(msg, SkipTestWarning)\n raise SkipTest(msg)\n else:\n estimator = Estimator()\n return estimator\n\n\ndef _maybe_mark_xfail(estimator, check, pytest):\n # Mark (estimator, check) pairs as XFAIL if needed (see conditions in\n # _should_be_skipped_or_marked())\n # This is similar to _maybe_skip(), but this one is used by\n # @parametrize_with_checks() instead of check_estimator()\n\n should_be_marked, reason = _should_be_skipped_or_marked(estimator, check)\n if not should_be_marked:\n return estimator, check\n else:\n return pytest.param(estimator, check,\n marks=pytest.mark.xfail(reason=reason))\n\n\ndef _maybe_skip(estimator, check):\n # Wrap a check so that it's skipped if needed (see conditions in\n # _should_be_skipped_or_marked())\n # This is similar to _maybe_mark_xfail(), but this one is used by\n # check_estimator() instead of @parametrize_with_checks which requires\n # pytest\n should_be_skipped, reason = _should_be_skipped_or_marked(estimator, check)\n if not should_be_skipped:\n return check\n\n check_name = (check.func.__name__ if isinstance(check, partial)\n else check.__name__)\n\n @wraps(check)\n def wrapped(*args, **kwargs):\n raise SkipTest(\n f\"Skipping {check_name} for {estimator.__class__.__name__}: \"\n f\"{reason}\"\n )\n\n return wrapped\n\n\ndef _should_be_skipped_or_marked(estimator, check):\n # Return whether a check should be skipped (when using check_estimator())\n # or marked as XFAIL (when using @parametrize_with_checks()), along with a\n # reason.\n # Currently, a check should be skipped or marked if\n # the check is in the _xfail_checks tag of the estimator\n\n check_name = (check.func.__name__ if isinstance(check, partial)\n else check.__name__)\n\n xfail_checks 
= _safe_tags(estimator, key='_xfail_checks') or {}\n if check_name in xfail_checks:\n return True, xfail_checks[check_name]\n\n return False, 'placeholder reason that will never be used'\n\n\ndef parametrize_with_checks(estimators):\n \"\"\"Pytest specific decorator for parametrizing estimator checks.\n\n The `id` of each check is set to be a pprint version of the estimator\n and the name of the check with its keyword arguments.\n This allows to use `pytest -k` to specify which tests to run::\n\n pytest test_check_estimators.py -k check_estimators_fit_returns_self\n\n Parameters\n ----------\n estimators : list of estimators instances\n Estimators to generated checks for.\n\n .. versionchanged:: 0.24\n Passing a class was deprecated in version 0.23, and support for\n classes was removed in 0.24. Pass an instance instead.\n\n .. versionadded:: 0.24\n\n Returns\n -------\n decorator : `pytest.mark.parametrize`\n\n Examples\n --------\n >>> from sklearn.utils.estimator_checks import parametrize_with_checks\n >>> from sklearn.linear_model import LogisticRegression\n >>> from sklearn.tree import DecisionTreeRegressor\n\n >>> @parametrize_with_checks([LogisticRegression(),\n ... DecisionTreeRegressor()])\n ... def test_sklearn_compatible_estimator(estimator, check):\n ... check(estimator)\n\n \"\"\"\n import pytest\n\n if any(isinstance(est, type) for est in estimators):\n msg = (\"Passing a class was deprecated in version 0.23 \"\n \"and isn't supported anymore from 0.24.\"\n \"Please pass an instance instead.\")\n raise TypeError(msg)\n\n def checks_generator():\n for estimator in estimators:\n name = type(estimator).__name__\n for check in _yield_all_checks(estimator):\n check = partial(check, name)\n yield _maybe_mark_xfail(estimator, check, pytest)\n\n return pytest.mark.parametrize(\"estimator, check\", checks_generator(),\n ids=_get_check_estimator_ids)\n\n\ndef check_estimator(Estimator, generate_only=False):\n \"\"\"Check if estimator adheres to scikit-learn conventions.\n\n This estimator will run an extensive test-suite for input validation,\n shapes, etc, making sure that the estimator complies with `scikit-learn`\n conventions as detailed in :ref:`rolling_your_own_estimator`.\n Additional tests for classifiers, regressors, clustering or transformers\n will be run if the Estimator class inherits from the corresponding mixin\n from sklearn.base.\n\n Setting `generate_only=True` returns a generator that yields (estimator,\n check) tuples where the check can be called independently from each\n other, i.e. `check(estimator)`. This allows all checks to be run\n independently and report the checks that are failing.\n\n scikit-learn provides a pytest specific decorator,\n :func:`~sklearn.utils.parametrize_with_checks`, making it easier to test\n multiple estimators.\n\n Parameters\n ----------\n Estimator : estimator object\n Estimator instance to check.\n\n .. versionchanged:: 0.24\n Passing a class was deprecated in version 0.23, and support for\n classes was removed in 0.24.\n\n generate_only : bool, default=False\n When `False`, checks are evaluated when `check_estimator` is called.\n When `True`, `check_estimator` returns a generator that yields\n (estimator, check) tuples. The check is run by calling\n `check(estimator)`.\n\n .. versionadded:: 0.22\n\n Returns\n -------\n checks_generator : generator\n Generator that yields (estimator, check) tuples. 
Returned when\n `generate_only=True`.\n \"\"\"\n if isinstance(Estimator, type):\n msg = (\"Passing a class was deprecated in version 0.23 \"\n \"and isn't supported anymore from 0.24.\"\n \"Please pass an instance instead.\")\n raise TypeError(msg)\n\n estimator = Estimator\n name = type(estimator).__name__\n\n def checks_generator():\n for check in _yield_all_checks(estimator):\n check = _maybe_skip(estimator, check)\n yield estimator, partial(check, name)\n\n if generate_only:\n return checks_generator()\n\n for estimator, check in checks_generator():\n try:\n check(estimator)\n except SkipTest as exception:\n # SkipTest is thrown when pandas can't be imported, or by checks\n # that are in the xfail_checks tag\n warnings.warn(str(exception), SkipTestWarning)\n\n\ndef _regression_dataset():\n global REGRESSION_DATASET\n if REGRESSION_DATASET is None:\n X, y = make_regression(\n n_samples=200, n_features=10, n_informative=1,\n bias=5.0, noise=20, random_state=42,\n )\n X = StandardScaler().fit_transform(X)\n REGRESSION_DATASET = X, y\n return REGRESSION_DATASET\n\n\ndef _set_checking_parameters(estimator):\n # set parameters to speed up some estimators and\n # avoid deprecated behaviour\n params = estimator.get_params()\n name = estimator.__class__.__name__\n if (\"n_iter\" in params and name != \"TSNE\"):\n estimator.set_params(n_iter=5)\n if \"max_iter\" in params:\n if estimator.max_iter is not None:\n estimator.set_params(max_iter=min(5, estimator.max_iter))\n # LinearSVR, LinearSVC\n if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']:\n estimator.set_params(max_iter=20)\n # NMF\n if estimator.__class__.__name__ == 'NMF':\n # FIXME : init should be removed in 0.26\n estimator.set_params(max_iter=500, init='nndsvda')\n # MLP\n if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:\n estimator.set_params(max_iter=100)\n if \"n_resampling\" in params:\n # randomized lasso\n estimator.set_params(n_resampling=5)\n if \"n_estimators\" in params:\n estimator.set_params(n_estimators=min(5, estimator.n_estimators))\n if \"max_trials\" in params:\n # RANSAC\n estimator.set_params(max_trials=10)\n if \"n_init\" in params:\n # K-Means\n estimator.set_params(n_init=2)\n\n if name == 'TruncatedSVD':\n # TruncatedSVD doesn't run with n_components = n_features\n # This is ugly :-/\n estimator.n_components = 1\n\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = min(estimator.n_clusters, 2)\n\n if hasattr(estimator, \"n_best\"):\n estimator.n_best = 1\n\n if name == \"SelectFdr\":\n # be tolerant of noisy datasets (not actually speed)\n estimator.set_params(alpha=.5)\n\n if name == \"TheilSenRegressor\":\n estimator.max_subpopulation = 100\n\n if isinstance(estimator, BaseRandomProjection):\n # Due to the jl lemma and often very few samples, the number\n # of components of the random matrix projection will be probably\n # greater than the number of features.\n # So we impose a smaller number (avoid \"auto\" mode)\n estimator.set_params(n_components=2)\n\n if isinstance(estimator, SelectKBest):\n # SelectKBest has a default of k=10\n # which is more feature than we have in most case.\n estimator.set_params(k=1)\n\n if name in ('HistGradientBoostingClassifier',\n 'HistGradientBoostingRegressor'):\n # The default min_samples_leaf (20) isn't appropriate for small\n # datasets (only very shallow trees are built) that the checks use.\n estimator.set_params(min_samples_leaf=5)\n\n if name == 'DummyClassifier':\n # the default strategy prior would output constant 
predictions and fail\n # for check_classifiers_predictions\n estimator.set_params(strategy='stratified')\n\n # Speed-up by reducing the number of CV or splits for CV estimators\n loo_cv = ['RidgeCV']\n if name not in loo_cv and hasattr(estimator, 'cv'):\n estimator.set_params(cv=3)\n if hasattr(estimator, 'n_splits'):\n estimator.set_params(n_splits=3)\n\n if name == 'OneHotEncoder':\n estimator.set_params(handle_unknown='ignore')\n\n\nclass _NotAnArray:\n \"\"\"An object that is convertible to an array.\n\n Parameters\n ----------\n data : array-like\n The data.\n \"\"\"\n\n def __init__(self, data):\n self.data = np.asarray(data)\n\n def __array__(self, dtype=None):\n return self.data\n\n def __array_function__(self, func, types, args, kwargs):\n if func.__name__ == \"may_share_memory\":\n return True\n raise TypeError(\"Don't want to call array_function {}!\".format(\n func.__name__))\n\n\ndef _is_pairwise_metric(estimator):\n \"\"\"Returns True if estimator accepts pairwise metric.\n\n Parameters\n ----------\n estimator : object\n Estimator object to test.\n\n Returns\n -------\n out : bool\n True if _pairwise is set to True and False otherwise.\n \"\"\"\n metric = getattr(estimator, \"metric\", None)\n\n return bool(metric == 'precomputed')\n\n\ndef _pairwise_estimator_convert_X(X, estimator, kernel=linear_kernel):\n\n if _is_pairwise_metric(estimator):\n return pairwise_distances(X, metric='euclidean')\n if _is_pairwise(estimator):\n return kernel(X, X)\n\n return X\n\n\ndef _generate_sparse_matrix(X_csr):\n \"\"\"Generate sparse matrices with {32,64}bit indices of diverse format.\n\n Parameters\n ----------\n X_csr: CSR Matrix\n Input matrix in CSR format.\n\n Returns\n -------\n out: iter(Matrices)\n In format['dok', 'lil', 'dia', 'bsr', 'csr', 'csc', 'coo',\n 'coo_64', 'csc_64', 'csr_64']\n \"\"\"\n\n assert X_csr.format == 'csr'\n yield 'csr', X_csr.copy()\n for sparse_format in ['dok', 'lil', 'dia', 'bsr', 'csc', 'coo']:\n yield sparse_format, X_csr.asformat(sparse_format)\n\n # Generate large indices matrix only if its supported by scipy\n X_coo = X_csr.asformat('coo')\n X_coo.row = X_coo.row.astype('int64')\n X_coo.col = X_coo.col.astype('int64')\n yield \"coo_64\", X_coo\n\n for sparse_format in ['csc', 'csr']:\n X = X_csr.asformat(sparse_format)\n X.indices = X.indices.astype('int64')\n X.indptr = X.indptr.astype('int64')\n yield sparse_format + \"_64\", X\n\n\ndef check_estimator_sparse_data(name, estimator_orig):\n rng = np.random.RandomState(0)\n X = rng.rand(40, 10)\n X[X < .8] = 0\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n X_csr = sparse.csr_matrix(X)\n y = (4 * rng.rand(40)).astype(int)\n # catch deprecation warnings\n with ignore_warnings(category=FutureWarning):\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n tags = _safe_tags(estimator_orig)\n for matrix_format, X in _generate_sparse_matrix(X_csr):\n # catch deprecation warnings\n with ignore_warnings(category=FutureWarning):\n estimator = clone(estimator_orig)\n if name in ['Scaler', 'StandardScaler']:\n estimator.set_params(with_mean=False)\n # fit and predict\n if \"64\" in matrix_format:\n err_msg = (\n f\"Estimator {name} doesn't seem to support {matrix_format} \"\n \"matrix, and is not failing gracefully, e.g. 
by using \"\n \"check_array(X, accept_large_sparse=False)\"\n )\n else:\n err_msg = (\n f\"Estimator {name} doesn't seem to fail gracefully on sparse \"\n \"data: error message should state explicitly that sparse \"\n \"input is not supported if this is not the case.\"\n )\n with raises(\n (TypeError, ValueError),\n match=[\"sparse\", \"Sparse\"],\n may_pass=True,\n err_msg=err_msg,\n ):\n with ignore_warnings(category=FutureWarning):\n estimator.fit(X, y)\n if hasattr(estimator, \"predict\"):\n pred = estimator.predict(X)\n if tags['multioutput_only']:\n assert pred.shape == (X.shape[0], 1)\n else:\n assert pred.shape == (X.shape[0],)\n if hasattr(estimator, 'predict_proba'):\n probs = estimator.predict_proba(X)\n if tags['binary_only']:\n expected_probs_shape = (X.shape[0], 2)\n else:\n expected_probs_shape = (X.shape[0], 4)\n assert probs.shape == expected_probs_shape\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_sample_weights_pandas_series(name, estimator_orig):\n # check that estimators will accept a 'sample_weight' parameter of\n # type pandas.Series in the 'fit' function.\n estimator = clone(estimator_orig)\n if has_fit_parameter(estimator, \"sample_weight\"):\n try:\n import pandas as pd\n X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],\n [2, 1], [2, 2], [2, 3], [2, 4],\n [3, 1], [3, 2], [3, 3], [3, 4]])\n X = pd.DataFrame(_pairwise_estimator_convert_X(X, estimator_orig))\n y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])\n weights = pd.Series([1] * 12)\n if _safe_tags(estimator, key=\"multioutput_only\"):\n y = pd.DataFrame(y)\n try:\n estimator.fit(X, y, sample_weight=weights)\n except ValueError:\n raise ValueError(\"Estimator {0} raises error if \"\n \"'sample_weight' parameter is of \"\n \"type pandas.Series\".format(name))\n except ImportError:\n raise SkipTest(\"pandas is not installed: not testing for \"\n \"input of type pandas.Series to class weight.\")\n\n\n@ignore_warnings(category=(FutureWarning))\ndef check_sample_weights_not_an_array(name, estimator_orig):\n # check that estimators will accept a 'sample_weight' parameter of\n # type _NotAnArray in the 'fit' function.\n estimator = clone(estimator_orig)\n if has_fit_parameter(estimator, \"sample_weight\"):\n X = np.array([[1, 1], [1, 2], [1, 3], [1, 4],\n [2, 1], [2, 2], [2, 3], [2, 4],\n [3, 1], [3, 2], [3, 3], [3, 4]])\n X = _NotAnArray(_pairwise_estimator_convert_X(X, estimator_orig))\n y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])\n weights = _NotAnArray([1] * 12)\n if _safe_tags(estimator, key=\"multioutput_only\"):\n y = _NotAnArray(y.data.reshape(-1, 1))\n estimator.fit(X, y, sample_weight=weights)\n\n\n@ignore_warnings(category=(FutureWarning))\ndef check_sample_weights_list(name, estimator_orig):\n # check that estimators will accept a 'sample_weight' parameter of\n # type list in the 'fit' function.\n if has_fit_parameter(estimator_orig, \"sample_weight\"):\n estimator = clone(estimator_orig)\n rnd = np.random.RandomState(0)\n n_samples = 30\n X = _pairwise_estimator_convert_X(rnd.uniform(size=(n_samples, 3)),\n estimator_orig)\n y = np.arange(n_samples) % 3\n y = _enforce_estimator_tags_y(estimator, y)\n sample_weight = [3] * n_samples\n # Test that estimators don't raise any exception\n estimator.fit(X, y, sample_weight=sample_weight)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_sample_weights_shape(name, estimator_orig):\n # check that estimators raise an error if sample_weight\n # shape mismatches the input\n if (has_fit_parameter(estimator_orig, \"sample_weight\") and\n 
not _is_pairwise(estimator_orig)):\n estimator = clone(estimator_orig)\n X = np.array([[1, 3], [1, 3], [1, 3], [1, 3],\n [2, 1], [2, 1], [2, 1], [2, 1],\n [3, 3], [3, 3], [3, 3], [3, 3],\n [4, 1], [4, 1], [4, 1], [4, 1]])\n y = np.array([1, 1, 1, 1, 2, 2, 2, 2,\n 1, 1, 1, 1, 2, 2, 2, 2])\n y = _enforce_estimator_tags_y(estimator, y)\n\n estimator.fit(X, y, sample_weight=np.ones(len(y)))\n\n with raises(ValueError):\n estimator.fit(X, y, sample_weight=np.ones(2 * len(y)))\n\n with raises(ValueError):\n estimator.fit(X, y, sample_weight=np.ones((len(y), 2)))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_sample_weights_invariance(name, estimator_orig, kind=\"ones\"):\n # For kind=\"ones\" check that the estimators yield same results for\n # unit weights and no weights\n # For kind=\"zeros\" check that setting sample_weight to 0 is equivalent\n # to removing corresponding samples.\n estimator1 = clone(estimator_orig)\n estimator2 = clone(estimator_orig)\n set_random_state(estimator1, random_state=0)\n set_random_state(estimator2, random_state=0)\n\n X1 = np.array([[1, 3], [1, 3], [1, 3], [1, 3],\n [2, 1], [2, 1], [2, 1], [2, 1],\n [3, 3], [3, 3], [3, 3], [3, 3],\n [4, 1], [4, 1], [4, 1], [4, 1]], dtype=np.float64)\n y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2,\n 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)\n\n if kind == 'ones':\n X2 = X1\n y2 = y1\n sw2 = np.ones(shape=len(y1))\n err_msg = (f\"For {name} sample_weight=None is not equivalent to \"\n f\"sample_weight=ones\")\n elif kind == 'zeros':\n # Construct a dataset that is very different to (X, y) if weights\n # are disregarded, but identical to (X, y) given weights.\n X2 = np.vstack([X1, X1 + 1])\n y2 = np.hstack([y1, 3 - y1])\n sw2 = np.ones(shape=len(y1) * 2)\n sw2[len(y1):] = 0\n X2, y2, sw2 = shuffle(X2, y2, sw2, random_state=0)\n\n err_msg = (f\"For {name}, a zero sample_weight is not equivalent \"\n f\"to removing the sample\")\n else: # pragma: no cover\n raise ValueError\n\n y1 = _enforce_estimator_tags_y(estimator1, y1)\n y2 = _enforce_estimator_tags_y(estimator2, y2)\n\n estimator1.fit(X1, y=y1, sample_weight=None)\n estimator2.fit(X2, y=y2, sample_weight=sw2)\n\n for method in [\"predict\", \"predict_proba\",\n \"decision_function\", \"transform\"]:\n if hasattr(estimator_orig, method):\n X_pred1 = getattr(estimator1, method)(X1)\n X_pred2 = getattr(estimator2, method)(X1)\n assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg)\n\n\n@ignore_warnings(category=(FutureWarning, UserWarning))\ndef check_dtype_object(name, estimator_orig):\n # check that estimators treat dtype object as numeric if possible\n rng = np.random.RandomState(0)\n X = _pairwise_estimator_convert_X(rng.rand(40, 10), estimator_orig)\n X = X.astype(object)\n tags = _safe_tags(estimator_orig)\n y = (X[:, 0] * 4).astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n estimator.fit(X, y)\n if hasattr(estimator, \"predict\"):\n estimator.predict(X)\n\n if hasattr(estimator, \"transform\"):\n estimator.transform(X)\n\n with raises(Exception, match=\"Unknown label type\", may_pass=True):\n estimator.fit(X, y.astype(object))\n\n if 'string' not in tags['X_types']:\n X[0, 0] = {'foo': 'bar'}\n msg = \"argument must be a string.* number\"\n with raises(TypeError, match=msg):\n estimator.fit(X, y)\n else:\n # Estimators supporting string will not call np.asarray to convert the\n # data to numeric and therefore, the error will not be raised.\n # Checking for each element dtype in the input array will be costly.\n # Refer 
to #11401 for full discussion.\n estimator.fit(X, y)\n\n\ndef check_complex_data(name, estimator_orig):\n # check that estimators raise an exception on providing complex data\n X = np.random.sample(10) + 1j * np.random.sample(10)\n X = X.reshape(-1, 1)\n y = np.random.sample(10) + 1j * np.random.sample(10)\n estimator = clone(estimator_orig)\n with raises(ValueError, match=\"Complex data not supported\"):\n estimator.fit(X, y)\n\n\n@ignore_warnings\ndef check_dict_unchanged(name, estimator_orig):\n # this estimator raises\n # ValueError: Found array with 0 feature(s) (shape=(23, 0))\n # while a minimum of 1 is required.\n # error\n if name in ['SpectralCoclustering']:\n return\n rnd = np.random.RandomState(0)\n if name in ['RANSACRegressor']:\n X = 3 * rnd.uniform(size=(20, 3))\n else:\n X = 2 * rnd.uniform(size=(20, 3))\n\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n if hasattr(estimator, \"n_best\"):\n estimator.n_best = 1\n\n set_random_state(estimator, 1)\n\n estimator.fit(X, y)\n for method in [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]:\n if hasattr(estimator, method):\n dict_before = estimator.__dict__.copy()\n getattr(estimator, method)(X)\n assert estimator.__dict__ == dict_before, (\n 'Estimator changes __dict__ during %s' % method)\n\n\ndef _is_public_parameter(attr):\n return not (attr.startswith('_') or attr.endswith('_'))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_dont_overwrite_parameters(name, estimator_orig):\n # check that fit method only changes or sets private attributes\n if hasattr(estimator_orig.__init__, \"deprecated_original\"):\n # to not check deprecated classes\n return\n estimator = clone(estimator_orig)\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(int)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n dict_before_fit = estimator.__dict__.copy()\n estimator.fit(X, y)\n\n dict_after_fit = estimator.__dict__\n\n public_keys_after_fit = [key for key in dict_after_fit.keys()\n if _is_public_parameter(key)]\n\n attrs_added_by_fit = [key for key in public_keys_after_fit\n if key not in dict_before_fit.keys()]\n\n # check that fit doesn't add any public attribute\n assert not attrs_added_by_fit, (\n 'Estimator adds public attribute(s) during' ' the fit method.'\n ' Estimators are only allowed to add private attributes'\n ' either started with _ or ended'\n ' with _ but %s added'\n % ', '.join(attrs_added_by_fit))\n\n # check that fit doesn't change any public attribute\n attrs_changed_by_fit = [key for key in public_keys_after_fit\n if (dict_before_fit[key]\n is not dict_after_fit[key])]\n\n assert not attrs_changed_by_fit, (\n 'Estimator changes public attribute(s) during'\n ' the fit method. 
Estimators are only allowed'\n ' to change attributes started'\n ' or ended with _, but'\n ' %s changed'\n % ', '.join(attrs_changed_by_fit))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_fit2d_predict1d(name, estimator_orig):\n # check by fitting a 2d array and predicting with a 1d array\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n estimator.fit(X, y)\n\n for method in [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]:\n if hasattr(estimator, method):\n assert_raise_message(ValueError, \"Reshape your data\",\n getattr(estimator, method), X[0])\n\n\ndef _apply_on_subsets(func, X):\n # apply function on the whole set and on mini batches\n result_full = func(X)\n n_features = X.shape[1]\n result_by_batch = [func(batch.reshape(1, n_features))\n for batch in X]\n\n # func can output tuple (e.g. score_samples)\n if type(result_full) == tuple:\n result_full = result_full[0]\n result_by_batch = list(map(lambda x: x[0], result_by_batch))\n\n if sparse.issparse(result_full):\n result_full = result_full.A\n result_by_batch = [x.A for x in result_by_batch]\n\n return np.ravel(result_full), np.ravel(result_by_batch)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_methods_subset_invariance(name, estimator_orig):\n # check that method gives invariant results if applied\n # on mini batches or the whole set\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n estimator.fit(X, y)\n\n for method in [\"predict\", \"transform\", \"decision_function\",\n \"score_samples\", \"predict_proba\"]:\n\n msg = (\"{method} of {name} is not invariant when applied \"\n \"to a subset.\").format(method=method, name=name)\n\n if hasattr(estimator, method):\n result_full, result_by_batch = _apply_on_subsets(\n getattr(estimator, method), X)\n assert_allclose(result_full, result_by_batch,\n atol=1e-7, err_msg=msg)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_methods_sample_order_invariance(name, estimator_orig):\n # check that method gives invariant results if applied\n # on a subset with different sample order\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(np.int64)\n if _safe_tags(estimator_orig, key='binary_only'):\n y[y == 2] = 1\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 2\n\n set_random_state(estimator, 1)\n estimator.fit(X, y)\n\n idx = np.random.permutation(X.shape[0])\n\n for method in [\"predict\", \"transform\", \"decision_function\",\n \"score_samples\", \"predict_proba\"]:\n msg = (\"{method} of {name} is not invariant when applied to a dataset\"\n \"with different sample 
order.\").format(method=method, name=name)\n\n if hasattr(estimator, method):\n assert_allclose_dense_sparse(getattr(estimator, method)(X)[idx],\n getattr(estimator, method)(X[idx]),\n atol=1e-9,\n err_msg=msg)\n\n\n@ignore_warnings\ndef check_fit2d_1sample(name, estimator_orig):\n # Check that fitting a 2d array with only one sample either works or\n # returns an informative message. The error message should either mention\n # the number of samples or the number of classes.\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(1, 10))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n\n # min_cluster_size cannot be less than the data size for OPTICS.\n if name == 'OPTICS':\n estimator.set_params(min_samples=1)\n\n msgs = [\"1 sample\", \"n_samples = 1\", \"n_samples=1\", \"one sample\",\n \"1 class\", \"one class\"]\n\n with raises(ValueError, match=msgs, may_pass=True):\n estimator.fit(X, y)\n\n\n@ignore_warnings\ndef check_fit2d_1feature(name, estimator_orig):\n # check fitting a 2d array with only 1 feature either works or returns\n # informative message\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(10, 1))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = X[:, 0].astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n # ensure two labels in subsample for RandomizedLogisticRegression\n if name == 'RandomizedLogisticRegression':\n estimator.sample_fraction = 1\n # ensure non skipped trials for RANSACRegressor\n if name == 'RANSACRegressor':\n estimator.residual_threshold = 0.5\n\n y = _enforce_estimator_tags_y(estimator, y)\n set_random_state(estimator, 1)\n\n msgs = [r\"1 feature\\(s\\)\", \"n_features = 1\", \"n_features=1\"]\n\n with raises(ValueError, match=msgs, may_pass=True):\n estimator.fit(X, y)\n\n\n@ignore_warnings\ndef check_fit1d(name, estimator_orig):\n # check fitting 1d X array raises a ValueError\n rnd = np.random.RandomState(0)\n X = 3 * rnd.uniform(size=(20))\n y = X.astype(int)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if hasattr(estimator, \"n_components\"):\n estimator.n_components = 1\n if hasattr(estimator, \"n_clusters\"):\n estimator.n_clusters = 1\n\n set_random_state(estimator, 1)\n with raises(ValueError):\n estimator.fit(X, y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_transformer_general(name, transformer, readonly_memmap=False):\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n X = StandardScaler().fit_transform(X)\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, transformer)\n\n if readonly_memmap:\n X, y = create_memmap_backed_data([X, y])\n\n _check_transformer(name, transformer, X, y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_transformer_data_not_an_array(name, transformer):\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n X = StandardScaler().fit_transform(X)\n # We need to make sure that we have non negative data, for things\n # like NMF\n X 
-= X.min() - .1\n X = _pairwise_estimator_convert_X(X, transformer)\n this_X = _NotAnArray(X)\n this_y = _NotAnArray(np.asarray(y))\n _check_transformer(name, transformer, this_X, this_y)\n # try the same with some list\n _check_transformer(name, transformer, X.tolist(), y.tolist())\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_transformers_unfitted(name, transformer):\n X, y = _regression_dataset()\n\n transformer = clone(transformer)\n with raises(\n (AttributeError, ValueError),\n err_msg=\"The unfitted \"\n f\"transformer {name} does not raise an error when \"\n \"transform is called. Perhaps use \"\n \"check_is_fitted in transform.\",\n ):\n transformer.transform(X)\n\n\ndef _check_transformer(name, transformer_orig, X, y):\n n_samples, n_features = np.asarray(X).shape\n transformer = clone(transformer_orig)\n set_random_state(transformer)\n\n # fit\n\n if name in CROSS_DECOMPOSITION:\n y_ = np.c_[np.asarray(y), np.asarray(y)]\n y_[::2, 1] *= 2\n if isinstance(X, _NotAnArray):\n y_ = _NotAnArray(y_)\n else:\n y_ = y\n\n transformer.fit(X, y_)\n # fit_transform method should work on non fitted estimator\n transformer_clone = clone(transformer)\n X_pred = transformer_clone.fit_transform(X, y=y_)\n\n if isinstance(X_pred, tuple):\n for x_pred in X_pred:\n assert x_pred.shape[0] == n_samples\n else:\n # check for consistent n_samples\n assert X_pred.shape[0] == n_samples\n\n if hasattr(transformer, 'transform'):\n if name in CROSS_DECOMPOSITION:\n X_pred2 = transformer.transform(X, y_)\n X_pred3 = transformer.fit_transform(X, y=y_)\n else:\n X_pred2 = transformer.transform(X)\n X_pred3 = transformer.fit_transform(X, y=y_)\n\n if _safe_tags(transformer_orig, key='non_deterministic'):\n msg = name + ' is non deterministic'\n raise SkipTest(msg)\n if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):\n for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):\n assert_allclose_dense_sparse(\n x_pred, x_pred2, atol=1e-2,\n err_msg=\"fit_transform and transform outcomes \"\n \"not consistent in %s\"\n % transformer)\n assert_allclose_dense_sparse(\n x_pred, x_pred3, atol=1e-2,\n err_msg=\"consecutive fit_transform outcomes \"\n \"not consistent in %s\"\n % transformer)\n else:\n assert_allclose_dense_sparse(\n X_pred, X_pred2,\n err_msg=\"fit_transform and transform outcomes \"\n \"not consistent in %s\"\n % transformer, atol=1e-2)\n assert_allclose_dense_sparse(\n X_pred, X_pred3, atol=1e-2,\n err_msg=\"consecutive fit_transform outcomes \"\n \"not consistent in %s\"\n % transformer)\n assert _num_samples(X_pred2) == n_samples\n assert _num_samples(X_pred3) == n_samples\n\n # raises error on malformed input for transform\n if hasattr(X, 'shape') and \\\n not _safe_tags(transformer, key=\"stateless\") and \\\n X.ndim == 2 and X.shape[1] > 1:\n\n # If it's not an array, it does not have a 'T' property\n with raises(\n ValueError,\n err_msg=f\"The transformer {name} does not raise an error \"\n \"when the number of features in transform is different from \"\n \"the number of features in fit.\"\n ):\n transformer.transform(X[:, :-1])\n\n\n@ignore_warnings\ndef check_pipeline_consistency(name, estimator_orig):\n if _safe_tags(estimator_orig, key='non_deterministic'):\n msg = name + ' is non deterministic'\n raise SkipTest(msg)\n\n # check that make_pipeline(est) gives same score as est\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, estimator_orig, 
kernel=rbf_kernel)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n set_random_state(estimator)\n pipeline = make_pipeline(estimator)\n estimator.fit(X, y)\n pipeline.fit(X, y)\n\n funcs = [\"score\", \"fit_transform\"]\n\n for func_name in funcs:\n func = getattr(estimator, func_name, None)\n if func is not None:\n func_pipeline = getattr(pipeline, func_name)\n result = func(X, y)\n result_pipe = func_pipeline(X, y)\n assert_allclose_dense_sparse(result, result_pipe)\n\n\n@ignore_warnings\ndef check_fit_score_takes_y(name, estimator_orig):\n # check that all estimators accept an optional y\n # in fit and score so they can be used in pipelines\n rnd = np.random.RandomState(0)\n n_samples = 30\n X = rnd.uniform(size=(n_samples, 3))\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = np.arange(n_samples) % 3\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n set_random_state(estimator)\n\n funcs = [\"fit\", \"score\", \"partial_fit\", \"fit_predict\", \"fit_transform\"]\n for func_name in funcs:\n func = getattr(estimator, func_name, None)\n if func is not None:\n func(X, y)\n args = [p.name for p in signature(func).parameters.values()]\n if args[0] == \"self\":\n # if_delegate_has_method makes methods into functions\n # with an explicit \"self\", so need to shift arguments\n args = args[1:]\n assert args[1] in [\"y\", \"Y\"], (\n \"Expected y or Y as second argument for method \"\n \"%s of %s. Got arguments: %r.\"\n % (func_name, type(estimator).__name__, args))\n\n\n@ignore_warnings\ndef check_estimators_dtypes(name, estimator_orig):\n rnd = np.random.RandomState(0)\n X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)\n X_train_32 = _pairwise_estimator_convert_X(X_train_32, estimator_orig)\n X_train_64 = X_train_32.astype(np.float64)\n X_train_int_64 = X_train_32.astype(np.int64)\n X_train_int_32 = X_train_32.astype(np.int32)\n y = X_train_int_64[:, 0]\n y = _enforce_estimator_tags_y(estimator_orig, y)\n\n methods = [\"predict\", \"transform\", \"decision_function\", \"predict_proba\"]\n\n for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:\n estimator = clone(estimator_orig)\n set_random_state(estimator, 1)\n estimator.fit(X_train, y)\n\n for method in methods:\n if hasattr(estimator, method):\n getattr(estimator, method)(X_train)\n\n\ndef check_transformer_preserve_dtypes(name, transformer_orig):\n # check that dtype are preserved meaning if input X is of some dtype\n # X_transformed should be from the same dtype.\n X, y = make_blobs(\n n_samples=30,\n centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0,\n cluster_std=0.1,\n )\n X = StandardScaler().fit_transform(X)\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, transformer_orig)\n\n for dtype in _safe_tags(transformer_orig, key=\"preserves_dtype\"):\n X_cast = X.astype(dtype)\n transformer = clone(transformer_orig)\n set_random_state(transformer)\n X_trans = transformer.fit_transform(X_cast, y)\n\n if isinstance(X_trans, tuple):\n # cross-decompostion returns a tuple of (x_scores, y_scores)\n # when given y with fit_transform; only check the first element\n X_trans = X_trans[0]\n\n # check that the output dtype is preserved\n assert X_trans.dtype == dtype, (\n f'Estimator transform dtype: {X_trans.dtype} - '\n f'original/expected dtype: {dtype.__name__}'\n )\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_empty_data_messages(name, estimator_orig):\n e = clone(estimator_orig)\n set_random_state(e, 1)\n\n 
X_zero_samples = np.empty(0).reshape(0, 3)\n # The precise message can change depending on whether X or y is\n # validated first. Let us test the type of exception only:\n err_msg = (\n f\"The estimator {name} does not raise an error when an \"\n \"empty data is used to train. Perhaps use check_array in train.\"\n )\n with raises(ValueError, err_msg=err_msg):\n e.fit(X_zero_samples, [])\n\n X_zero_features = np.empty(0).reshape(12, 0)\n # the following y should be accepted by both classifiers and regressors\n # and ignored by unsupervised models\n y = _enforce_estimator_tags_y(\n e, np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0])\n )\n msg = (\n r\"0 feature\\(s\\) \\(shape=\\(\\d*, 0\\)\\) while a minimum of \\d* \"\n \"is required.\"\n )\n with raises(ValueError, match=msg):\n e.fit(X_zero_features, y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_nan_inf(name, estimator_orig):\n # Checks that Estimator X's do not contain NaN or inf.\n rnd = np.random.RandomState(0)\n X_train_finite = _pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)),\n estimator_orig)\n X_train_nan = rnd.uniform(size=(10, 3))\n X_train_nan[0, 0] = np.nan\n X_train_inf = rnd.uniform(size=(10, 3))\n X_train_inf[0, 0] = np.inf\n y = np.ones(10)\n y[:5] = 0\n y = _enforce_estimator_tags_y(estimator_orig, y)\n error_string_fit = \"Estimator doesn't check for NaN and inf in fit.\"\n error_string_predict = (\"Estimator doesn't check for NaN and inf in\"\n \" predict.\")\n error_string_transform = (\"Estimator doesn't check for NaN and inf in\"\n \" transform.\")\n for X_train in [X_train_nan, X_train_inf]:\n # catch deprecation warnings\n with ignore_warnings(category=FutureWarning):\n estimator = clone(estimator_orig)\n set_random_state(estimator, 1)\n # try to fit\n with raises(\n ValueError, match=[\"inf\", \"NaN\"], err_msg=error_string_fit\n ):\n estimator.fit(X_train, y)\n # actually fit\n estimator.fit(X_train_finite, y)\n\n # predict\n if hasattr(estimator, \"predict\"):\n with raises(\n ValueError,\n match=[\"inf\", \"NaN\"],\n err_msg=error_string_predict,\n ):\n estimator.predict(X_train)\n\n # transform\n if hasattr(estimator, \"transform\"):\n with raises(\n ValueError,\n match=[\"inf\", \"NaN\"],\n err_msg=error_string_transform,\n ):\n estimator.transform(X_train)\n\n\n@ignore_warnings\ndef check_nonsquare_error(name, estimator_orig):\n \"\"\"Test that error is thrown when non-square data provided.\"\"\"\n\n X, y = make_blobs(n_samples=20, n_features=10)\n estimator = clone(estimator_orig)\n\n with raises(\n ValueError,\n err_msg=f\"The pairwise estimator {name} does not raise an error \"\n \"on non-square data\",\n ):\n estimator.fit(X, y)\n\n\n@ignore_warnings\ndef check_estimators_pickle(name, estimator_orig):\n \"\"\"Test that we can pickle all estimators.\"\"\"\n check_methods = [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]\n\n X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],\n random_state=0, n_features=2, cluster_std=0.1)\n\n # some estimators can't do features less than 0\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)\n\n tags = _safe_tags(estimator_orig)\n # include NaN values when the estimator should deal with them\n if tags['allow_nan']:\n # set randomly 10 elements to np.nan\n rng = np.random.RandomState(42)\n mask = rng.choice(X.size, 10, replace=False)\n X.reshape(-1)[mask] = np.nan\n\n estimator = clone(estimator_orig)\n\n y = _enforce_estimator_tags_y(estimator, y)\n\n 
set_random_state(estimator)\n estimator.fit(X, y)\n\n # pickle and unpickle!\n pickled_estimator = pickle.dumps(estimator)\n module_name = estimator.__module__\n if module_name.startswith('sklearn.') and not (\n \"test_\" in module_name or module_name.endswith(\"_testing\")\n ):\n # strict check for sklearn estimators that are not implemented in test\n # modules.\n assert b\"version\" in pickled_estimator\n unpickled_estimator = pickle.loads(pickled_estimator)\n\n result = dict()\n for method in check_methods:\n if hasattr(estimator, method):\n result[method] = getattr(estimator, method)(X)\n\n for method in result:\n unpickled_result = getattr(unpickled_estimator, method)(X)\n assert_allclose_dense_sparse(result[method], unpickled_result)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_partial_fit_n_features(name, estimator_orig):\n # check if number of features changes between calls to partial_fit.\n if not hasattr(estimator_orig, 'partial_fit'):\n return\n estimator = clone(estimator_orig)\n X, y = make_blobs(n_samples=50, random_state=1)\n X -= X.min()\n y = _enforce_estimator_tags_y(estimator_orig, y)\n\n try:\n if is_classifier(estimator):\n classes = np.unique(y)\n estimator.partial_fit(X, y, classes=classes)\n else:\n estimator.partial_fit(X, y)\n except NotImplementedError:\n return\n\n with raises(\n ValueError,\n err_msg=f\"The estimator {name} does not raise an error when the \"\n \"number of features changes between calls to partial_fit.\",\n ):\n estimator.partial_fit(X[:, :-1], y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_classifier_multioutput(name, estimator):\n n_samples, n_labels, n_classes = 42, 5, 3\n tags = _safe_tags(estimator)\n estimator = clone(estimator)\n X, y = make_multilabel_classification(random_state=42,\n n_samples=n_samples,\n n_labels=n_labels,\n n_classes=n_classes)\n estimator.fit(X, y)\n y_pred = estimator.predict(X)\n\n assert y_pred.shape == (n_samples, n_classes), (\n \"The shape of the prediction for multioutput data is \"\n \"incorrect. Expected {}, got {}.\"\n .format((n_samples, n_labels), y_pred.shape))\n assert y_pred.dtype.kind == 'i'\n\n if hasattr(estimator, \"decision_function\"):\n decision = estimator.decision_function(X)\n assert isinstance(decision, np.ndarray)\n assert decision.shape == (n_samples, n_classes), (\n \"The shape of the decision function output for \"\n \"multioutput data is incorrect. Expected {}, got {}.\"\n .format((n_samples, n_classes), decision.shape))\n\n dec_pred = (decision > 0).astype(int)\n dec_exp = estimator.classes_[dec_pred]\n assert_array_equal(dec_exp, y_pred)\n\n if hasattr(estimator, \"predict_proba\"):\n y_prob = estimator.predict_proba(X)\n\n if isinstance(y_prob, list) and not tags['poor_score']:\n for i in range(n_classes):\n assert y_prob[i].shape == (n_samples, 2), (\n \"The shape of the probability for multioutput data is\"\n \" incorrect. Expected {}, got {}.\"\n .format((n_samples, 2), y_prob[i].shape))\n assert_array_equal(\n np.argmax(y_prob[i], axis=1).astype(int),\n y_pred[:, i]\n )\n elif not tags['poor_score']:\n assert y_prob.shape == (n_samples, n_classes), (\n \"The shape of the probability for multioutput data is\"\n \" incorrect. 
Expected {}, got {}.\"\n .format((n_samples, n_classes), y_prob.shape))\n assert_array_equal(y_prob.round().astype(int), y_pred)\n\n if (hasattr(estimator, \"decision_function\") and\n hasattr(estimator, \"predict_proba\")):\n for i in range(n_classes):\n y_proba = estimator.predict_proba(X)[:, i]\n y_decision = estimator.decision_function(X)\n assert_array_equal(rankdata(y_proba), rankdata(y_decision[:, i]))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_regressor_multioutput(name, estimator):\n estimator = clone(estimator)\n n_samples = n_features = 10\n\n if not _is_pairwise_metric(estimator):\n n_samples = n_samples + 1\n\n X, y = make_regression(random_state=42, n_targets=5,\n n_samples=n_samples, n_features=n_features)\n X = _pairwise_estimator_convert_X(X, estimator)\n\n estimator.fit(X, y)\n y_pred = estimator.predict(X)\n\n assert y_pred.dtype == np.dtype('float64'), (\n \"Multioutput predictions by a regressor are expected to be\"\n \" floating-point precision. Got {} instead\".format(y_pred.dtype))\n assert y_pred.shape == y.shape, (\n \"The shape of the prediction for multioutput data is incorrect.\"\n \" Expected {}, got {}.\")\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_clustering(name, clusterer_orig, readonly_memmap=False):\n clusterer = clone(clusterer_orig)\n X, y = make_blobs(n_samples=50, random_state=1)\n X, y = shuffle(X, y, random_state=7)\n X = StandardScaler().fit_transform(X)\n rng = np.random.RandomState(7)\n X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))])\n\n if readonly_memmap:\n X, y, X_noise = create_memmap_backed_data([X, y, X_noise])\n\n n_samples, n_features = X.shape\n # catch deprecation and neighbors warnings\n if hasattr(clusterer, \"n_clusters\"):\n clusterer.set_params(n_clusters=3)\n set_random_state(clusterer)\n if name == 'AffinityPropagation':\n clusterer.set_params(preference=-100)\n clusterer.set_params(max_iter=100)\n\n # fit\n clusterer.fit(X)\n # with lists\n clusterer.fit(X.tolist())\n\n pred = clusterer.labels_\n assert pred.shape == (n_samples,)\n assert adjusted_rand_score(pred, y) > 0.4\n if _safe_tags(clusterer, key='non_deterministic'):\n return\n set_random_state(clusterer)\n with warnings.catch_warnings(record=True):\n pred2 = clusterer.fit_predict(X)\n assert_array_equal(pred, pred2)\n\n # fit_predict(X) and labels_ should be of type int\n assert pred.dtype in [np.dtype('int32'), np.dtype('int64')]\n assert pred2.dtype in [np.dtype('int32'), np.dtype('int64')]\n\n # Add noise to X to test the possible values of the labels\n labels = clusterer.fit_predict(X_noise)\n\n # There should be at least one sample in every cluster. 
Equivalently\n # labels_ should contain all the consecutive values between its\n # min and its max.\n labels_sorted = np.unique(labels)\n assert_array_equal(labels_sorted, np.arange(labels_sorted[0],\n labels_sorted[-1] + 1))\n\n # Labels are expected to start at 0 (no noise) or -1 (if noise)\n assert labels_sorted[0] in [0, -1]\n # Labels should be less than n_clusters - 1\n if hasattr(clusterer, 'n_clusters'):\n n_clusters = getattr(clusterer, 'n_clusters')\n assert n_clusters - 1 >= labels_sorted[-1]\n # else labels should be less than max(labels_) which is necessarily true\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_clusterer_compute_labels_predict(name, clusterer_orig):\n \"\"\"Check that predict is invariant of compute_labels.\"\"\"\n X, y = make_blobs(n_samples=20, random_state=0)\n clusterer = clone(clusterer_orig)\n set_random_state(clusterer)\n\n if hasattr(clusterer, \"compute_labels\"):\n # MiniBatchKMeans\n X_pred1 = clusterer.fit(X).predict(X)\n clusterer.set_params(compute_labels=False)\n X_pred2 = clusterer.fit(X).predict(X)\n assert_array_equal(X_pred1, X_pred2)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_classifiers_one_label(name, classifier_orig):\n error_string_fit = \"Classifier can't train when only one class is present.\"\n error_string_predict = (\"Classifier can't predict when only one class is \"\n \"present.\")\n rnd = np.random.RandomState(0)\n X_train = rnd.uniform(size=(10, 3))\n X_test = rnd.uniform(size=(10, 3))\n y = np.ones(10)\n # catch deprecation warnings\n with ignore_warnings(category=FutureWarning):\n classifier = clone(classifier_orig)\n with raises(\n ValueError, match=\"class\", may_pass=True, err_msg=error_string_fit\n ) as cm:\n classifier.fit(X_train, y)\n\n if cm.raised_and_matched:\n # ValueError was raised with proper error message\n return\n\n assert_array_equal(\n classifier.predict(X_test), y, err_msg=error_string_predict\n )\n\n\n@ignore_warnings # Warnings are raised by decision function\ndef check_classifiers_train(\n name, classifier_orig, readonly_memmap=False, X_dtype=\"float64\"\n):\n X_m, y_m = make_blobs(n_samples=300, random_state=0)\n X_m = X_m.astype(X_dtype)\n X_m, y_m = shuffle(X_m, y_m, random_state=7)\n X_m = StandardScaler().fit_transform(X_m)\n # generate binary problem from multi-class one\n y_b = y_m[y_m != 2]\n X_b = X_m[y_m != 2]\n\n if name in ['BernoulliNB', 'MultinomialNB', 'ComplementNB',\n 'CategoricalNB']:\n X_m -= X_m.min()\n X_b -= X_b.min()\n\n if readonly_memmap:\n X_m, y_m, X_b, y_b = create_memmap_backed_data([X_m, y_m, X_b, y_b])\n\n problems = [(X_b, y_b)]\n tags = _safe_tags(classifier_orig)\n if not tags['binary_only']:\n problems.append((X_m, y_m))\n\n for (X, y) in problems:\n classes = np.unique(y)\n n_classes = len(classes)\n n_samples, n_features = X.shape\n classifier = clone(classifier_orig)\n X = _pairwise_estimator_convert_X(X, classifier)\n y = _enforce_estimator_tags_y(classifier, y)\n\n set_random_state(classifier)\n # raises error on malformed input for fit\n if not tags[\"no_validation\"]:\n with raises(\n ValueError,\n err_msg=f\"The classifier {name} does not raise an error when \"\n \"incorrect/malformed input data for fit is passed. The number \"\n \"of training examples is not the same as the number of \"\n \"labels. 
Perhaps use check_X_y in fit.\",\n ):\n classifier.fit(X, y[:-1])\n\n # fit\n classifier.fit(X, y)\n # with lists\n classifier.fit(X.tolist(), y.tolist())\n assert hasattr(classifier, \"classes_\")\n y_pred = classifier.predict(X)\n\n assert y_pred.shape == (n_samples,)\n # training set performance\n if not tags['poor_score']:\n assert accuracy_score(y, y_pred) > 0.83\n\n # raises error on malformed input for predict\n msg_pairwise = (\n \"The classifier {} does not raise an error when shape of X in \"\n \" {} is not equal to (n_test_samples, n_training_samples)\")\n msg = (\"The classifier {} does not raise an error when the number of \"\n \"features in {} is different from the number of features in \"\n \"fit.\")\n\n if not tags[\"no_validation\"]:\n if _is_pairwise(classifier):\n with raises(\n ValueError,\n err_msg=msg_pairwise.format(name, \"predict\"),\n ):\n classifier.predict(X.reshape(-1, 1))\n else:\n with raises(ValueError, err_msg=msg.format(name, \"predict\")):\n classifier.predict(X.T)\n if hasattr(classifier, \"decision_function\"):\n try:\n # decision_function agrees with predict\n decision = classifier.decision_function(X)\n if n_classes == 2:\n if not tags[\"multioutput_only\"]:\n assert decision.shape == (n_samples,)\n else:\n assert decision.shape == (n_samples, 1)\n dec_pred = (decision.ravel() > 0).astype(int)\n assert_array_equal(dec_pred, y_pred)\n else:\n assert decision.shape == (n_samples, n_classes)\n assert_array_equal(np.argmax(decision, axis=1), y_pred)\n\n # raises error on malformed input for decision_function\n if not tags[\"no_validation\"]:\n if _is_pairwise(classifier):\n with raises(\n ValueError,\n err_msg=msg_pairwise.format(\n name, \"decision_function\"\n ),\n ):\n classifier.decision_function(X.reshape(-1, 1))\n else:\n with raises(\n ValueError,\n err_msg=msg.format(name, \"decision_function\"),\n ):\n classifier.decision_function(X.T)\n except NotImplementedError:\n pass\n\n if hasattr(classifier, \"predict_proba\"):\n # predict_proba agrees with predict\n y_prob = classifier.predict_proba(X)\n assert y_prob.shape == (n_samples, n_classes)\n assert_array_equal(np.argmax(y_prob, axis=1), y_pred)\n # check that probas for all classes sum to one\n assert_array_almost_equal(np.sum(y_prob, axis=1),\n np.ones(n_samples))\n if not tags[\"no_validation\"]:\n # raises error on malformed input for predict_proba\n if _is_pairwise(classifier_orig):\n with raises(\n ValueError,\n err_msg=msg_pairwise.format(name, \"predict_proba\"),\n ):\n classifier.predict_proba(X.reshape(-1, 1))\n else:\n with raises(\n ValueError,\n err_msg=msg.format(name, \"predict_proba\"),\n ):\n classifier.predict_proba(X.T)\n if hasattr(classifier, \"predict_log_proba\"):\n # predict_log_proba is a transformation of predict_proba\n y_log_prob = classifier.predict_log_proba(X)\n assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)\n assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))\n\n\ndef check_outlier_corruption(num_outliers, expected_outliers, decision):\n # Check for deviation from the precise given contamination level that may\n # be due to ties in the anomaly scores.\n if num_outliers < expected_outliers:\n start = num_outliers\n end = expected_outliers + 1\n else:\n start = expected_outliers\n end = num_outliers + 1\n\n # ensure that all values in the 'critical area' are tied,\n # leading to the observed discrepancy between provided\n # and actual contamination levels.\n sorted_decision = np.sort(decision)\n msg = ('The number of predicted outliers is not 
equal to the expected '\n 'number of outliers and this difference is not explained by the '\n 'number of ties in the decision_function values')\n assert len(np.unique(sorted_decision[start:end])) == 1, msg\n\n\ndef check_outliers_train(name, estimator_orig, readonly_memmap=True):\n n_samples = 300\n X, _ = make_blobs(n_samples=n_samples, random_state=0)\n X = shuffle(X, random_state=7)\n\n if readonly_memmap:\n X = create_memmap_backed_data(X)\n\n n_samples, n_features = X.shape\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n\n # fit\n estimator.fit(X)\n # with lists\n estimator.fit(X.tolist())\n\n y_pred = estimator.predict(X)\n assert y_pred.shape == (n_samples,)\n assert y_pred.dtype.kind == 'i'\n assert_array_equal(np.unique(y_pred), np.array([-1, 1]))\n\n decision = estimator.decision_function(X)\n scores = estimator.score_samples(X)\n for output in [decision, scores]:\n assert output.dtype == np.dtype('float')\n assert output.shape == (n_samples,)\n\n # raises error on malformed input for predict\n with raises(ValueError):\n estimator.predict(X.T)\n\n # decision_function agrees with predict\n dec_pred = (decision >= 0).astype(int)\n dec_pred[dec_pred == 0] = -1\n assert_array_equal(dec_pred, y_pred)\n\n # raises error on malformed input for decision_function\n with raises(ValueError):\n estimator.decision_function(X.T)\n\n # decision_function is a translation of score_samples\n y_dec = scores - estimator.offset_\n assert_allclose(y_dec, decision)\n\n # raises error on malformed input for score_samples\n with raises(ValueError):\n estimator.score_samples(X.T)\n\n # contamination parameter (not for OneClassSVM which has the nu parameter)\n if (hasattr(estimator, 'contamination')\n and not hasattr(estimator, 'novelty')):\n # proportion of outliers equal to contamination parameter when not\n # set to 'auto'. This is true for the training set and cannot thus be\n # checked as follows for estimators with a novelty parameter such as\n # LocalOutlierFactor (tested in check_outliers_fit_predict)\n expected_outliers = 30\n contamination = expected_outliers / n_samples\n estimator.set_params(contamination=contamination)\n estimator.fit(X)\n y_pred = estimator.predict(X)\n\n num_outliers = np.sum(y_pred != 1)\n # num_outliers should be equal to expected_outliers unless\n # there are ties in the decision_function values. this can\n # only be tested for estimators with a decision_function\n # method, i.e. 
all estimators except LOF which is already\n # excluded from this if branch.\n if num_outliers != expected_outliers:\n decision = estimator.decision_function(X)\n check_outlier_corruption(num_outliers, expected_outliers, decision)\n\n # raises error when contamination is a scalar and not in [0,1]\n for contamination in [-0.5, 2.3]:\n estimator.set_params(contamination=contamination)\n with raises(ValueError):\n estimator.fit(X)\n\n\n@ignore_warnings(category=(FutureWarning))\ndef check_classifiers_multilabel_representation_invariance(\n name, classifier_orig\n):\n\n X, y = make_multilabel_classification(n_samples=100, n_features=20,\n n_classes=5, n_labels=3,\n length=50, allow_unlabeled=True,\n random_state=0)\n\n X_train, y_train = X[:80], y[:80]\n X_test = X[80:]\n\n y_train_list_of_lists = y_train.tolist()\n y_train_list_of_arrays = list(y_train)\n\n classifier = clone(classifier_orig)\n set_random_state(classifier)\n\n y_pred = classifier.fit(X_train, y_train).predict(X_test)\n\n y_pred_list_of_lists = classifier.fit(\n X_train, y_train_list_of_lists).predict(X_test)\n\n y_pred_list_of_arrays = classifier.fit(\n X_train, y_train_list_of_arrays).predict(X_test)\n\n assert_array_equal(y_pred, y_pred_list_of_arrays)\n assert_array_equal(y_pred, y_pred_list_of_lists)\n\n assert y_pred.dtype == y_pred_list_of_arrays.dtype\n assert y_pred.dtype == y_pred_list_of_lists.dtype\n assert type(y_pred) == type(y_pred_list_of_arrays)\n assert type(y_pred) == type(y_pred_list_of_lists)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_fit_returns_self(\n name, estimator_orig, readonly_memmap=False\n):\n \"\"\"Check if self is returned when calling fit.\"\"\"\n X, y = make_blobs(random_state=0, n_samples=21)\n # some want non-negative input\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n if readonly_memmap:\n X, y = create_memmap_backed_data([X, y])\n\n set_random_state(estimator)\n assert estimator.fit(X, y) is estimator\n\n\n@ignore_warnings\ndef check_estimators_unfitted(name, estimator_orig):\n \"\"\"Check that predict raises an exception in an unfitted estimator.\n\n Unfitted estimators should raise a NotFittedError.\n \"\"\"\n # Common test for Regressors, Classifiers and Outlier detection estimators\n X, y = _regression_dataset()\n\n estimator = clone(estimator_orig)\n for method in ('decision_function', 'predict', 'predict_proba',\n 'predict_log_proba'):\n if hasattr(estimator, method):\n with raises(NotFittedError):\n getattr(estimator, method)(X)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_supervised_y_2d(name, estimator_orig):\n tags = _safe_tags(estimator_orig)\n rnd = np.random.RandomState(0)\n n_samples = 30\n X = _pairwise_estimator_convert_X(\n rnd.uniform(size=(n_samples, 3)), estimator_orig\n )\n y = np.arange(n_samples) % 3\n y = _enforce_estimator_tags_y(estimator_orig, y)\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n # fit\n estimator.fit(X, y)\n y_pred = estimator.predict(X)\n\n set_random_state(estimator)\n # Check that when a 2D y is given, a DataConversionWarning is\n # raised\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\", DataConversionWarning)\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n estimator.fit(X, y[:, np.newaxis])\n y_pred_2d = estimator.predict(X)\n msg = \"expected 1 DataConversionWarning, got: %s\" % (\n \", \".join([str(w_x) for w_x in w]))\n if not 
tags['multioutput']:\n # check that we warned if we don't support multi-output\n assert len(w) > 0, msg\n assert \"DataConversionWarning('A column-vector y\" \\\n \" was passed when a 1d array was expected\" in msg\n assert_allclose(y_pred.ravel(), y_pred_2d.ravel())\n\n\n@ignore_warnings\ndef check_classifiers_predictions(X, y, name, classifier_orig):\n classes = np.unique(y)\n classifier = clone(classifier_orig)\n if name == 'BernoulliNB':\n X = X > X.mean()\n set_random_state(classifier)\n\n classifier.fit(X, y)\n y_pred = classifier.predict(X)\n\n if hasattr(classifier, \"decision_function\"):\n decision = classifier.decision_function(X)\n assert isinstance(decision, np.ndarray)\n if len(classes) == 2:\n dec_pred = (decision.ravel() > 0).astype(int)\n dec_exp = classifier.classes_[dec_pred]\n assert_array_equal(dec_exp, y_pred,\n err_msg=\"decision_function does not match \"\n \"classifier for %r: expected '%s', got '%s'\" %\n (classifier, \", \".join(map(str, dec_exp)),\n \", \".join(map(str, y_pred))))\n elif getattr(classifier, 'decision_function_shape', 'ovr') == 'ovr':\n decision_y = np.argmax(decision, axis=1).astype(int)\n y_exp = classifier.classes_[decision_y]\n assert_array_equal(y_exp, y_pred,\n err_msg=\"decision_function does not match \"\n \"classifier for %r: expected '%s', got '%s'\" %\n (classifier, \", \".join(map(str, y_exp)),\n \", \".join(map(str, y_pred))))\n\n # training set performance\n if name != \"ComplementNB\":\n # This is a pathological data set for ComplementNB.\n # For some specific cases 'ComplementNB' predicts less classes\n # than expected\n assert_array_equal(np.unique(y), np.unique(y_pred))\n assert_array_equal(classes, classifier.classes_,\n err_msg=\"Unexpected classes_ attribute for %r: \"\n \"expected '%s', got '%s'\" %\n (classifier, \", \".join(map(str, classes)),\n \", \".join(map(str, classifier.classes_))))\n\n\ndef _choose_check_classifiers_labels(name, y, y_names):\n # Semisupervised classifers use -1 as the indicator for an unlabeled\n # sample.\n return y if name in [\"LabelPropagation\",\n \"LabelSpreading\",\n \"SelfTrainingClassifier\"] else y_names\n\n\ndef check_classifiers_classes(name, classifier_orig):\n X_multiclass, y_multiclass = make_blobs(n_samples=30, random_state=0,\n cluster_std=0.1)\n X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass,\n random_state=7)\n X_multiclass = StandardScaler().fit_transform(X_multiclass)\n # We need to make sure that we have non negative data, for things\n # like NMF\n X_multiclass -= X_multiclass.min() - .1\n\n X_binary = X_multiclass[y_multiclass != 2]\n y_binary = y_multiclass[y_multiclass != 2]\n\n X_multiclass = _pairwise_estimator_convert_X(X_multiclass, classifier_orig)\n X_binary = _pairwise_estimator_convert_X(X_binary, classifier_orig)\n\n labels_multiclass = [\"one\", \"two\", \"three\"]\n labels_binary = [\"one\", \"two\"]\n\n y_names_multiclass = np.take(labels_multiclass, y_multiclass)\n y_names_binary = np.take(labels_binary, y_binary)\n\n problems = [(X_binary, y_binary, y_names_binary)]\n if not _safe_tags(classifier_orig, key='binary_only'):\n problems.append((X_multiclass, y_multiclass, y_names_multiclass))\n\n for X, y, y_names in problems:\n for y_names_i in [y_names, y_names.astype('O')]:\n y_ = _choose_check_classifiers_labels(name, y, y_names_i)\n check_classifiers_predictions(X, y_, name, classifier_orig)\n\n labels_binary = [-1, 1]\n y_names_binary = np.take(labels_binary, y_binary)\n y_binary = _choose_check_classifiers_labels(name, y_binary, 
y_names_binary)\n check_classifiers_predictions(X_binary, y_binary, name, classifier_orig)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_regressors_int(name, regressor_orig):\n X, _ = _regression_dataset()\n X = _pairwise_estimator_convert_X(X[:50], regressor_orig)\n rnd = np.random.RandomState(0)\n y = rnd.randint(3, size=X.shape[0])\n y = _enforce_estimator_tags_y(regressor_orig, y)\n rnd = np.random.RandomState(0)\n # separate estimators to control random seeds\n regressor_1 = clone(regressor_orig)\n regressor_2 = clone(regressor_orig)\n set_random_state(regressor_1)\n set_random_state(regressor_2)\n\n if name in CROSS_DECOMPOSITION:\n y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])\n y_ = y_.T\n else:\n y_ = y\n\n # fit\n regressor_1.fit(X, y_)\n pred1 = regressor_1.predict(X)\n regressor_2.fit(X, y_.astype(float))\n pred2 = regressor_2.predict(X)\n assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_regressors_train(\n name, regressor_orig, readonly_memmap=False, X_dtype=np.float64\n):\n X, y = _regression_dataset()\n X = X.astype(X_dtype)\n X = _pairwise_estimator_convert_X(X, regressor_orig)\n y = scale(y) # X is already scaled\n regressor = clone(regressor_orig)\n y = _enforce_estimator_tags_y(regressor, y)\n if name in CROSS_DECOMPOSITION:\n rnd = np.random.RandomState(0)\n y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])\n y_ = y_.T\n else:\n y_ = y\n\n if readonly_memmap:\n X, y, y_ = create_memmap_backed_data([X, y, y_])\n\n if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):\n # linear regressors need to set alpha, but not generalized CV ones\n regressor.alpha = 0.01\n if name == 'PassiveAggressiveRegressor':\n regressor.C = 0.01\n\n # raises error on malformed input for fit\n with raises(\n ValueError,\n err_msg=f\"The classifier {name} does not raise an error when \"\n \"incorrect/malformed input data for fit is passed. The number of \"\n \"training examples is not the same as the number of labels. Perhaps \"\n \"use check_X_y in fit.\",\n ):\n regressor.fit(X, y[:-1])\n # fit\n set_random_state(regressor)\n regressor.fit(X, y_)\n regressor.fit(X.tolist(), y_.tolist())\n y_pred = regressor.predict(X)\n assert y_pred.shape == y_.shape\n\n # TODO: find out why PLS and CCA fail. 
RANSAC is random\n # and furthermore assumes the presence of outliers, hence\n # skipped\n if not _safe_tags(regressor, key=\"poor_score\"):\n assert regressor.score(X, y_) > 0.5\n\n\n@ignore_warnings\ndef check_regressors_no_decision_function(name, regressor_orig):\n # check that regressors don't have a decision_function, predict_proba, or\n # predict_log_proba method.\n rng = np.random.RandomState(0)\n regressor = clone(regressor_orig)\n\n X = rng.normal(size=(10, 4))\n X = _pairwise_estimator_convert_X(X, regressor_orig)\n y = _enforce_estimator_tags_y(regressor, X[:, 0])\n\n regressor.fit(X, y)\n funcs = [\"decision_function\", \"predict_proba\", \"predict_log_proba\"]\n for func_name in funcs:\n assert not hasattr(regressor, func_name)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_class_weight_classifiers(name, classifier_orig):\n\n if _safe_tags(classifier_orig, key='binary_only'):\n problems = [2]\n else:\n problems = [2, 3]\n\n for n_centers in problems:\n # create a very noisy dataset\n X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,\n random_state=0)\n\n # can't use gram_if_pairwise() here, setting up gram matrix manually\n if _is_pairwise(classifier_orig):\n X_test = rbf_kernel(X_test, X_train)\n X_train = rbf_kernel(X_train, X_train)\n\n n_centers = len(np.unique(y_train))\n\n if n_centers == 2:\n class_weight = {0: 1000, 1: 0.0001}\n else:\n class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}\n\n classifier = clone(classifier_orig).set_params(\n class_weight=class_weight)\n if hasattr(classifier, \"n_iter\"):\n classifier.set_params(n_iter=100)\n if hasattr(classifier, \"max_iter\"):\n classifier.set_params(max_iter=1000)\n if hasattr(classifier, \"min_weight_fraction_leaf\"):\n classifier.set_params(min_weight_fraction_leaf=0.01)\n if hasattr(classifier, \"n_iter_no_change\"):\n classifier.set_params(n_iter_no_change=20)\n\n set_random_state(classifier)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n # XXX: Generally can use 0.89 here. 
On Windows, LinearSVC gets\n # 0.88 (Issue #9111)\n if not _safe_tags(classifier_orig, key='poor_score'):\n assert np.mean(y_pred == 0) > 0.87\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_class_weight_balanced_classifiers(\n name, classifier_orig, X_train, y_train, X_test, y_test, weights\n):\n classifier = clone(classifier_orig)\n if hasattr(classifier, \"n_iter\"):\n classifier.set_params(n_iter=100)\n if hasattr(classifier, \"max_iter\"):\n classifier.set_params(max_iter=1000)\n\n set_random_state(classifier)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n\n classifier.set_params(class_weight='balanced')\n classifier.fit(X_train, y_train)\n y_pred_balanced = classifier.predict(X_test)\n assert (f1_score(y_test, y_pred_balanced, average='weighted') >\n f1_score(y_test, y_pred, average='weighted'))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_class_weight_balanced_linear_classifier(name, Classifier):\n \"\"\"Test class weights with non-contiguous class labels.\"\"\"\n # this is run on classes, not instances, though this should be changed\n X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],\n [1.0, 1.0], [1.0, 0.0]])\n y = np.array([1, 1, 1, -1, -1])\n\n classifier = Classifier()\n\n if hasattr(classifier, \"n_iter\"):\n # This is a very small dataset, default n_iter are likely to prevent\n # convergence\n classifier.set_params(n_iter=1000)\n if hasattr(classifier, \"max_iter\"):\n classifier.set_params(max_iter=1000)\n if hasattr(classifier, 'cv'):\n classifier.set_params(cv=3)\n set_random_state(classifier)\n\n # Let the model compute the class frequencies\n classifier.set_params(class_weight='balanced')\n coef_balanced = classifier.fit(X, y).coef_.copy()\n\n # Count each label occurrence to reweight manually\n n_samples = len(y)\n n_classes = float(len(np.unique(y)))\n\n class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),\n -1: n_samples / (np.sum(y == -1) * n_classes)}\n classifier.set_params(class_weight=class_weight)\n coef_manual = classifier.fit(X, y).coef_.copy()\n\n assert_allclose(coef_balanced, coef_manual,\n err_msg=\"Classifier %s is not computing\"\n \" class_weight=balanced properly.\"\n % name)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_overwrite_params(name, estimator_orig):\n X, y = make_blobs(random_state=0, n_samples=21)\n # some want non-negative input\n X -= X.min()\n X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)\n estimator = clone(estimator_orig)\n y = _enforce_estimator_tags_y(estimator, y)\n\n set_random_state(estimator)\n\n # Make a physical copy of the original estimator parameters before fitting.\n params = estimator.get_params()\n original_params = deepcopy(params)\n\n # Fit the model\n estimator.fit(X, y)\n\n # Compare the state of the model parameters with the original parameters\n new_params = estimator.get_params()\n for param_name, original_value in original_params.items():\n new_value = new_params[param_name]\n\n # We should never change or mutate the internal state of input\n # parameters by default. 
To check this we use the joblib.hash function\n # that introspects recursively any subobjects to compute a checksum.\n # The only exception to this rule of immutable constructor parameters\n # is possible RandomState instance but in this check we explicitly\n # fixed the random_state params recursively to be integer seeds.\n assert joblib.hash(new_value) == joblib.hash(original_value), (\n \"Estimator %s should not change or mutate \"\n \" the parameter %s from %s to %s during fit.\"\n % (name, param_name, original_value, new_value))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_no_attributes_set_in_init(name, estimator_orig):\n \"\"\"Check setting during init.\"\"\"\n try:\n # Clone fails if the estimator does not store\n # all parameters as an attribute during init\n estimator = clone(estimator_orig)\n except AttributeError:\n raise AttributeError(f\"Estimator {name} should store all \"\n \"parameters as an attribute during init.\")\n\n if hasattr(type(estimator).__init__, \"deprecated_original\"):\n return\n\n init_params = _get_args(type(estimator).__init__)\n if IS_PYPY:\n # __init__ signature has additional objects in PyPy\n for key in ['obj']:\n if key in init_params:\n init_params.remove(key)\n parents_init_params = [param for params_parent in\n (_get_args(parent) for parent in\n type(estimator).__mro__)\n for param in params_parent]\n\n # Test for no setting apart from parameters during init\n invalid_attr = (set(vars(estimator)) - set(init_params)\n - set(parents_init_params))\n assert not invalid_attr, (\n \"Estimator %s should not set any attribute apart\"\n \" from parameters during init. Found attributes %s.\"\n % (name, sorted(invalid_attr)))\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_sparsify_coefficients(name, estimator_orig):\n X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],\n [-1, -2], [2, 2], [-2, -2]])\n y = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])\n y = _enforce_estimator_tags_y(estimator_orig, y)\n est = clone(estimator_orig)\n\n est.fit(X, y)\n pred_orig = est.predict(X)\n\n # test sparsify with dense inputs\n est.sparsify()\n assert sparse.issparse(est.coef_)\n pred = est.predict(X)\n assert_array_equal(pred, pred_orig)\n\n # pickle and unpickle with sparse coef_\n est = pickle.loads(pickle.dumps(est))\n assert sparse.issparse(est.coef_)\n pred = est.predict(X)\n assert_array_equal(pred, pred_orig)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_classifier_data_not_an_array(name, estimator_orig):\n X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1],\n [0, 3], [1, 0], [2, 0], [4, 4], [2, 3], [3, 2]])\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = np.array([1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2])\n y = _enforce_estimator_tags_y(estimator_orig, y)\n for obj_type in [\"NotAnArray\", \"PandasDataframe\"]:\n check_estimators_data_not_an_array(name, estimator_orig, X, y,\n obj_type)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_regressor_data_not_an_array(name, estimator_orig):\n X, y = _regression_dataset()\n X = _pairwise_estimator_convert_X(X, estimator_orig)\n y = _enforce_estimator_tags_y(estimator_orig, y)\n for obj_type in [\"NotAnArray\", \"PandasDataframe\"]:\n check_estimators_data_not_an_array(name, estimator_orig, X, y,\n obj_type)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_estimators_data_not_an_array(name, estimator_orig, X, y, obj_type):\n if name in CROSS_DECOMPOSITION:\n raise SkipTest(\"Skipping check_estimators_data_not_an_array \"\n \"for cross 
decomposition module as estimators \"\n \"are not deterministic.\")\n # separate estimators to control random seeds\n estimator_1 = clone(estimator_orig)\n estimator_2 = clone(estimator_orig)\n set_random_state(estimator_1)\n set_random_state(estimator_2)\n\n if obj_type not in [\"NotAnArray\", 'PandasDataframe']:\n raise ValueError(\"Data type {0} not supported\".format(obj_type))\n\n if obj_type == \"NotAnArray\":\n y_ = _NotAnArray(np.asarray(y))\n X_ = _NotAnArray(np.asarray(X))\n else:\n # Here pandas objects (Series and DataFrame) are tested explicitly\n # because some estimators may handle them (especially their indexing)\n # specially.\n try:\n import pandas as pd\n y_ = np.asarray(y)\n if y_.ndim == 1:\n y_ = pd.Series(y_)\n else:\n y_ = pd.DataFrame(y_)\n X_ = pd.DataFrame(np.asarray(X))\n\n except ImportError:\n raise SkipTest(\"pandas is not installed: not checking estimators \"\n \"for pandas objects.\")\n\n # fit\n estimator_1.fit(X_, y_)\n pred1 = estimator_1.predict(X_)\n estimator_2.fit(X, y)\n pred2 = estimator_2.predict(X)\n assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)\n\n\ndef check_parameters_default_constructible(name, Estimator):\n # test default-constructibility\n # get rid of deprecation warnings\n\n Estimator = Estimator.__class__\n\n with ignore_warnings(category=FutureWarning):\n estimator = _construct_instance(Estimator)\n # test cloning\n clone(estimator)\n # test __repr__\n repr(estimator)\n # test that set_params returns self\n assert estimator.set_params() is estimator\n\n # test if init does nothing but set parameters\n # this is important for grid_search etc.\n # We get the default parameters from init and then\n # compare these against the actual values of the attributes.\n\n # this comes from getattr. Gets rid of deprecation decorator.\n init = getattr(estimator.__init__, 'deprecated_original',\n estimator.__init__)\n\n try:\n def param_filter(p):\n \"\"\"Identify hyper parameters of an estimator.\"\"\"\n return (p.name != 'self' and\n p.kind != p.VAR_KEYWORD and\n p.kind != p.VAR_POSITIONAL)\n\n init_params = [p for p in signature(init).parameters.values()\n if param_filter(p)]\n\n except (TypeError, ValueError):\n # init is not a python function.\n # true for mixins\n return\n params = estimator.get_params()\n # they can need a non-default argument\n init_params = init_params[len(getattr(\n estimator, '_required_parameters', [])):]\n\n for init_param in init_params:\n assert init_param.default != init_param.empty, (\n \"parameter %s for %s has no default value\"\n % (init_param.name, type(estimator).__name__))\n allowed_types = {\n str,\n int,\n float,\n bool,\n tuple,\n type(None),\n type,\n types.FunctionType,\n joblib.Memory,\n }\n # Any numpy numeric such as np.int32.\n allowed_types.update(np.core.numerictypes.allTypes.values())\n assert type(init_param.default) in allowed_types, (\n f\"Parameter '{init_param.name}' of estimator \"\n f\"'{Estimator.__name__}' is of type \"\n f\"{type(init_param.default).__name__} which is not \"\n f\"allowed. All init parameters have to be immutable to \"\n f\"make cloning possible. Therefore we restrict the set of \"\n f\"legal types to \"\n f\"{set(type.__name__ for type in allowed_types)}.\"\n )\n if init_param.name not in params.keys():\n # deprecated parameter, not in get_params\n assert init_param.default is None, (\n f\"Estimator parameter '{init_param.name}' of estimator \"\n f\"'{Estimator.__name__}' is not returned by get_params. 
\"\n f\"If it is deprecated, set its default value to None.\"\n )\n continue\n\n param_value = params[init_param.name]\n if isinstance(param_value, np.ndarray):\n assert_array_equal(param_value, init_param.default)\n else:\n failure_text = (\n f\"Parameter {init_param.name} was mutated on init. All \"\n f\"parameters must be stored unchanged.\"\n )\n if is_scalar_nan(param_value):\n # Allows to set default parameters to np.nan\n assert param_value is init_param.default, failure_text\n else:\n assert param_value == init_param.default, failure_text\n\n\ndef _enforce_estimator_tags_y(estimator, y):\n # Estimators with a `requires_positive_y` tag only accept strictly positive\n # data\n if _safe_tags(estimator, key=\"requires_positive_y\"):\n # Create strictly positive y. The minimal increment above 0 is 1, as\n # y could be of integer dtype.\n y += 1 + abs(y.min())\n # Estimators with a `binary_only` tag only accept up to two unique y values\n if _safe_tags(estimator, key=\"binary_only\") and y.size > 0:\n y = np.where(y == y.flat[0], y, y.flat[0] + 1)\n # Estimators in mono_output_task_error raise ValueError if y is of 1-D\n # Convert into a 2-D y for those estimators.\n if _safe_tags(estimator, key=\"multioutput_only\"):\n return np.reshape(y, (-1, 1))\n return y\n\n\ndef _enforce_estimator_tags_x(estimator, X):\n # Pairwise estimators only accept\n # X of shape (`n_samples`, `n_samples`)\n if _is_pairwise(estimator):\n X = X.dot(X.T)\n # Estimators with `1darray` in `X_types` tag only accept\n # X of shape (`n_samples`,)\n if '1darray' in _safe_tags(estimator, key='X_types'):\n X = X[:, 0]\n # Estimators with a `requires_positive_X` tag only accept\n # strictly positive data\n if _safe_tags(estimator, key='requires_positive_X'):\n X -= X.min()\n return X\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_non_transformer_estimators_n_iter(name, estimator_orig):\n # Test that estimators that are not transformers with a parameter\n # max_iter, return the attribute of n_iter_ at least 1.\n\n # These models are dependent on external solvers like\n # libsvm and accessing the iter parameter is non-trivial.\n # SelfTrainingClassifier does not perform an iteration if all samples are\n # labeled, hence n_iter_ = 0 is valid.\n not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',\n 'RidgeClassifier', 'SVC', 'RandomizedLasso',\n 'LogisticRegressionCV', 'LinearSVC',\n 'LogisticRegression', 'SelfTrainingClassifier']\n\n # Tested in test_transformer_n_iter\n not_run_check_n_iter += CROSS_DECOMPOSITION\n if name in not_run_check_n_iter:\n return\n\n # LassoLars stops early for the default alpha=1.0 the iris dataset.\n if name == 'LassoLars':\n estimator = clone(estimator_orig).set_params(alpha=0.)\n else:\n estimator = clone(estimator_orig)\n if hasattr(estimator, 'max_iter'):\n iris = load_iris()\n X, y_ = iris.data, iris.target\n y_ = _enforce_estimator_tags_y(estimator, y_)\n\n set_random_state(estimator, 0)\n\n estimator.fit(X, y_)\n\n assert estimator.n_iter_ >= 1\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_transformer_n_iter(name, estimator_orig):\n # Test that transformers with a parameter max_iter, return the\n # attribute of n_iter_ at least 1.\n estimator = clone(estimator_orig)\n if hasattr(estimator, \"max_iter\"):\n if name in CROSS_DECOMPOSITION:\n # Check using default data\n X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]\n y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]\n\n else:\n X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 
1]],\n random_state=0, n_features=2, cluster_std=0.1)\n X -= X.min() - 0.1\n set_random_state(estimator, 0)\n estimator.fit(X, y_)\n\n # These return a n_iter per component.\n if name in CROSS_DECOMPOSITION:\n for iter_ in estimator.n_iter_:\n assert iter_ >= 1\n else:\n assert estimator.n_iter_ >= 1\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_get_params_invariance(name, estimator_orig):\n # Checks if get_params(deep=False) is a subset of get_params(deep=True)\n e = clone(estimator_orig)\n\n shallow_params = e.get_params(deep=False)\n deep_params = e.get_params(deep=True)\n\n assert all(item in deep_params.items() for item in\n shallow_params.items())\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_set_params(name, estimator_orig):\n # Check that get_params() returns the same thing\n # before and after set_params() with some fuzz\n estimator = clone(estimator_orig)\n\n orig_params = estimator.get_params(deep=False)\n msg = \"get_params result does not match what was passed to set_params\"\n\n estimator.set_params(**orig_params)\n curr_params = estimator.get_params(deep=False)\n assert set(orig_params.keys()) == set(curr_params.keys()), msg\n for k, v in curr_params.items():\n assert orig_params[k] is v, msg\n\n # some fuzz values\n test_values = [-np.inf, np.inf, None]\n\n test_params = deepcopy(orig_params)\n for param_name in orig_params.keys():\n default_value = orig_params[param_name]\n for value in test_values:\n test_params[param_name] = value\n try:\n estimator.set_params(**test_params)\n except (TypeError, ValueError) as e:\n e_type = e.__class__.__name__\n # Exception occurred, possibly parameter validation\n warnings.warn(\"{0} occurred during set_params of param {1} on \"\n \"{2}. It is recommended to delay parameter \"\n \"validation until fit.\".format(e_type,\n param_name,\n name))\n\n change_warning_msg = \"Estimator's parameters changed after \" \\\n \"set_params raised {}\".format(e_type)\n params_before_exception = curr_params\n curr_params = estimator.get_params(deep=False)\n try:\n assert (set(params_before_exception.keys()) ==\n set(curr_params.keys()))\n for k, v in curr_params.items():\n assert params_before_exception[k] is v\n except AssertionError:\n warnings.warn(change_warning_msg)\n else:\n curr_params = estimator.get_params(deep=False)\n assert (set(test_params.keys()) ==\n set(curr_params.keys())), msg\n for k, v in curr_params.items():\n assert test_params[k] is v, msg\n test_params[param_name] = default_value\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_classifiers_regression_target(name, estimator_orig):\n # Check if classifier throws an exception when fed regression targets\n\n X, y = _regression_dataset()\n\n X = X + 1 + abs(X.min(axis=0)) # be sure that X is non-negative\n e = clone(estimator_orig)\n msg = \"Unknown label type: \"\n if not _safe_tags(e, key=\"no_validation\"):\n with raises(ValueError, match=msg):\n e.fit(X, y)\n\n\n@ignore_warnings(category=FutureWarning)\ndef check_decision_proba_consistency(name, estimator_orig):\n # Check whether an estimator having both decision_function and\n # predict_proba methods has outputs with perfect rank correlation.\n\n centers = [(2, 2), (4, 4)]\n X, y = make_blobs(n_samples=100, random_state=0, n_features=4,\n centers=centers, cluster_std=1.0, shuffle=True)\n X_test = np.random.randn(20, 2) + 4\n estimator = clone(estimator_orig)\n\n if (hasattr(estimator, \"decision_function\") and\n hasattr(estimator, \"predict_proba\")):\n\n estimator.fit(X, y)\n # Since the link 
function from decision_function() to predict_proba()\n # is sometimes not precise enough (typically expit), we round to the\n # 10th decimal to avoid numerical issues.\n a = estimator.predict_proba(X_test)[:, 1].round(decimals=10)\n b = estimator.decision_function(X_test).round(decimals=10)\n assert_array_equal(rankdata(a), rankdata(b))\n\n\ndef check_outliers_fit_predict(name, estimator_orig):\n # Check fit_predict for outlier detectors.\n\n n_samples = 300\n X, _ = make_blobs(n_samples=n_samples, random_state=0)\n X = shuffle(X, random_state=7)\n n_samples, n_features = X.shape\n estimator = clone(estimator_orig)\n\n set_random_state(estimator)\n\n y_pred = estimator.fit_predict(X)\n assert y_pred.shape == (n_samples,)\n assert y_pred.dtype.kind == 'i'\n assert_array_equal(np.unique(y_pred), np.array([-1, 1]))\n\n # check fit_predict = fit.predict when the estimator has both a predict and\n # a fit_predict method. recall that it is already assumed here that the\n # estimator has a fit_predict method\n if hasattr(estimator, 'predict'):\n y_pred_2 = estimator.fit(X).predict(X)\n assert_array_equal(y_pred, y_pred_2)\n\n if hasattr(estimator, \"contamination\"):\n # proportion of outliers equal to contamination parameter when not\n # set to 'auto'\n expected_outliers = 30\n contamination = float(expected_outliers)/n_samples\n estimator.set_params(contamination=contamination)\n y_pred = estimator.fit_predict(X)\n\n num_outliers = np.sum(y_pred != 1)\n # num_outliers should be equal to expected_outliers unless\n # there are ties in the decision_function values. this can\n # only be tested for estimators with a decision_function\n # method\n if (num_outliers != expected_outliers and\n hasattr(estimator, 'decision_function')):\n decision = estimator.decision_function(X)\n check_outlier_corruption(num_outliers, expected_outliers, decision)\n\n # raises error when contamination is a scalar and not in [0,1]\n for contamination in [-0.5, 2.3]:\n estimator.set_params(contamination=contamination)\n with raises(ValueError):\n estimator.fit_predict(X)\n\n\ndef check_fit_non_negative(name, estimator_orig):\n # Check that proper warning is raised for non-negative X\n # when tag requires_positive_X is present\n X = np.array([[-1., 1], [-1., 1]])\n y = np.array([1, 2])\n estimator = clone(estimator_orig)\n with raises(ValueError):\n estimator.fit(X, y)\n\n\ndef check_fit_idempotent(name, estimator_orig):\n # Check that est.fit(X) is the same as est.fit(X).fit(X). Ideally we would\n # check that the estimated parameters during training (e.g. coefs_) are\n # the same, but having a universal comparison function for those\n # attributes is difficult and full of edge cases. 
So instead we check that\n # predict(), predict_proba(), decision_function() and transform() return\n # the same results.\n\n check_methods = [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]\n rng = np.random.RandomState(0)\n\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n if 'warm_start' in estimator.get_params().keys():\n estimator.set_params(warm_start=False)\n\n n_samples = 100\n X = rng.normal(loc=100, size=(n_samples, 2))\n X = _pairwise_estimator_convert_X(X, estimator)\n if is_regressor(estimator_orig):\n y = rng.normal(size=n_samples)\n else:\n y = rng.randint(low=0, high=2, size=n_samples)\n y = _enforce_estimator_tags_y(estimator, y)\n\n train, test = next(ShuffleSplit(test_size=.2, random_state=rng).split(X))\n X_train, y_train = _safe_split(estimator, X, y, train)\n X_test, y_test = _safe_split(estimator, X, y, test, train)\n\n # Fit for the first time\n estimator.fit(X_train, y_train)\n\n result = {method: getattr(estimator, method)(X_test)\n for method in check_methods\n if hasattr(estimator, method)}\n\n # Fit again\n set_random_state(estimator)\n estimator.fit(X_train, y_train)\n\n for method in check_methods:\n if hasattr(estimator, method):\n new_result = getattr(estimator, method)(X_test)\n if np.issubdtype(new_result.dtype, np.floating):\n tol = 2*np.finfo(new_result.dtype).eps\n else:\n tol = 2*np.finfo(np.float64).eps\n assert_allclose_dense_sparse(\n result[method], new_result,\n atol=max(tol, 1e-9), rtol=max(tol, 1e-7),\n err_msg=\"Idempotency check failed for method {}\".format(method)\n )\n\n\ndef check_n_features_in(name, estimator_orig):\n # Make sure that n_features_in_ attribute doesn't exist until fit is\n # called, and that its value is correct.\n\n rng = np.random.RandomState(0)\n\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n if 'warm_start' in estimator.get_params():\n estimator.set_params(warm_start=False)\n\n n_samples = 100\n X = rng.normal(loc=100, size=(n_samples, 2))\n X = _pairwise_estimator_convert_X(X, estimator)\n if is_regressor(estimator_orig):\n y = rng.normal(size=n_samples)\n else:\n y = rng.randint(low=0, high=2, size=n_samples)\n y = _enforce_estimator_tags_y(estimator, y)\n\n assert not hasattr(estimator, 'n_features_in_')\n estimator.fit(X, y)\n if hasattr(estimator, 'n_features_in_'):\n assert estimator.n_features_in_ == X.shape[1]\n else:\n warnings.warn(\n \"As of scikit-learn 0.23, estimators should expose a \"\n \"n_features_in_ attribute, unless the 'no_validation' tag is \"\n \"True. This attribute should be equal to the number of features \"\n \"passed to the fit method. \"\n \"An error will be raised from version 0.25 when calling \"\n \"check_estimator(). \"\n \"See SLEP010: \"\n \"https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep010/proposal.html\", # noqa\n FutureWarning\n )\n\n\ndef check_requires_y_none(name, estimator_orig):\n # Make sure that an estimator with requires_y=True fails gracefully when\n # given y=None\n\n rng = np.random.RandomState(0)\n\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n\n n_samples = 100\n X = rng.normal(loc=100, size=(n_samples, 2))\n X = _pairwise_estimator_convert_X(X, estimator)\n\n warning_msg = (\"As of scikit-learn 0.23, estimators should have a \"\n \"'requires_y' tag set to the appropriate value. \"\n \"The default value of the tag is False. 
\"\n \"An error will be raised from version 0.25 when calling \"\n \"check_estimator() if the tag isn't properly set.\")\n\n expected_err_msgs = (\n \"requires y to be passed, but the target y is None\",\n \"Expected array-like (array or non-string sequence), got None\",\n \"y should be a 1d array\"\n )\n\n try:\n estimator.fit(X, None)\n except ValueError as ve:\n if not any(msg in str(ve) for msg in expected_err_msgs):\n warnings.warn(warning_msg, FutureWarning)\n\n\ndef check_n_features_in_after_fitting(name, estimator_orig):\n # Make sure that n_features_in are checked after fitting\n tags = _safe_tags(estimator_orig)\n\n if \"2darray\" not in tags[\"X_types\"] or tags[\"no_validation\"]:\n return\n\n rng = np.random.RandomState(0)\n\n estimator = clone(estimator_orig)\n set_random_state(estimator)\n if 'warm_start' in estimator.get_params():\n estimator.set_params(warm_start=False)\n\n n_samples = 100\n X = rng.normal(loc=100, size=(n_samples, 2))\n X = _pairwise_estimator_convert_X(X, estimator)\n if is_regressor(estimator):\n y = rng.normal(size=n_samples)\n else:\n y = rng.randint(low=0, high=2, size=n_samples)\n y = _enforce_estimator_tags_y(estimator, y)\n\n estimator.fit(X, y)\n assert estimator.n_features_in_ == X.shape[1]\n\n # check methods will check n_features_in_\n check_methods = [\"predict\", \"transform\", \"decision_function\",\n \"predict_proba\"]\n X_bad = X[:, [1]]\n\n msg = (f\"X has 1 features, but \\\\w+ is expecting {X.shape[1]} \"\n \"features as input\")\n for method in check_methods:\n if not hasattr(estimator, method):\n continue\n with raises(ValueError, match=msg):\n getattr(estimator, method)(X_bad)\n\n # partial_fit will check in the second call\n if not hasattr(estimator, \"partial_fit\"):\n return\n\n estimator = clone(estimator_orig)\n if is_classifier(estimator):\n estimator.partial_fit(X, y, classes=np.unique(y))\n else:\n estimator.partial_fit(X, y)\n assert estimator.n_features_in_ == X.shape[1]\n\n with raises(ValueError, match=msg):\n estimator.partial_fit(X_bad, y)\n\n\ndef check_estimator_get_tags_default_keys(name, estimator_orig):\n # check that if _get_tags is implemented, it contains all keys from\n # _DEFAULT_KEYS\n estimator = clone(estimator_orig)\n if not hasattr(estimator, \"_get_tags\"):\n return\n\n tags_keys = set(estimator._get_tags().keys())\n default_tags_keys = set(_DEFAULT_TAGS.keys())\n assert tags_keys.intersection(default_tags_keys) == default_tags_keys, (\n f\"{name}._get_tags() is missing entries for the following default tags\"\n f\": {default_tags_keys - tags_keys.intersection(default_tags_keys)}\"\n )\n"
] |
[
[
"numpy.mean",
"numpy.where",
"numpy.finfo",
"numpy.sort",
"scipy.stats.rankdata",
"numpy.issubdtype",
"numpy.dtype",
"numpy.full",
"numpy.empty",
"numpy.log",
"pandas.DataFrame",
"numpy.core.numerictypes.allTypes.values",
"numpy.take",
"numpy.arange",
"numpy.argmax",
"numpy.random.sample",
"scipy.sparse.csr_matrix",
"numpy.vstack",
"scipy.sparse.issparse",
"numpy.array",
"numpy.reshape",
"numpy.random.randn",
"numpy.argsort",
"numpy.hstack",
"numpy.asarray",
"numpy.random.RandomState",
"numpy.sum",
"numpy.random.permutation",
"numpy.ones",
"numpy.ravel",
"pandas.Series",
"numpy.unique"
]
] |
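The record above embeds scikit-learn's common estimator checks (e.g. check_fit_idempotent, check_decision_proba_consistency). A minimal sketch, not taken from the record and assuming scikit-learn 0.23 or later, of how such checks are normally driven through the public entry point rather than called one by one:

```python
# Sketch: run the suite of common checks (including the ones defined in the
# module above) against a stock estimator instance.
from sklearn.linear_model import LogisticRegression
from sklearn.utils.estimator_checks import check_estimator

# check_estimator iterates over checks such as check_fit_idempotent and
# check_decision_proba_consistency and raises AssertionError on the first failure.
check_estimator(LogisticRegression())
```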
Sirius207/Reinforcement-Learning-Algorithms
|
[
"85a1297c6a03fa8316f53ccee4ad477684f00ef4"
] |
[
"dqnSample.py"
] |
[
"# From https://towardsdatascience.com/reinforcement-learning-w-keras-openai-dqns-1eed3a5338c\n\nimport gym\nimport numpy as np\nimport random\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import Adam\n\nfrom collections import deque\n\nclass DQN:\n def __init__(self, env):\n self.env = env\n self.memory = deque(maxlen=2000)\n \n self.gamma = 0.85\n self.epsilon = 1.0\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.995\n self.learning_rate = 0.005\n self.tau = .125\n\n self.model = self.create_model()\n self.target_model = self.create_model()\n\n def create_model(self):\n model = Sequential()\n state_shape = self.env.observation_space.shape\n model.add(Dense(24, input_dim=state_shape[0], activation=\"relu\"))\n model.add(Dense(48, activation=\"relu\"))\n model.add(Dense(24, activation=\"relu\"))\n model.add(Dense(self.env.action_space.n))\n model.compile(loss=\"mean_squared_error\",\n optimizer=Adam(lr=self.learning_rate))\n return model\n\n def act(self, state):\n self.epsilon *= self.epsilon_decay\n self.epsilon = max(self.epsilon_min, self.epsilon)\n if np.random.random() < self.epsilon:\n return self.env.action_space.sample()\n return np.argmax(self.model.predict(state)[0])\n\n def remember(self, state, action, reward, new_state, done):\n self.memory.append([state, action, reward, new_state, done])\n\n def replay(self):\n batch_size = 32\n if len(self.memory) < batch_size: \n return\n\n samples = random.sample(self.memory, batch_size)\n for sample in samples:\n state, action, reward, new_state, done = sample\n target = self.target_model.predict(state)\n if done:\n target[0][action] = reward\n else:\n Q_future = max(self.target_model.predict(new_state)[0])\n target[0][action] = reward + Q_future * self.gamma\n self.model.fit(state, target, epochs=1, verbose=0)\n\n def target_train(self):\n weights = self.model.get_weights()\n target_weights = self.target_model.get_weights()\n for i in range(len(target_weights)):\n target_weights[i] = weights[i] * self.tau + target_weights[i] * (1 - self.tau)\n self.target_model.set_weights(target_weights)\n\n def save_model(self, fn):\n self.model.save(fn)\n\ndef main():\n env = gym.make(\"MountainCar-v0\")\n gamma = 0.9\n epsilon = .95\n\n trials = 1000\n trial_len = 500\n\n # updateTargetNetwork = 1000\n dqn_agent = DQN(env=env)\n steps = []\n for trial in range(trials):\n cur_state = env.reset().reshape(1,2)\n for step in range(trial_len):\n action = dqn_agent.act(cur_state)\n new_state, reward, done, _ = env.step(action)\n\n # reward = reward if not done else -20\n new_state = new_state.reshape(1,2)\n dqn_agent.remember(cur_state, action, reward, new_state, done)\n \n dqn_agent.replay() # internally iterates default (prediction) model\n dqn_agent.target_train() # iterates target model\n\n cur_state = new_state\n if done:\n break\n if step >= 199:\n print(\"Failed to complete in trial {}\".format(trial))\n if step % 10 == 0:\n dqn_agent.save_model(\"trial-{}.model\".format(trial))\n else:\n print(\"Completed in {} trials\".format(trial))\n dqn_agent.save_model(\"success.model\")\n break\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"numpy.random.random"
]
] |
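The dqnSample.py record above hinges on a tau-weighted soft update of the target network (DQN.target_train). A standalone sketch of that update, using plain NumPy arrays in place of the record's Keras weight lists (the names below are illustrative, not from the record):

```python
import numpy as np

def soft_update(weights, target_weights, tau=0.125):
    # Blend each layer toward the online network, mirroring the record's
    # target_train(): new_target = tau * w + (1 - tau) * old_target.
    return [tau * w + (1.0 - tau) * tw for w, tw in zip(weights, target_weights)]

# Toy usage with two layers of fake weights.
online = [np.ones((2, 2)), np.zeros(3)]
target = [np.zeros((2, 2)), np.ones(3)]
print(soft_update(online, target))
```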
AmedeoSapio/tensorflow
|
[
"38e0922d1e2dcd572379af4496f878492e9f689a"
] |
[
"tensorflow/python/kernel_tests/lookup_ops_test.py"
] |
[
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lookup ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport numpy as np\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.training import server_lib\n\n\nclass HashTableOpTest(test.TestCase):\n\n def testHashTable(self):\n with self.test_session():\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n table.init.run()\n\n self.assertAllEqual(3, table.size().eval())\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n self.assertAllEqual([3], output.get_shape())\n\n result = output.eval()\n self.assertAllEqual([0, 1, -1], result)\n\n def testHashTableFindHighRank(self):\n with self.test_session():\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n table.init.run()\n\n self.assertAllEqual(3, table.size().eval())\n\n input_string = constant_op.constant(\n [[\"brain\", \"salad\"], [\"tank\", \"tarkus\"]])\n output = table.lookup(input_string)\n\n result = output.eval()\n self.assertAllEqual([[0, 1], [-1, -1]], result)\n\n def testHashTableInitWithPythonArrays(self):\n with self.test_session():\n default_val = -1\n keys = [\"brain\", \"salad\", \"surgery\"]\n values = [0, 1, 2]\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(\n keys, values, value_dtype=dtypes.int64), default_val)\n table.init.run()\n\n self.assertAllEqual(3, table.size().eval())\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = output.eval()\n self.assertAllEqual([0, 1, -1], result)\n\n def testHashTableInitWithNumPyArrays(self):\n with self.test_session():\n default_val = -1\n keys = np.array([\"brain\", \"salad\", \"surgery\"], dtype=np.str)\n values = np.array([0, 1, 2], dtype=np.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n table.init.run()\n\n self.assertAllEqual(3, table.size().eval())\n\n input_string = 
constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = output.eval()\n self.assertAllEqual([0, 1, -1], result)\n\n def testMultipleHashTables(self):\n with self.test_session() as sess:\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n\n table1 = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n table2 = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n table3 = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n\n lookup_ops.tables_initializer().run()\n self.assertAllEqual(3, table1.size().eval())\n self.assertAllEqual(3, table2.size().eval())\n self.assertAllEqual(3, table3.size().eval())\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output1 = table1.lookup(input_string)\n output2 = table2.lookup(input_string)\n output3 = table3.lookup(input_string)\n\n out1, out2, out3 = sess.run([output1, output2, output3])\n self.assertAllEqual([0, 1, -1], out1)\n self.assertAllEqual([0, 1, -1], out2)\n self.assertAllEqual([0, 1, -1], out3)\n\n def testHashTableWithTensorDefault(self):\n with self.test_session():\n default_val = constant_op.constant(-1, dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n table.init.run()\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = output.eval()\n self.assertAllEqual([0, 1, -1], result)\n\n def testHashTableWithSparseTensorInput(self):\n with self.test_session() as sess:\n default_val = constant_op.constant(-1, dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n table.init.run()\n\n sp_indices = [[0, 0], [0, 1], [1, 0]]\n sp_shape = [2, 2]\n input_tensor = sparse_tensor.SparseTensor(\n constant_op.constant(sp_indices, dtypes.int64),\n constant_op.constant([\"brain\", \"salad\", \"tank\"]),\n constant_op.constant(sp_shape, dtypes.int64))\n output = table.lookup(input_tensor)\n\n out_indices, out_values, out_shape = sess.run(output)\n\n self.assertAllEqual([0, 1, -1], out_values)\n self.assertAllEqual(sp_indices, out_indices)\n self.assertAllEqual(sp_shape, out_shape)\n\n def testSignatureMismatch(self):\n with self.test_session():\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n table.init.run()\n\n input_string = constant_op.constant([1, 2, 3], dtypes.int64)\n with self.assertRaises(TypeError):\n table.lookup(input_string)\n\n with self.assertRaises(TypeError):\n lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), \"UNK\")\n\n def testDTypes(self):\n with self.test_session():\n default_val = -1\n with self.assertRaises(TypeError):\n lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer([\"a\"], [1], [dtypes.string],\n dtypes.int64), default_val)\n\n def testNotInitialized(self):\n with self.test_session():\n 
default_val = -1\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(\n [\"a\"], [1], value_dtype=dtypes.int64), default_val)\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n output = table.lookup(input_string)\n\n with self.assertRaisesOpError(\"Table not initialized\"):\n output.eval()\n\n def testInitializeTwice(self):\n with self.test_session():\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n table.init.run()\n\n with self.assertRaisesOpError(\"Table already initialized\"):\n table.init.run()\n\n def testInitializationWithInvalidDimensions(self):\n with self.test_session():\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)\n\n with self.assertRaises(ValueError):\n lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values), default_val)\n\n def testMultipleSessions(self):\n # Start a server\n server = server_lib.Server(\n {\n \"local0\": [\"localhost:0\"]\n }, protocol=\"grpc\", start=True)\n # Create two sessions sharing the same state\n session1 = session.Session(server.target)\n session2 = session.Session(server.target)\n\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n name=\"t1\")\n\n # Init the table in the first session.\n with session1:\n table.init.run()\n self.assertAllEqual(3, table.size().eval())\n\n # Init the table in the second session and verify that we do not get a\n # \"Table already initialized\" error.\n with session2:\n table.init.run()\n self.assertAllEqual(3, table.size().eval())\n\n\nclass IndexTableFromFile(test.TestCase):\n\n def _createVocabFile(self, basename, values=(\"brain\", \"salad\", \"surgery\")):\n vocabulary_file = os.path.join(self.get_temp_dir(), basename)\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join(values) + \"\\n\")\n return vocabulary_file\n\n def test_string_index_table_from_file(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab1.txt\")\n with self.test_session():\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, num_oov_buckets=1)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, 3), ids.eval())\n\n def test_string_index_table_from_file_tensor_filename(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab1.txt\")\n with self.test_session():\n vocabulary_file = constant_op.constant(vocabulary_file)\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, num_oov_buckets=1)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, 3), ids.eval())\n\n def test_int32_index_table_from_file(self):\n vocabulary_file = self._createVocabFile(\n \"f2i_vocab2.txt\", values=(\"42\", \"1\", \"-1000\"))\n with self.test_session():\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n num_oov_buckets=1,\n 
key_dtype=dtypes.int32)\n ids = table.lookup(\n constant_op.constant((1, -1000, 11), dtype=dtypes.int32))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, 3), ids.eval())\n\n def test_int64_index_table_from_file(self):\n vocabulary_file = self._createVocabFile(\n \"f2i_vocab3.txt\", values=(\"42\", \"1\", \"-1000\"))\n with self.test_session():\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n num_oov_buckets=1,\n key_dtype=dtypes.int64)\n ids = table.lookup(\n constant_op.constant((1, -1000, 11), dtype=dtypes.int64))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, 3), ids.eval())\n\n def test_index_table_from_file_with_default_value(self):\n default_value = -42\n vocabulary_file = self._createVocabFile(\"f2i_vocab4.txt\")\n with self.test_session():\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, default_value=default_value)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, default_value), ids.eval())\n\n def test_index_table_from_file_with_oov_buckets(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab5.txt\")\n with self.test_session():\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, num_oov_buckets=1000)\n ids = table.lookup(\n constant_op.constant([\"salad\", \"surgery\", \"tarkus\", \"toccata\"]))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual(\n (\n 1, # From vocabulary file.\n 2, # From vocabulary file.\n 867, # 3 + fingerprint(\"tarkus\") mod 300.\n 860), # 3 + fingerprint(\"toccata\") mod 300.\n ids.eval())\n\n def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):\n self.assertRaises(\n ValueError, lookup_ops.index_table_from_file, vocabulary_file=\"\")\n\n def test_index_table_from_file_fails_with_empty_vocabulary(self):\n self.assertRaises(\n ValueError, lookup_ops.index_table_from_file, vocabulary_file=None)\n\n def test_index_table_from_file_with_vocab_size_too_small(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab6.txt\")\n with self.test_session():\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=2)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, -1, -1), ids.eval())\n self.assertEqual(2, table.size().eval())\n\n def test_index_table_from_file_with_vocab_size_too_large(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab7.txt\")\n with self.test_session():\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=4)\n self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"Invalid vocab_size\", table.init.run)\n\n def test_index_table_from_file_with_vocab_size(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab8.txt\")\n\n self.assertRaises(\n ValueError,\n lookup_ops.index_table_from_file,\n vocabulary_file=vocabulary_file,\n vocab_size=0)\n\n with self.test_session():\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=3)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", 
\"tarkus\"]))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, -1), ids.eval())\n self.assertEqual(3, table.size().eval())\n\n def test_index_table_from_file_with_invalid_hashers(self):\n vocabulary_file = self._createVocabFile(\"invalid_hasher.txt\")\n with self.test_session():\n with self.assertRaises(TypeError):\n lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n vocab_size=3,\n num_oov_buckets=1,\n hasher_spec=1)\n\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n vocab_size=3,\n num_oov_buckets=1,\n hasher_spec=lookup_ops.HasherSpec(\"my-awesome-hash\", None))\n\n self.assertRaises(ValueError, table.lookup,\n constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n\nclass KeyValueTensorInitializerTest(test.TestCase):\n\n def test_string(self):\n with ops.Graph().as_default(), self.test_session():\n init = lookup_ops.KeyValueTensorInitializer(\n (\"brain\", \"salad\", \"surgery\"), (0, 1, 2), dtypes.string, dtypes.int64)\n table = lookup_ops.HashTable(init, default_value=-1)\n table.init.run()\n\n def test_int64(self):\n with ops.Graph().as_default(), self.test_session():\n init = lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),\n dtypes.int64, dtypes.int64)\n table = lookup_ops.HashTable(init, default_value=-1)\n table.init.run()\n\n def test_int32(self):\n with ops.Graph().as_default(), self.test_session():\n init = lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),\n dtypes.int32, dtypes.int64)\n table = lookup_ops.HashTable(init, default_value=-1)\n with self.assertRaisesRegexp(\n errors_impl.OpError, \"No OpKernel was registered\"):\n table.init.run()\n\n\nclass IndexTableFromTensor(test.TestCase):\n\n def test_index_table_from_tensor_with_tensor_init(self):\n with self.test_session():\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=(\"brain\", \"salad\", \"surgery\"), num_oov_buckets=1)\n ids = table.lookup(constant_op.constant((\"salad\", \"surgery\", \"tarkus\")))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, 3), ids.eval())\n\n def test_int32_index_table_from_tensor_with_tensor_init(self):\n with self.test_session():\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)\n ids = table.lookup(\n constant_op.constant((1, -1000, 11), dtype=dtypes.int32))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, 3), ids.eval())\n\n def test_int64_index_table_from_tensor_with_tensor_init(self):\n with self.test_session():\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)\n ids = table.lookup(\n constant_op.constant((1, -1000, 11), dtype=dtypes.int64))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, 3), ids.eval())\n\n def test_index_table_from_tensor_with_default_value(self):\n default_value = -42\n with self.test_session():\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=[\"brain\", \"salad\", \"surgery\"],\n default_value=default_value)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n self.assertRaises(errors_impl.OpError, ids.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((1, 2, default_value), 
ids.eval())\n\n def test_index_table_from_tensor_missing_vocabulary_list(self):\n with self.test_session():\n with self.assertRaisesRegexp(ValueError,\n \"vocabulary_list must be specified\"):\n lookup_ops.index_table_from_tensor(\n vocabulary_list=None, num_oov_buckets=1)\n\n def test_index_table_from_tensor_empty_vocabulary_list(self):\n with self.test_session():\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=np.array([], dtype=np.str_), num_oov_buckets=1)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"brain\"]))\n self.assertRaises(errors_impl.OpError, ids.eval)\n with self.assertRaisesRegexp(\n errors_impl.OpError, \"keys and values cannot be empty\"):\n lookup_ops.tables_initializer().run()\n\n def test_index_table_from_tensor_with_invalid_hashers(self):\n with self.test_session():\n with self.assertRaises(TypeError):\n lookup_ops.index_table_from_tensor(\n vocabulary_list=[\"brain\", \"salad\", \"surgery\"],\n num_oov_buckets=1,\n hasher_spec=1)\n\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=[\"brain\", \"salad\", \"surgery\"],\n num_oov_buckets=1,\n hasher_spec=lookup_ops.HasherSpec(\"my-awesome-hash\", None))\n\n self.assertRaises(ValueError, table.lookup,\n constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n\nclass IndexToStringTableFromFileTest(test.TestCase):\n\n def _createVocabFile(self, basename):\n vocabulary_file = os.path.join(self.get_temp_dir(), basename)\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join([\"brain\", \"salad\", \"surgery\"]) + \"\\n\")\n return vocabulary_file\n\n def test_index_to_string_table(self):\n vocabulary_file = self._createVocabFile(\"i2f_vocab1.txt\")\n with self.test_session():\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file)\n features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))\n self.assertRaises(errors_impl.OpError, features.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"),\n features.eval())\n\n def test_index_to_string_table_with_default_value(self):\n default_value = b\"NONE\"\n vocabulary_file = self._createVocabFile(\"f2i_vocab2.txt\")\n with self.test_session():\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file, default_value=default_value)\n features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))\n self.assertRaises(errors_impl.OpError, features.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((b\"salad\", b\"surgery\", default_value),\n features.eval())\n\n def test_index_to_string_table_with_vocab_size_too_small(self):\n default_value = b\"NONE\"\n vocabulary_file = self._createVocabFile(\"f2i_vocab2.txt\")\n with self.test_session():\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file,\n vocab_size=2,\n default_value=default_value)\n features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))\n self.assertRaises(errors_impl.OpError, features.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((b\"salad\", default_value, default_value),\n features.eval())\n\n def test_index_to_string_table_with_vocab_size_too_large(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab6.txt\")\n with self.test_session():\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=4)\n features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))\n\n 
self.assertRaises(errors_impl.OpError, features.eval)\n init = lookup_ops.tables_initializer()\n self.assertRaisesRegexp(errors_impl.InvalidArgumentError,\n \"Invalid vocab_size\", init.run)\n\n def test_index_to_string_table_with_vocab_size(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab7.txt\")\n with self.test_session():\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=3)\n features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))\n\n self.assertRaises(errors_impl.OpError, features.eval)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((b\"salad\", b\"surgery\", b\"UNK\"), features.eval())\n\n\nclass IndexToStringTableFromTensorTest(test.TestCase):\n\n def test_index_to_string_table_from_tensor(self):\n with self.test_session():\n vocabulary_list = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n table = lookup_ops.index_to_string_table_from_tensor(\n vocabulary_list=vocabulary_list)\n\n indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n features = table.lookup(indices)\n self.assertRaises(errors_impl.OpError, features.eval)\n lookup_ops.tables_initializer().run()\n\n self.assertAllEqual((b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"),\n features.eval())\n\n def test_duplicate_entries(self):\n with self.test_session():\n vocabulary_list = constant_op.constant([\"hello\", \"hello\"])\n table = lookup_ops.index_to_string_table_from_tensor(\n vocabulary_list=vocabulary_list)\n indices = constant_op.constant([0, 1, 4], dtypes.int64)\n features = table.lookup(indices)\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((b\"hello\", b\"hello\", b\"UNK\"), features.eval())\n\n def test_index_to_string_with_default_value(self):\n default_value = b\"NONE\"\n with self.test_session():\n vocabulary_list = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n table = lookup_ops.index_to_string_table_from_tensor(\n vocabulary_list=vocabulary_list, default_value=default_value)\n indices = constant_op.constant([1, 2, 4], dtypes.int64)\n features = table.lookup(indices)\n self.assertRaises(errors_impl.OpError, features.eval)\n\n lookup_ops.tables_initializer().run()\n self.assertAllEqual((b\"salad\", b\"surgery\", default_value),\n features.eval())\n\n\nclass InitializeTableFromFileOpTest(test.TestCase):\n\n def _createVocabFile(self, basename, values=(\"brain\", \"salad\", \"surgery\")):\n vocabulary_file = os.path.join(self.get_temp_dir(), basename)\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join(values) + \"\\n\")\n return vocabulary_file\n\n def testInitializeStringTable(self):\n vocabulary_file = self._createVocabFile(\"one_column_1.txt\")\n\n with self.test_session():\n default_value = -1\n table = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE, dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER), default_value)\n table.init.run()\n\n output = table.lookup(constant_op.constant([\"brain\", \"salad\", \"tank\"]))\n\n result = output.eval()\n self.assertAllEqual([0, 1, -1], result)\n\n def testInitializeInt64Table(self):\n vocabulary_file = self._createVocabFile(\n \"one_column_int64.txt\", values=(\"42\", \"1\", \"-1000\"))\n\n with self.test_session():\n default_value = -1\n table = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.int64,\n lookup_ops.TextFileIndex.WHOLE_LINE, dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER), 
default_value)\n table.init.run()\n\n output = table.lookup(\n constant_op.constant((42, 1, 11), dtype=dtypes.int64))\n\n result = output.eval()\n self.assertAllEqual([0, 1, -1], result)\n\n def testInitializeIndexTable(self):\n vocabulary_file = self._createVocabFile(\"one_column_2.txt\")\n\n with self.test_session():\n default_value = \"UNK\"\n key_index = lookup_ops.TextFileIndex.LINE_NUMBER\n value_index = lookup_ops.TextFileIndex.WHOLE_LINE\n table = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(vocabulary_file, dtypes.int64,\n key_index, dtypes.string, value_index),\n default_value)\n table.init.run()\n\n input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n output = table.lookup(input_values)\n\n result = output.eval()\n self.assertAllEqual([b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"], result)\n\n def testMultiColumn(self):\n vocabulary_file = os.path.join(self.get_temp_dir(), \"three_columns.txt\")\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join([\"0\\tbrain\\t1\", \"1\\tsalad\\t5\", \"2\\tsurgery\\t6\"]) + \"\\n\")\n\n with self.test_session():\n default_value = -1\n key_index = 1\n value_index = 2\n\n table = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,\n key_index, dtypes.int64, value_index),\n default_value)\n table.init.run()\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n output = table.lookup(input_string)\n\n result = output.eval()\n self.assertAllEqual([1, 5, 6], result)\n\n def testInvalidDataTypeInMultiColumn(self):\n vocabulary_file = os.path.join(self.get_temp_dir(), \"three_columns.txt\")\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join([\"0\\tbrain\\t1\", \"1\\tsalad\\t5\", \"2\\tsurgery\\t6\"]) + \"\\n\")\n\n with self.test_session():\n default_value = -1\n key_index = 2\n value_index = 1\n table = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,\n key_index, dtypes.int64, value_index),\n default_value)\n with self.assertRaisesOpError(\"is not a valid\"):\n table.init.run()\n\n def testInvalidDataType(self):\n vocabulary_file = self._createVocabFile(\"one_column_3.txt\")\n\n with self.test_session():\n default_value = \"UNK\"\n key_index = lookup_ops.TextFileIndex.WHOLE_LINE\n value_index = lookup_ops.TextFileIndex.LINE_NUMBER\n\n with self.assertRaises(ValueError):\n lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(vocabulary_file, dtypes.int64,\n key_index, dtypes.string,\n value_index), default_value)\n\n def testInvalidIndex(self):\n vocabulary_file = self._createVocabFile(\"one_column_4.txt\")\n with self.test_session():\n default_value = -1\n key_index = 1 # second column of the line\n value_index = lookup_ops.TextFileIndex.LINE_NUMBER\n table = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,\n key_index, dtypes.int64, value_index),\n default_value)\n\n with self.assertRaisesOpError(\"Invalid number of columns\"):\n table.init.run()\n\n def testInitializeSameTableWithMultipleNodes(self):\n vocabulary_file = self._createVocabFile(\"one_column_5.txt\")\n\n with self.test_session() as sess:\n shared_name = \"shared-one-columm\"\n default_value = -1\n table1 = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value,\n shared_name=shared_name)\n table2 = lookup_ops.HashTable(\n 
lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value,\n shared_name=shared_name)\n table3 = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value,\n shared_name=shared_name)\n\n lookup_ops.tables_initializer().run()\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n\n output1 = table1.lookup(input_string)\n output2 = table2.lookup(input_string)\n output3 = table3.lookup(input_string)\n\n out1, out2, out3 = sess.run([output1, output2, output3])\n self.assertAllEqual([0, 1, -1], out1)\n self.assertAllEqual([0, 1, -1], out2)\n self.assertAllEqual([0, 1, -1], out3)\n\n def testInitializeTableWithNoFilename(self):\n with self.test_session():\n default_value = -1\n with self.assertRaises(ValueError):\n lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(\n \"\", dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value)\n\n def testInitializeWithVocabSize(self):\n with self.test_session():\n default_value = -1\n vocab_size = 3\n vocabulary_file1 = self._createVocabFile(\"one_column6.txt\")\n table1 = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(\n vocabulary_file1,\n dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER,\n vocab_size=vocab_size), default_value)\n\n # Initialize from file.\n table1.init.run()\n self.assertEquals(vocab_size, table1.size().eval())\n\n vocabulary_file2 = self._createVocabFile(\"one_column7.txt\")\n vocab_size = 5\n table2 = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(\n vocabulary_file2,\n dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER,\n vocab_size=vocab_size), default_value)\n with self.assertRaisesOpError(\"Invalid vocab_size\"):\n table2.init.run()\n\n vocab_size = 1\n vocabulary_file3 = self._createVocabFile(\"one_column3.txt\")\n table3 = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(\n vocabulary_file3,\n dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER,\n vocab_size=vocab_size), default_value)\n\n # Smaller vocab size reads only vocab_size records.\n table3.init.run()\n self.assertEquals(vocab_size, table3.size().eval())\n\n def testFeedVocabularyName(self):\n vocabulary_file = self._createVocabFile(\"feed_vocabulary.txt\")\n\n with self.test_session():\n default_value = -1\n table = lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(\n \"old_file.txt\", dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE, dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER), default_value)\n\n # Initialize with non existing file (old_file.txt) should fail.\n # TODO(yleon): Update message, which might change per FileSystem.\n with self.assertRaisesOpError(\"old_file.txt\"):\n table.init.run()\n\n # Initialize the model feeding the vocabulary file.\n filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)\n table.init.run(feed_dict={filenames[0]: vocabulary_file})\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = output.eval()\n self.assertAllEqual([0, 1, -1], result)\n\n def testInvalidFilenames(self):\n vocabulary_file = 
self._createVocabFile(\"filename_shape.txt\")\n\n with self.test_session():\n default_value = -1\n\n # Invalid data type\n other_type = constant_op.constant(1)\n with self.assertRaises(ValueError):\n lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(\n other_type, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value)\n\n # Non-scalar filename\n filenames = constant_op.constant([vocabulary_file, vocabulary_file])\n with self.assertRaises(ValueError):\n lookup_ops.HashTable(\n lookup_ops.TextFileInitializer(\n filenames, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value)\n\n def testIdToStringTable(self):\n vocab_file = self._createVocabFile(\"feat_to_id_1.txt\")\n with self.test_session():\n default_value = \"UNK\"\n vocab_size = 3\n table = lookup_ops.HashTable(\n lookup_ops.TextFileStringTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value)\n\n table.init.run()\n\n input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n\n out = table.lookup(input_values)\n self.assertAllEqual([b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"], out.eval())\n self.assertEquals(vocab_size, table.size().eval())\n\n def testStringToIdTable(self):\n vocab_file = self._createVocabFile(\"feat_to_id_2.txt\")\n with self.test_session():\n default_value = -1\n vocab_size = 3\n table = lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value)\n table.init.run()\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out = table.lookup(input_string)\n self.assertAllEqual([0, 1, 2, -1], out.eval())\n self.assertEquals(vocab_size, table.size().eval())\n\n def testInt64ToIdTable(self):\n vocab_file = self._createVocabFile(\n \"feat_to_id_3.txt\", values=(\"42\", \"1\", \"-1000\"))\n with self.test_session():\n default_value = -1\n vocab_size = 3\n table = lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),\n default_value)\n table.init.run()\n\n out = table.lookup(\n constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))\n self.assertAllEqual((0, 1, 2, -1), out.eval())\n self.assertEquals(vocab_size, table.size().eval())\n\n\nclass IdTableWithHashBucketsTest(test.TestCase):\n\n def _createVocabFile(self, basename, values=(\"brain\", \"salad\", \"surgery\")):\n vocabulary_file = os.path.join(self.get_temp_dir(), basename)\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join(values) + \"\\n\")\n return vocabulary_file\n\n def testStringIdTableWithHashBuckets(self):\n vocab_file = self._createVocabFile(\"feat_to_id_1.txt\")\n with self.test_session():\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value),\n oov_buckets)\n\n table.init.run()\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out = table.lookup(input_string)\n self.assertAllEqual([0, 1, 2, 3], out.eval())\n self.assertEquals(vocab_size + oov_buckets, table.size().eval())\n\n def testInt32IdTableWithHashBuckets(self):\n vocab_file = self._createVocabFile(\"feat_to_id_2.txt\", (\"42\", \"1\", \"-1000\"))\n with self.test_session():\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n table = 
lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),\n default_value),\n oov_buckets,\n key_dtype=dtypes.int32)\n\n table.init.run()\n\n values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)\n\n out = table.lookup(values)\n self.assertAllEqual([0, 1, 2, 3], out.eval())\n self.assertEquals(vocab_size + oov_buckets, table.size().eval())\n\n def testInt64IdTableWithHashBuckets(self):\n vocab_file = self._createVocabFile(\"feat_to_id_3.txt\", (\"42\", \"1\", \"-1000\"))\n with self.test_session():\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),\n default_value), oov_buckets)\n\n table.init.run()\n\n values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)\n\n out = table.lookup(values)\n self.assertAllEqual([0, 1, 2, 3], out.eval())\n self.assertEquals(vocab_size + oov_buckets, table.size().eval())\n\n def testStringIdTableWithOnlyHashBucket(self):\n with self.test_session():\n oov_buckets = 5\n\n # Set a table that only uses hash buckets, for each input value returns\n # an id calculated by fingerprint(\"input\") mod oov_buckets.\n table = lookup_ops.IdTableWithHashBuckets(None, oov_buckets)\n table.init.run()\n\n values = constant_op.constant((\"brain\", \"salad\", \"surgery\"))\n\n out = table.lookup(values)\n self.assertAllEqual(\n [\n 3, # fingerprint(\"brain\") mod 5.\n 1, # fingerprint(\"salad\") mod 5.\n 4 # fingerprint(\"surgery\") mod 5\n ],\n out.eval())\n self.assertEquals(oov_buckets, table.size().eval())\n\n def testInt32IdTableWithOnlyHashBucket(self):\n with self.test_session():\n oov_buckets = 5\n\n # Set a table that only uses hash buckets, for each input value returns\n # an id calculated by fingerprint(\"input\") mod oov_buckets.\n table = lookup_ops.IdTableWithHashBuckets(\n None, oov_buckets, key_dtype=dtypes.int32)\n table.init.run()\n\n input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)\n\n out = table.lookup(input_string)\n self.assertAllEqual(\n [\n 1, # fingerprint(\"42\") mod 5.\n 4, # fingerprint(\"1\") mod 5.\n 2 # fingerprint(\"-1000\") mod 5\n ],\n out.eval())\n self.assertEquals(oov_buckets, table.size().eval())\n\n def testFloat64IdTableWithOnlyHashBucket(self):\n with self.test_session():\n with self.assertRaisesRegexp(TypeError, \"Invalid key_dtype\"):\n lookup_ops.IdTableWithHashBuckets(\n None, num_oov_buckets=5, key_dtype=dtypes.float64)\n\n def testBoolIdTableWithOnlyHashBucket(self):\n with self.test_session():\n with self.assertRaisesRegexp(TypeError, \"Invalid key_dtype\"):\n lookup_ops.IdTableWithHashBuckets(\n None, num_oov_buckets=5, key_dtype=dtypes.bool)\n\n def testIdTableWithHashBucketsWithMultipleInitializers(self):\n vocab_file = self._createVocabFile(\"feat_to_id_4.txt\")\n with self.test_session() as sess:\n default_value = -1\n vocab_size = 3\n oov_buckets = 3\n\n vocab_table = lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value)\n table1 = lookup_ops.IdTableWithHashBuckets(\n vocab_table,\n oov_buckets,\n hasher_spec=lookup_ops.FastHashSpec,\n name=\"table1\")\n\n table2 = lookup_ops.IdTableWithHashBuckets(\n vocab_table,\n oov_buckets,\n hasher_spec=lookup_ops.StrongHashSpec((1, 2)),\n name=\"table2\")\n\n 
lookup_ops.tables_initializer().run()\n\n input_string = constant_op.constant(\n [\"fruit\", \"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out1 = table1.lookup(input_string)\n out2 = table2.lookup(input_string)\n\n out1, out2 = sess.run([out1, out2])\n self.assertAllEqual([5, 0, 1, 2, 5], out1)\n self.assertAllEqual([5, 0, 1, 2, 3], out2)\n self.assertEquals(vocab_size + oov_buckets, table1.size().eval())\n self.assertEquals(vocab_size + oov_buckets, table2.size().eval())\n test_util.assert_ops_in_graph({\n \"table1_Lookup/hash_bucket\": \"StringToHashBucketFast\",\n \"table2_Lookup/hash_bucket\": \"StringToHashBucketStrong\",\n }, sess.graph)\n\n def testIdTableWithHashBucketsInitializationAcrossSessions(self):\n vocab_file = self._createVocabFile(\"feat_to_id_5.txt\")\n shared_name = \"across-sessions\"\n with self.test_session():\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n table1 = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size),\n default_value,\n shared_name=shared_name), oov_buckets)\n\n table1.init.run()\n\n input_string_1 = constant_op.constant(\n [\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out1 = table1.lookup(input_string_1)\n\n self.assertAllEqual([0, 1, 2, 3], out1.eval())\n self.assertEquals(vocab_size + oov_buckets, table1.size().eval())\n\n with self.test_session():\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n\n # Underlying lookup table already initialized in previous session.\n # No need to call table2.init.run()\n table2 = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size),\n default_value,\n shared_name=shared_name), oov_buckets)\n\n input_string_2 = constant_op.constant([\"fruit\", \"salad\", \"UNK\"])\n\n out2 = table2.lookup(input_string_2)\n\n self.assertAllEqual([3, 1, 3], out2.eval())\n self.assertEquals(vocab_size + oov_buckets, table2.size().eval())\n\n def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):\n vocab_file = self._createVocabFile(\"feat_to_id_6.txt\")\n with self.test_session() as sess:\n default_value1 = -1\n vocab_size = 3\n oov_buckets = 0\n table1 = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value1),\n oov_buckets)\n\n default_value2 = -2\n table2 = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value2),\n oov_buckets)\n\n lookup_ops.tables_initializer().run()\n\n input_string_1 = constant_op.constant(\n [\"brain\", \"salad\", \"surgery\", \"UNK\"])\n input_string_2 = constant_op.constant([\"fruit\", \"salad\", \"UNK\"])\n\n out1 = table1.lookup(input_string_1)\n out2 = table2.lookup(input_string_2)\n\n out1, out2 = sess.run([out1, out2])\n self.assertAllEqual([0, 1, 2, -1], out1)\n self.assertAllEqual([-2, 1, -2], out2)\n self.assertEquals(vocab_size + oov_buckets, table1.size().eval())\n self.assertEquals(vocab_size + oov_buckets, table2.size().eval())\n\n def testSparseTensor(self):\n vocab_file = self._createVocabFile(\"feat_to_id_7.txt\")\n input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]\n input_shape = [4, 4]\n with self.test_session() as sess:\n sp_features = sparse_tensor.SparseTensor(\n constant_op.constant(input_indices, dtypes.int64),\n constant_op.constant([\"brain\", \"salad\", \"brain\", 
\"surgery\", \"tarkus\"],\n dtypes.string),\n constant_op.constant(input_shape, dtypes.int64))\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),\n -1), 1)\n table.init.run()\n\n sp_ids = table.lookup(sp_features)\n\n self.assertAllEqual([5], sp_ids.values._shape_as_list())\n\n sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(\n [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])\n\n self.assertAllEqual(input_indices, sp_ids_ind)\n self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)\n self.assertAllEqual(input_shape, sp_ids_shape)\n\n def testInt32SparseTensor(self):\n input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]\n input_shape = [4, 4]\n with self.test_session() as sess:\n sp_features = sparse_tensor.SparseTensor(\n constant_op.constant(input_indices, dtypes.int64),\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),\n constant_op.constant(input_shape, dtypes.int64))\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(\n (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),\n 1,\n key_dtype=dtypes.int32)\n table.init.run()\n\n sp_ids = table.lookup(sp_features)\n\n self.assertAllEqual([5], sp_ids.values._shape_as_list())\n\n sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(\n [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])\n\n self.assertAllEqual(input_indices, sp_ids_ind)\n self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)\n self.assertAllEqual(input_shape, sp_ids_shape)\n\n def testInt64SparseTensor(self):\n input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]\n input_shape = [4, 4]\n with self.test_session() as sess:\n sp_features = sparse_tensor.SparseTensor(\n constant_op.constant(input_indices, dtypes.int64),\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),\n constant_op.constant(input_shape, dtypes.int64))\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.HashTable(\n lookup_ops.KeyValueTensorInitializer(\n (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),\n 1,\n key_dtype=dtypes.int64)\n table.init.run()\n\n sp_ids = table.lookup(sp_features)\n\n self.assertAllEqual([5], sp_ids.values._shape_as_list())\n\n sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(\n [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])\n\n self.assertAllEqual(input_indices, sp_ids_ind)\n self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)\n self.assertAllEqual(input_shape, sp_ids_shape)\n\n def testIdTableWithHashBucketsWithInvalidHashers(self):\n vocab_file = self._createVocabFile(\"feat_to_id_4.txt\")\n with self.test_session():\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n lookup_table = lookup_ops.HashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value)\n\n with self.assertRaises(TypeError):\n lookup_ops.IdTableWithHashBuckets(\n lookup_table, oov_buckets, hasher_spec=1)\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_table,\n oov_buckets,\n hasher_spec=lookup_ops.HasherSpec(\"my-awesome-hash\", None))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n with self.assertRaises(ValueError):\n table.lookup(input_string)\n\n with self.assertRaises(ValueError):\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_table,\n oov_buckets,\n hasher_spec=lookup_ops.StrongHashSpec([]))\n\n with self.assertRaises(ValueError):\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_table,\n oov_buckets,\n 
hasher_spec=lookup_ops.StrongHashSpec([1, 2, 3]))\n\n with self.assertRaises(TypeError):\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_table,\n oov_buckets,\n hasher_spec=lookup_ops.StrongHashSpec([None, 2]))\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] |
[
[
"tensorflow.python.ops.lookup_ops.TextFileStringTableInitializer",
"tensorflow.python.ops.lookup_ops.TextFileInitializer",
"tensorflow.python.framework.test_util.assert_ops_in_graph",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.lookup_ops.KeyValueTensorInitializer",
"tensorflow.python.ops.lookup_ops.index_table_from_file",
"tensorflow.python.ops.lookup_ops.TextFileIdTableInitializer",
"tensorflow.python.ops.lookup_ops.HasherSpec",
"tensorflow.python.client.session.Session",
"tensorflow.python.ops.lookup_ops.tables_initializer",
"numpy.array",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.training.server_lib.Server",
"tensorflow.python.ops.lookup_ops.HashTable",
"tensorflow.python.ops.lookup_ops.index_to_string_table_from_file",
"tensorflow.python.ops.lookup_ops.IdTableWithHashBuckets",
"tensorflow.python.ops.lookup_ops.index_to_string_table_from_tensor",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.lookup_ops.StrongHashSpec",
"tensorflow.python.ops.lookup_ops.index_table_from_tensor"
]
] |
goel96vibhor/semisup-adv
|
[
"30576066663e999d6ae9cc06fd5016d5886dd0b2"
] |
[
"ti_cifar_dataset.py"
] |
[
"\"\"\"\nDatasets with unlabeled (or pseudo-labeled) data\n\"\"\"\n\nfrom torchvision.datasets import CIFAR10, SVHN, MNIST\nfrom torch.utils.data import Sampler, Dataset\nimport torch\nimport numpy as np# from PIL import Image\nimport cifar_own\nimport qmnist_own\nimport os\nfrom PIL import Image\nimport pickle\n# from qmnist import QMNIST\nimport logging\nfrom torchvision import transforms\nDATASETS = ['cifar10', 'svhn', 'cifar_own']\nfrom diff_distribution_dataload_helper import *\nfrom dataset_utils.benrecht_cifar10 import BenRecht_cifar10_dataset\nfrom dataset_utils.tinyimages_80mn_loader import TinyImages\n\ndef to_tensor(x):\n t = torch.Tensor(x).transpose(2,0).transpose(1,2) / 255\n t -= torch.Tensor([0.4914, 0.4822, 0.4465]).reshape(3, 1 ,1)\n t /= torch.Tensor([0.2470, 0.2435, 0.2616]).reshape(3, 1 ,1)\n return t\n\nmean = np.array([0.4914, 0.4822, 0.4465])\nstd = np.array([0.2470, 0.2435, 0.2616])\n\nclass TICifarDataset(Dataset):\n def __init__(self,\n base_dataset,\n extend_dataset,\n ti_indices_map,\n targets,\n ti_start_index = 0,\n # base_targets,\n # extend_targets,\n train=False, \n transform = None,\n used_targets = None,\n **kwargs):\n \"\"\"A dataset with auxiliary pseudo-labeled data\"\"\"\n logger = logging.getLogger()\n self.base_dataset = base_dataset\n self.extend_dataset = extend_dataset\n self.ti_indices_map = ti_indices_map\n self.transform = transform\n # self.base_targets = base_targets\n # self.extend_targets = extend_targets\n self.targets = targets\n self.used_targets = used_targets\n self.base_dataset_size = len(base_dataset)\n self.ti_start_index = ti_start_index\n logger.info(\"Base dataset size for TI CIFAR dataset %d, extend dataset size %d, ti indices map size %d, targets size %d, ti start index %d\" \n %(self.base_dataset_size, len(self.extend_dataset), len(self.ti_indices_map), len(self.targets), self.ti_start_index))\n self.train = train\n \n @property\n def data(self):\n return self.base_dataset.data\n\n# @data.setter\n# def data(self, value):\n# self.dataset.data = value\n\n# @property\n# def targets(self):\n# return self.dataset.targets\n\n# @targets.setter\n# def targets(self, value):\n# self.dataset.targets = value\n\n def __len__(self):\n return len(self.base_dataset) + len(self.ti_indices_map)\n\n def __getitem__(self, item):\n if item >= self.base_dataset_size:\n # print('returning extend item')\n # item = item % self.base_dataset_size\n # extend_tuple = torch.tensor(self.extend_dataset[self.ti_indices_map[item-self.base_dataset_size]-self.ti_start_index].transpose([2,0,1]), dtype= torch.float32)\n extend_tuple = self.extend_dataset[self.ti_indices_map[item-self.base_dataset_size]-self.ti_start_index]\n extend_tuple = Image.fromarray(extend_tuple)\n train_transform = torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n # torchvision.transforms.Normalize(mean, std),\n ])\n # if item < self.base_dataset_size+80:\n # extend_tuple.save('selection_model/for_view/ti/'+str(item)+'.png')\n # # print(extend_tuple.dtype)\n if self.transform is not None:\n extend_tuple = train_transform(extend_tuple)\n # extend_tuple = to_tensor(extend_tuple)\n # # print(extend_tuple.shape)\n # # print(extend_tuple.dtype)\n return (extend_tuple, self.targets[item], item)\n # return self.extend_dataset[self.ti_indices_map[item-self.base_dataset_size]] # because torchvision is annoying\n # print(self.extend_dataset[self.ti_indices_map[item-self.base_dataset_size]])\n # else:\n # print('returning base item')\n # 
print(self.base_dataset[item][0].shape)\n # print(self.base_dataset[item][0].dtype)\n return self.base_dataset[item] \n\n# def __repr__(self):\n# fmt_str = 'Semisupervised Dataset ' + self.__class__.__name__ + '\\n'\n# fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n# fmt_str += ' Training: {}\\n'.format(self.train)\n# fmt_str += ' Root Location: {}\\n'.format(self.dataset.root)\n# tmp = ' Transforms (if any): '\n# fmt_str += '{0}{1}\\n'.format(tmp, self.dataset.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n# tmp = ' Target Transforms (if any): '\n# fmt_str += '{0}{1}'.format(tmp, self.dataset.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n# return fmt_str\n\n\n"
] |
[
[
"numpy.array",
"torch.Tensor"
]
] |
janismdhanbad/PointCNN
|
[
"23b7f1873b607ebeb5d63e466a65565405a4909a"
] |
[
"data_conversions/prepare_s3dis_data.py"
] |
[
"#!/usr/bin/python3\n'''Prepare Data for S3DIS Segmentation Task.'''\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport math\nimport h5py\nimport argparse\nimport numpy as np\nfrom datetime import datetime\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport data_utils\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--folder', '-f', help='Path to data folder')\n parser.add_argument('--max_point_num', '-m', help='Max point number of each sample', type=int, default=8192)\n parser.add_argument('--block_size', '-b', help='Block size', type=float, default=1.5)\n parser.add_argument('--grid_size', '-g', help='Grid size', type=float, default=0.03)\n parser.add_argument('--save_ply', '-s', help='Convert .pts to .ply', action='store_true')\n\n args = parser.parse_args()\n print(args)\n\n root = args.folder if args.folder else '/home/datumx/data_science_experiments/3D_data/data/S3DIS/prepare_label_rgb'\n max_point_num = args.max_point_num\n\n batch_size = 2048\n data = np.zeros((batch_size, max_point_num, 6))\n data_num = np.zeros((batch_size), dtype=np.int32)\n label = np.zeros((batch_size), dtype=np.int32)\n label_seg = np.zeros((batch_size, max_point_num), dtype=np.int32)\n indices_split_to_full = np.zeros((batch_size, max_point_num), dtype=np.int32)\n\n for area_idx in range(1, 7):\n folder = os.path.join(root, 'Area_%d' % area_idx)\n datasets = [dataset for dataset in os.listdir(folder)]\n for dataset_idx, dataset in enumerate(datasets):\n dataset_marker = os.path.join(folder, dataset, \".dataset\")\n if os.path.exists(dataset_marker):\n print('{}-{}/{} already processed, skipping'.format(datetime.now(), folder, dataset))\n continue\n filename_data = os.path.join(folder, dataset, 'xyzrgb.npy')\n print('{}-Loading {}...'.format(datetime.now(), filename_data))\n xyzrgb = np.load(filename_data)\n\n filename_labels = os.path.join(folder, dataset, 'label.npy')\n print('{}-Loading {}...'.format(datetime.now(), filename_labels))\n labels = np.load(filename_labels).astype(int).flatten()\n\n xyz, rgb = np.split(xyzrgb, [3], axis=-1)\n xyz_min = np.amin(xyz, axis=0, keepdims=True)\n xyz_max = np.amax(xyz, axis=0, keepdims=True)\n xyz_center = (xyz_min + xyz_max) / 2\n xyz_center[0][-1] = xyz_min[0][-1]\n xyz = xyz - xyz_center # align to room bottom center\n rgb = rgb / 255 - 0.5\n\n offsets = [('zero', 0.0), ('half', args.block_size / 2)]\n for offset_name, offset in offsets:\n idx_h5 = 0\n idx = 0\n\n print('{}-Computing block id of {} points...'.format(datetime.now(), xyzrgb.shape[0]))\n xyz_min = np.amin(xyz, axis=0, keepdims=True) - offset\n xyz_max = np.amax(xyz, axis=0, keepdims=True)\n block_size = (args.block_size, args.block_size, 2 * (xyz_max[0, -1] - xyz_min[0, -1]))\n xyz_blocks = np.floor((xyz - xyz_min) / block_size).astype(np.int)\n\n print('{}-Collecting points belong to each block...'.format(datetime.now(), xyzrgb.shape[0]))\n blocks, point_block_indices, block_point_counts = np.unique(xyz_blocks, return_inverse=True,\n return_counts=True, axis=0)\n block_point_indices = np.split(np.argsort(point_block_indices), np.cumsum(block_point_counts[:-1]))\n print('{}-{} is split into {} blocks.'.format(datetime.now(), dataset, blocks.shape[0]))\n\n block_to_block_idx_map = dict()\n for block_idx in range(blocks.shape[0]):\n block = (blocks[block_idx][0], blocks[block_idx][1])\n block_to_block_idx_map[(block[0], block[1])] = block_idx\n\n 
# merge small blocks into one of their big neighbors\n block_point_count_threshold = max_point_num/10\n nbr_block_offsets = [(0, 1), (1, 0), (0, -1), (-1, 0), (-1, 1), (1, 1), (1, -1), (-1, -1)]\n block_merge_count = 0\n for block_idx in range(blocks.shape[0]):\n if block_point_counts[block_idx] >= block_point_count_threshold:\n continue\n\n block = (blocks[block_idx][0], blocks[block_idx][1])\n for x, y in nbr_block_offsets:\n nbr_block = (block[0] + x, block[1] + y)\n if nbr_block not in block_to_block_idx_map:\n continue\n\n nbr_block_idx = block_to_block_idx_map[nbr_block]\n if block_point_counts[nbr_block_idx] < block_point_count_threshold:\n continue\n\n block_point_indices[nbr_block_idx] = np.concatenate(\n [block_point_indices[nbr_block_idx], block_point_indices[block_idx]], axis=-1)\n block_point_indices[block_idx] = np.array([], dtype=np.int)\n block_merge_count = block_merge_count + 1\n break\n print('{}-{} of {} blocks are merged.'.format(datetime.now(), block_merge_count, blocks.shape[0]))\n\n idx_last_non_empty_block = 0\n for block_idx in reversed(range(blocks.shape[0])):\n if block_point_indices[block_idx].shape[0] != 0:\n idx_last_non_empty_block = block_idx\n break\n\n # uniformly sample each block\n for block_idx in range(idx_last_non_empty_block + 1):\n point_indices = block_point_indices[block_idx]\n if point_indices.shape[0] == 0:\n continue\n block_points = xyz[point_indices]\n block_min = np.amin(block_points, axis=0, keepdims=True)\n xyz_grids = np.floor((block_points - block_min) / args.grid_size).astype(np.int)\n grids, point_grid_indices, grid_point_counts = np.unique(xyz_grids, return_inverse=True,\n return_counts=True, axis=0)\n grid_point_indices = np.split(np.argsort(point_grid_indices), np.cumsum(grid_point_counts[:-1]))\n grid_point_count_avg = int(np.average(grid_point_counts))\n point_indices_repeated = []\n for grid_idx in range(grids.shape[0]):\n point_indices_in_block = grid_point_indices[grid_idx]\n repeat_num = math.ceil(grid_point_count_avg / point_indices_in_block.shape[0])\n if repeat_num > 1:\n point_indices_in_block = np.repeat(point_indices_in_block, repeat_num)\n np.random.shuffle(point_indices_in_block)\n point_indices_in_block = point_indices_in_block[:grid_point_count_avg]\n point_indices_repeated.extend(list(point_indices[point_indices_in_block]))\n block_point_indices[block_idx] = np.array(point_indices_repeated)\n block_point_counts[block_idx] = len(point_indices_repeated)\n\n for block_idx in range(idx_last_non_empty_block + 1):\n point_indices = block_point_indices[block_idx]\n if point_indices.shape[0] == 0:\n continue\n\n block_point_num = point_indices.shape[0]\n block_split_num = int(math.ceil(block_point_num * 1.0 / max_point_num))\n point_num_avg = int(math.ceil(block_point_num * 1.0 / block_split_num))\n point_nums = [point_num_avg] * block_split_num\n point_nums[-1] = block_point_num - (point_num_avg * (block_split_num - 1))\n starts = [0] + list(np.cumsum(point_nums))\n\n np.random.shuffle(point_indices)\n block_points = xyz[point_indices]\n block_rgb = rgb[point_indices]\n block_labels = labels[point_indices]\n x, y, z = np.split(block_points, (1, 2), axis=-1)\n block_xzyrgb = np.concatenate([x, z, y, block_rgb], axis=-1)\n\n for block_split_idx in range(block_split_num):\n start = starts[block_split_idx]\n point_num = point_nums[block_split_idx]\n end = start + point_num\n idx_in_batch = idx % batch_size\n data[idx_in_batch, 0:point_num, ...] 
= block_xzyrgb[start:end, :]\n data_num[idx_in_batch] = point_num\n label[idx_in_batch] = dataset_idx # won't be used...\n label_seg[idx_in_batch, 0:point_num] = block_labels[start:end]\n indices_split_to_full[idx_in_batch, 0:point_num] = point_indices[start:end]\n\n if ((idx + 1) % batch_size == 0) or \\\n (block_idx == idx_last_non_empty_block and block_split_idx == block_split_num - 1):\n item_num = idx_in_batch + 1\n filename_h5 = os.path.join(folder, dataset, '%s_%d.h5' % (offset_name, idx_h5))\n print('{}-Saving {}...'.format(datetime.now(), filename_h5))\n\n file = h5py.File(filename_h5, 'w')\n file.create_dataset('data', data=data[0:item_num, ...])\n file.create_dataset('data_num', data=data_num[0:item_num, ...])\n file.create_dataset('label', data=label[0:item_num, ...])\n file.create_dataset('label_seg', data=label_seg[0:item_num, ...])\n file.create_dataset('indices_split_to_full', data=indices_split_to_full[0:item_num, ...])\n file.close()\n\n if args.save_ply:\n print('{}-Saving ply of {}...'.format(datetime.now(), filename_h5))\n filepath_label_ply = os.path.join(folder, dataset, 'ply_label',\n 'label_%s_%d' % (offset_name, idx_h5))\n data_utils.save_ply_property_batch(data[0:item_num, :, 0:3],\n label_seg[0:item_num, ...],\n filepath_label_ply, data_num[0:item_num, ...], 14)\n\n filepath_rgb_ply = os.path.join(folder, dataset, 'ply_rgb',\n 'rgb_%s_%d' % (offset_name, idx_h5))\n data_utils.save_ply_color_batch(data[0:item_num, :, 0:3],\n (data[0:item_num, :, 3:] + 0.5) * 255,\n filepath_rgb_ply, data_num[0:item_num, ...])\n\n idx_h5 = idx_h5 + 1\n idx = idx + 1\n\n # Marker indicating we've processed this dataset\n open(dataset_marker, \"w\").close()\n\nif __name__ == '__main__':\n main()\n print('{}-Done.'.format(datetime.now()))\n"
] |
[
[
"numpy.concatenate",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.load",
"numpy.split",
"numpy.random.shuffle",
"numpy.amax",
"numpy.argsort",
"numpy.amin",
"numpy.cumsum",
"numpy.average",
"numpy.unique",
"numpy.floor"
]
] |
hellock/mmaction2
|
[
"def3b651ab7818ece637d8637dddacbca027910c"
] |
[
"tests/test_dataset.py"
] |
[
"import os\nimport os.path as osp\nimport tempfile\n\nimport mmcv\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal\n\nfrom mmaction.datasets import (ActivityNetDataset, RawframeDataset,\n RepeatDataset, VideoDataset)\n\n\nclass TestDataset(object):\n\n @staticmethod\n def check_keys_contain(result_keys, target_keys):\n \"\"\"Check if all elements in target_keys is in result_keys.\"\"\"\n return set(target_keys).issubset(set(result_keys))\n\n @classmethod\n def setup_class(cls):\n cls.data_prefix = osp.join(osp.dirname(__file__), 'data')\n cls.frame_ann_file = osp.join(cls.data_prefix, 'frame_test_list.txt')\n cls.video_ann_file = osp.join(cls.data_prefix, 'video_test_list.txt')\n cls.action_ann_file = osp.join(cls.data_prefix,\n 'action_test_anno.json')\n\n cls.frame_pipeline = [\n dict(\n type='SampleFrames',\n clip_len=32,\n frame_interval=2,\n num_clips=1),\n dict(type='FrameSelector', io_backend='disk')\n ]\n cls.video_pipeline = [\n dict(type='OpenCVInit'),\n dict(\n type='SampleFrames',\n clip_len=32,\n frame_interval=2,\n num_clips=1),\n dict(type='OpenCVDecode')\n ]\n cls.action_pipeline = []\n\n def test_rawframe_dataset(self):\n rawframe_dataset = RawframeDataset(self.frame_ann_file,\n self.frame_pipeline,\n self.data_prefix)\n rawframe_infos = rawframe_dataset.video_infos\n frame_dir = osp.join(self.data_prefix, 'test_imgs')\n assert rawframe_infos == [\n dict(frame_dir=frame_dir, total_frames=5, label=127)\n ] * 2\n\n def test_dataset_realpath(self):\n dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline,\n '.')\n assert dataset.data_prefix == osp.realpath('.')\n dataset = RawframeDataset(self.frame_ann_file, self.frame_pipeline,\n 's3://good')\n assert dataset.data_prefix == 's3://good'\n\n def test_video_dataset(self):\n video_dataset = VideoDataset(self.video_ann_file, self.video_pipeline,\n self.data_prefix)\n video_infos = video_dataset.video_infos\n video_filename = osp.join(self.data_prefix, 'test.mp4')\n assert video_infos == [dict(filename=video_filename, label=0)] * 2\n\n def test_rawframe_pipeline(self):\n target_keys = ['frame_dir', 'total_frames', 'label', 'filename_tmpl']\n\n # RawframeDataset not in test mode\n rawframe_dataset = RawframeDataset(\n self.frame_ann_file,\n self.frame_pipeline,\n self.data_prefix,\n test_mode=False)\n result = rawframe_dataset[0]\n assert self.check_keys_contain(result.keys(), target_keys)\n\n # RawframeDataset in multi-class tasks\n rawframe_dataset = RawframeDataset(\n self.frame_ann_file,\n self.frame_pipeline,\n self.data_prefix,\n multi_class=True,\n num_classes=400,\n test_mode=False)\n result = rawframe_dataset[0]\n assert self.check_keys_contain(result.keys(), target_keys)\n\n # RawframeDataset in test mode\n rawframe_dataset = RawframeDataset(\n self.frame_ann_file,\n self.frame_pipeline,\n self.data_prefix,\n test_mode=True)\n result = rawframe_dataset[0]\n assert self.check_keys_contain(result.keys(), target_keys)\n\n # RawframeDataset in multi-class tasks in test mode\n rawframe_dataset = RawframeDataset(\n self.frame_ann_file,\n self.frame_pipeline,\n self.data_prefix,\n multi_class=True,\n num_classes=400,\n test_mode=True)\n result = rawframe_dataset[0]\n assert self.check_keys_contain(result.keys(), target_keys)\n\n def test_video_pipeline(self):\n target_keys = ['filename', 'label']\n\n # VideoDataset not in test mode\n video_dataset = VideoDataset(\n self.video_ann_file,\n self.video_pipeline,\n self.data_prefix,\n test_mode=False)\n result = video_dataset[0]\n assert 
self.check_keys_contain(result.keys(), target_keys)\n\n # VideoDataset in test mode\n video_dataset = VideoDataset(\n self.video_ann_file,\n self.video_pipeline,\n self.data_prefix,\n test_mode=True)\n result = video_dataset[0]\n assert self.check_keys_contain(result.keys(), target_keys)\n\n def test_action_pipeline(self):\n target_keys = ['video_name', 'data_prefix']\n\n # ActivityNet Dataset not in test mode\n action_dataset = ActivityNetDataset(\n self.action_ann_file,\n self.action_pipeline,\n self.data_prefix,\n test_mode=False)\n result = action_dataset[0]\n assert self.check_keys_contain(result.keys(), target_keys)\n\n # ActivityNet Dataset in test mode\n action_dataset = ActivityNetDataset(\n self.action_ann_file,\n self.action_pipeline,\n self.data_prefix,\n test_mode=True)\n result = action_dataset[0]\n assert self.check_keys_contain(result.keys(), target_keys)\n\n def test_rawframe_evaluate(self):\n rawframe_dataset = RawframeDataset(self.frame_ann_file,\n self.frame_pipeline,\n self.data_prefix)\n\n with pytest.raises(TypeError):\n # results must be a list\n rawframe_dataset.evaluate('0.5')\n\n with pytest.raises(AssertionError):\n # The length of results must be equal to the dataset len\n rawframe_dataset.evaluate([0] * 5)\n\n with pytest.raises(TypeError):\n # topk must be int or tuple of int\n rawframe_dataset.evaluate([0] * len(rawframe_dataset), topk=1.0)\n\n with pytest.raises(KeyError):\n # unsupported metric\n rawframe_dataset.evaluate(\n [0] * len(rawframe_dataset), metrics='iou')\n\n # evaluate top_k_accuracy and mean_class_accuracy metric\n results = [np.array([0.1, 0.5, 0.4])] * 2\n eval_result = rawframe_dataset.evaluate(\n results, metrics=['top_k_accuracy', 'mean_class_accuracy'])\n assert set(eval_result.keys()) == set(\n ['top1_acc', 'top5_acc', 'mean_class_accuracy'])\n\n def test_video_evaluate(self):\n video_dataset = VideoDataset(self.video_ann_file, self.video_pipeline,\n self.data_prefix)\n\n with pytest.raises(TypeError):\n # results must be a list\n video_dataset.evaluate('0.5')\n\n with pytest.raises(AssertionError):\n # The length of results must be equal to the dataset len\n video_dataset.evaluate([0] * 5)\n\n with pytest.raises(TypeError):\n # topk must be int or tuple of int\n video_dataset.evaluate([0] * len(video_dataset), topk=1.0)\n\n with pytest.raises(KeyError):\n # unsupported metric\n video_dataset.evaluate([0] * len(video_dataset), metrics='iou')\n\n # evaluate top_k_accuracy and mean_class_accuracy metric\n results = [np.array([0.1, 0.5, 0.4])] * 2\n eval_result = video_dataset.evaluate(\n results, metrics=['top_k_accuracy', 'mean_class_accuracy'])\n assert set(eval_result.keys()) == set(\n ['top1_acc', 'top5_acc', 'mean_class_accuracy'])\n\n def test_base_dataset(self):\n video_dataset = VideoDataset(self.video_ann_file, self.video_pipeline,\n self.data_prefix)\n assert len(video_dataset) == 2\n assert type(video_dataset[0]) == dict\n\n def test_repeat_dataset(self):\n rawframe_dataset = RawframeDataset(self.frame_ann_file,\n self.frame_pipeline,\n self.data_prefix)\n repeat_dataset = RepeatDataset(rawframe_dataset, 5)\n assert len(repeat_dataset) == 10\n result_a = repeat_dataset[0]\n result_b = repeat_dataset[2]\n assert set(result_a.keys()) == set(result_b.keys())\n for key in result_a:\n if isinstance(result_a[key], np.ndarray):\n assert np.equal(result_a[key], result_b[key]).all()\n elif isinstance(result_a[key], list):\n assert all(\n np.array_equal(a, b)\n for (a, b) in zip(result_a[key], result_b[key]))\n else:\n assert 
result_a[key] == result_b[key]\n\n def test_activitynet_dataset(self):\n activitynet_dataset = ActivityNetDataset(self.action_ann_file,\n self.action_pipeline,\n self.data_prefix)\n activitynet_infos = activitynet_dataset.video_infos\n assert activitynet_infos == [\n dict(\n video_name='v_test1',\n duration_second=1,\n duration_frame=30,\n annotations=[dict(segment=[0.3, 0.6], label='Rock climbing')],\n feature_frame=30,\n fps=30.0,\n rfps=30),\n dict(\n video_name='v_test2',\n duration_second=2,\n duration_frame=48,\n annotations=[dict(segment=[1.0, 2.0], label='Drinking beer')],\n feature_frame=48,\n fps=24.0,\n rfps=24.0)\n ]\n\n def test_activitynet_proposals2json(self):\n activitynet_dataset = ActivityNetDataset(self.action_ann_file,\n self.action_pipeline,\n self.data_prefix)\n results = [\n dict(\n video_name='v_test1',\n proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]),\n dict(\n video_name='v_test2',\n proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])\n ]\n result_dict = activitynet_dataset.proposals2json(results)\n assert result_dict == dict(\n test1=[{\n 'segment': [0.1, 0.9],\n 'score': 0.1\n }],\n test2=[{\n 'segment': [10.1, 20.9],\n 'score': 0.9\n }])\n result_dict = activitynet_dataset.proposals2json(results, True)\n assert result_dict == dict(\n test1=[{\n 'segment': [0.1, 0.9],\n 'score': 0.1\n }],\n test2=[{\n 'segment': [10.1, 20.9],\n 'score': 0.9\n }])\n\n def test_activitynet_evaluate(self):\n activitynet_dataset = ActivityNetDataset(self.action_ann_file,\n self.action_pipeline,\n self.data_prefix)\n\n with pytest.raises(TypeError):\n # results must be a list\n activitynet_dataset.evaluate('0.5')\n\n with pytest.raises(AssertionError):\n # The length of results must be equal to the dataset len\n activitynet_dataset.evaluate([0] * 5)\n\n with pytest.raises(KeyError):\n # unsupported metric\n activitynet_dataset.evaluate(\n [0] * len(activitynet_dataset), metrics='iou')\n\n # evaluate AR@AN metric\n results = [\n dict(\n video_name='v_test1',\n proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]),\n dict(\n video_name='v_test2',\n proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])\n ]\n eval_result = activitynet_dataset.evaluate(results, metrics=['AR@AN'])\n assert set(eval_result) == set(\n ['auc', 'AR@1', 'AR@5', 'AR@10', 'AR@100'])\n\n def test_activitynet_dump_results(self):\n activitynet_dataset = ActivityNetDataset(self.action_ann_file,\n self.action_pipeline,\n self.data_prefix)\n # test dumping json file\n results = [\n dict(\n video_name='v_test1',\n proposal_list=[dict(segment=[0.1, 0.9], score=0.1)]),\n dict(\n video_name='v_test2',\n proposal_list=[dict(segment=[10.1, 20.9], score=0.9)])\n ]\n dump_results = {\n 'version': 'VERSION 1.3',\n 'results': {\n 'test1': [{\n 'segment': [0.1, 0.9],\n 'score': 0.1\n }],\n 'test2': [{\n 'segment': [10.1, 20.9],\n 'score': 0.9\n }]\n },\n 'external_data': {}\n }\n\n tmp_filename = osp.join(tempfile.gettempdir(), 'result.json')\n activitynet_dataset.dump_results(results, tmp_filename, 'json')\n assert osp.isfile(tmp_filename)\n with open(tmp_filename, 'r+') as f:\n load_obj = mmcv.load(f, file_format='json')\n assert load_obj == dump_results\n os.remove(tmp_filename)\n\n # test dumping csv file\n results = [('test_video', np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9,\n 10]]))]\n with tempfile.TemporaryDirectory() as tmpdir:\n activitynet_dataset.dump_results(results, tmpdir, 'csv')\n load_obj = np.loadtxt(\n osp.join(tmpdir, 'test_video.csv'),\n dtype=np.float32,\n delimiter=',',\n skiprows=1)\n 
assert_array_equal(\n load_obj,\n np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]],\n dtype=np.float32))\n"
] |
[
[
"numpy.equal",
"numpy.array",
"numpy.array_equal"
]
] |
zhihansh/federated-oss
|
[
"38cfcb05702ff7297db76d3ccb5f5afef53ca09b"
] |
[
"tensorflow_federated/python/learning/federated_sgd_test.py"
] |
[
"# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for local client training implemented in ClientSgd.\n\nIntegration tests that include server averaging and alternative tff.aggregator\nfactories are in found in\ntensorflow_federated/python/tests/federated_sgd_integration_test.py.\n\"\"\"\n\nimport collections\nfrom unittest import mock\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import test_utils\nfrom tensorflow_federated.python.core.api import test_case\nfrom tensorflow_federated.python.learning import client_weight_lib\nfrom tensorflow_federated.python.learning import federated_sgd\nfrom tensorflow_federated.python.learning import model_examples\nfrom tensorflow_federated.python.learning import model_update_aggregator\nfrom tensorflow_federated.python.learning import model_utils\nfrom tensorflow_federated.python.learning.framework import dataset_reduce\n\n\nclass FederatedSgdTest(test_case.TestCase, parameterized.TestCase):\n\n def dataset(self):\n # Create a dataset with 4 examples:\n dataset = tf.data.Dataset.from_tensor_slices(\n collections.OrderedDict(\n x=[[1.0, 0.0], [1.0, 0.0], [1.0, 0.0], [1.0, 0.0]],\n y=[[1.0], [1.0], [1.0], [1.0]]))\n # Repeat the dataset 2 times with batches of 3 examples,\n # producing 3 minibatches (the last one with only 2 examples).\n # Note that `batch` is required for this dataset to be useable,\n # as it adds the batch dimension which is expected by the model.\n return dataset.repeat(2).batch(3)\n\n def model(self):\n return model_examples.LinearRegression(feature_dim=2)\n\n def initial_weights(self):\n return model_utils.ModelWeights(\n trainable=[\n tf.constant([[0.0], [0.0]]),\n tf.constant(0.0),\n ],\n non_trainable=[0.0])\n\n def test_clietsgd_fails_for_non_tff_model(self):\n keras_model = tf.keras.Sequential([tf.keras.layers.Dense(1)])\n with self.assertRaisesRegex(TypeError, 'Model'):\n federated_sgd.ClientSgd(keras_model)\n\n @parameterized.named_parameters(\n ('non-simulation_weighted', False, True),\n ('non-simulation_unweighted', False, False),\n ('simulation_weighted', True, True),\n ('simulation_unweighted', True, False),\n )\n @test_utils.skip_test_for_multi_gpu\n def test_client_tf(self, simulation, weighted):\n model = self.model()\n dataset = self.dataset()\n if weighted:\n client_weighting = client_weight_lib.ClientWeighting.NUM_EXAMPLES\n else:\n client_weighting = client_weight_lib.ClientWeighting.UNIFORM\n client_tf = federated_sgd.ClientSgd(\n model,\n client_weighting=client_weighting,\n use_experimental_simulation_loop=simulation)\n client_outputs = self.evaluate(client_tf(dataset, self.initial_weights()))\n\n # Both trainable parameters should have gradients, and we don't return the\n # non-trainable 'c'. 
Model deltas for squared error:\n self.assertAllClose(client_outputs.weights_delta, [[[1.0], [0.0]], 1.0])\n if weighted:\n self.assertAllClose(client_outputs.weights_delta_weight, 8.0)\n else:\n self.assertAllClose(client_outputs.weights_delta_weight, 1.0)\n\n self.assertDictContainsSubset(\n client_outputs.model_output, {\n 'num_examples': 8,\n 'num_examples_float': 8.0,\n 'num_batches': 3,\n 'loss': 0.5,\n })\n self.assertEqual(client_outputs.optimizer_output['has_non_finite_delta'], 0)\n\n @parameterized.named_parameters(('_inf', np.inf), ('_nan', np.nan))\n def test_non_finite_aggregation(self, bad_value):\n model = self.model()\n dataset = self.dataset()\n client_tf = federated_sgd.ClientSgd(model)\n init_weights = self.initial_weights()\n init_weights.trainable[1] = bad_value\n client_outputs = client_tf(dataset, init_weights)\n self.assertEqual(self.evaluate(client_outputs.weights_delta_weight), 0.0)\n self.assertAllClose(\n self.evaluate(client_outputs.weights_delta), [[[0.0], [0.0]], 0.0])\n\n @parameterized.named_parameters(('non-simulation', False),\n ('simulation', True))\n @mock.patch.object(\n dataset_reduce,\n '_dataset_reduce_fn',\n wraps=dataset_reduce._dataset_reduce_fn)\n @test_utils.skip_test_for_multi_gpu\n def test_client_tf_dataset_reduce_fn(self, simulation, mock_method):\n model = self.model()\n dataset = self.dataset()\n client_tf = federated_sgd.ClientSgd(\n model, use_experimental_simulation_loop=simulation)\n client_tf(dataset, self.initial_weights())\n if simulation:\n mock_method.assert_not_called()\n else:\n mock_method.assert_called()\n\n\nclass FederatedSGDTest(test_case.TestCase, parameterized.TestCase):\n \"\"\"Tests construction of FedSGD training process.\"\"\"\n\n # pylint: disable=g-complex-comprehension\n @parameterized.named_parameters(\n ('robust_aggregator', model_update_aggregator.robust_aggregator),\n ('dp_aggregator', lambda: model_update_aggregator.dp_aggregator(1e-3, 3)),\n ('compression_aggregator',\n model_update_aggregator.compression_aggregator),\n ('secure_aggreagtor', model_update_aggregator.secure_aggregator),\n )\n # pylint: enable=g-complex-comprehension\n def test_construction_calls_model_fn(self, aggregation_factory):\n # Assert that the the process building does not call `model_fn` too many\n # times. `model_fn` can potentially be expensive (loading weights,\n # processing, etc).\n mock_model_fn = mock.Mock(side_effect=model_examples.LinearRegression)\n federated_sgd.build_federated_sgd_process(\n model_fn=mock_model_fn,\n model_update_aggregation_factory=aggregation_factory())\n # TODO(b/186451541): reduce the number of calls to model_fn.\n self.assertEqual(mock_model_fn.call_count, 3)\n\n\nif __name__ == '__main__':\n test_case.main()\n"
] |
[
[
"tensorflow.constant",
"tensorflow.keras.layers.Dense"
]
] |
stenczelt/pyGSM
|
[
"48e7a710744ec768e2c4a0f4d8dc1f9ffd948ce1"
] |
[
"pygsm/optimizers/lbfgs.py"
] |
[
"\n\n# standard library imports\nimport os\nimport sys\nfrom io import StringIO\n\n# third party\nimport numpy as np\nfrom scipy.optimize.lbfgsb import LbfgsInvHessProduct\n\n# local application imports\nfrom pygsm import utilities\n\nfrom .base_optimizer import base_optimizer\n\n\nclass iterationData:\n \"\"\"docstring for iterationData\"\"\"\n def __init__(self, alpha, s, y):\n self.alpha = alpha\n self.s_prim = s #step\n self.y_prim = y #diff in grad\n\nclass lbfgs(base_optimizer):\n \"\"\"the class of lbfgs method\"\"\"\n\n def __init__(self,options):\n super(lbfgs,self).__init__(options)\n self.k = 0\n self.end = 0\n\n def optimize(\n self,\n molecule,\n refE=0.,\n opt_type='UNCONSTRAINED',\n opt_steps=20,\n maxcor=10,\n ictan=None,\n xyzframerate=4,\n verbose=False,\n path=os.getcwd(),\n ):\n\n # stash/initialize some useful attributes\n print(\" initial E %5.4f\" % (molecule.energy - refE))\n geoms = []\n energies=[]\n geoms.append(molecule.geometry)\n energies.append(molecule.energy-refE)\n self.check_inputs(molecule,opt_type,ictan)\n nconstraints=self.get_nconstraints(opt_type)\n self.buf = StringIO()\n\n # form initial coord basis\n constraints = self.get_constraint_vectors(molecule,opt_type,ictan)\n molecule.update_coordinate_basis(constraints=constraints)\n\n if opt_type=='SEAM' or opt_type=='MECI':\n self.opt_cross=True\n\n # get coordinates\n x = np.copy(molecule.coordinates)\n xyz = np.copy(molecule.xyz)\n x_prim = molecule.primitive_internal_values\n num_coords = molecule.num_coordinates - nconstraints - molecule.num_frozen_atoms*3\n\n # Evaluate the function value and its gradient.\n fx = molecule.energy\n g = molecule.gradient.copy()\n\n # project out the constraint\n gc = g.copy()\n for c in molecule.constraints.T:\n gc -= np.dot(gc.T,c[:,np.newaxis])*c[:,np.newaxis]\n\n g_prim = utilities.block_matrix.dot(molecule.coord_basis, gc)\n molecule.gradrms = np.sqrt(np.dot(gc.T,gc)/num_coords)\n\n # primitive constraint step\n self.cstep_prim = np.zeros_like(g_prim)\n dE = molecule.difference_energy\n\n # Not checking convergence here anymore ...\n #if molecule.PES.__class__.__name__!=\"PES\" and self.opt_cross:\n # if molecule.gradrms < self.conv_grms and abs(dE)<1.0:\n # print(\" converged\")\n # return geoms,energies\n #elif molecule.gradrms < self.conv_grms:\n # print(\" converged\")\n # return geoms,energies\n\n ## reset k in principle k does not have to reset but . . 
.\n # TRY Turning off Feb 2020\n if opt_type != 'CLIMB':\n self.k = 0\n self.end=0\n\n # initialize the iteration data list\n if self.k==0:\n self.lm = []\n for i in range(0, maxcor):\n s_prim = np.zeros_like(g_prim)\n y_prim = np.zeros_like(g_prim)\n self.lm.append(iterationData(0.0, s_prim.flatten(), y_prim.flatten()))\n\n for ostep in range(opt_steps):\n print(\" On opt step {} \".format(ostep+1))\n\n\n SCALE =self.options['SCALEQN']\n if molecule.newHess>0: SCALE = self.options['SCALEQN']*molecule.newHess\n\n if self.k!=0:\n # update vectors s and y:\n # TODO this doesn't work exactly with constraint steps\n self.lm[self.end].s_prim = molecule.coord_obj.Prims.calcDiff(xyz,self.xyzp) - self.cstep_prim.flatten()\n self.lm[self.end].y_prim = g_prim - self.gp_prim\n\n self.end = (self.end + 1) % maxcor\n #j = self.end\n bound = min(self.k, maxcor)\n s_prim = np.array([self.lm[i].s_prim.flatten() for i in range(maxcor)])\n y_prim = np.array([self.lm[i].y_prim.flatten() for i in range(maxcor)])\n hess_inv = LbfgsInvHessProduct(s_prim[:bound],y_prim[:bound])\n # compute the negative gradients\n d_prim = -g_prim\n # perform matrix product\n d_prim = hess_inv._matvec(d_prim)\n d_prim = np.reshape(d_prim,(-1,1))/SCALE\n else:\n # d: store the negative gradient of the object function on point x.\n d_prim = -g_prim/SCALE\n self.k = self.k + 1\n\n # form in DLC basis (does nothing if cartesian)\n d = utilities.block_matrix.dot(utilities.block_matrix.transpose(molecule.coord_basis), d_prim)\n\n # normalize the direction\n actual_step = np.linalg.norm(d)\n print(\" actual_step= %1.5f\"% actual_step)\n d = d/actual_step #normalize\n if actual_step>self.DMAX:\n step=self.DMAX\n print(\" reducing step, new step = %1.5f\" %step)\n else:\n step=actual_step\n\n # store\n xp = x.copy()\n self.xyzp = xyz.copy()\n gp = g.copy()\n self.gp_prim = utilities.block_matrix.dot(molecule.coord_basis, gc)\n fxp = fx\n pgradrms = molecule.gradrms\n\n # => calculate constraint step <= #\n constraint_steps = self.get_constraint_steps(molecule,opt_type,g)\n self.cstep_prim = utilities.block_matrix.dot(molecule.coord_basis, constraint_steps)\n\n # line search\n ls = self.Linesearch(nconstraints, x, fx, gc, d, step, xp,constraint_steps,self.linesearch_parameters,molecule,verbose)\n\n # save new values from linesearch\n molecule = ls['molecule']\n step = ls['step']\n x = ls['x']\n fx = ls['fx']\n g = ls['g']\n\n dEstep = fx - fxp\n dq = x-xp\n\n # TODO dEpre is missing second order effects or is it?\n dEpre = np.dot(gc.T,dq) * utilities.units.KCAL_MOL_PER_AU\n constraint_energy = np.dot(gp.T,constraint_steps) * utilities.units.KCAL_MOL_PER_AU\n if opt_type not in ['UNCONSTRAINED','ICTAN']:\n print(\"constraint_energy: %1.4f\" % constraint_energy)\n dEpre += constraint_energy\n\n #if abs(dEpre)<0.05:\n # dEpre = np.sign(dEpre)*0.05\n ratio = dEstep/dEpre\n print(\" dEstep=%5.4f\" %dEstep)\n print(\" dEpre=%5.4f\" %dEpre)\n print(\" ratio=%5.4f\" %ratio)\n\n # revert to the privious point\n if ls['status'] < 0 or (ratio<0. 
and opt_type!='CLIMB'):\n if ratio<0.:\n ls['status']=-1\n x = xp.copy()\n molecule.xyz = self.xyzp.copy()\n g = gp.copy()\n fx = fxp\n #fx = molecule.energy\n ratio=0.\n dEstep=0.\n\n print('[ERROR] the point return to the previous point')\n self.lm = []\n for i in range(0, maxcor):\n s_prim = np.zeros_like(g_prim)\n y_prim = np.zeros_like(g_prim)\n self.lm.append(iterationData(0.0, s_prim.flatten(), y_prim.flatten()))\n self.k = 0\n self.end =0\n molecule.newHess=5\n if self.DMAX <= self.DMIN:\n print(\" Reached minimum step,exiting\")\n geoms.append(molecule.geometry)\n energies.append(molecule.energy-refE)\n break\n\n self.DMAX = ls['step']/2\n if self.DMAX < self.DMIN:\n self.DMAX = self.DMIN\n else:\n # update molecule xyz\n xyz = molecule.update_xyz(x-xp)\n\n # if ratio is less than 0.3 than reduce DMAX\n flag=True\n if ratio<0.3 and ls['status']==0: #and abs(dEpre)>0.05:\n print(\" Reducing DMAX\")\n self.DMAX /= 1.5\n if self.DMAX < self.DMIN:\n self.DMAX = self.DMIN\n if molecule.newHess<5:\n molecule.newHess+=1\n flag=False\n\n if opt_type==\"CLIMB\":\n if self.SCALE_CLIMB<10. and opt_type=='CLIMB':\n self.SCALE_CLIMB+=1.\n print('SCALING CLIMB BY {}'.format(self.SCALE_CLIMB))\n elif ratio>0.3:\n molecule.newHess-=1\n if opt_type==\"CLIMB\":\n if self.SCALE_CLIMB>1.:\n self.SCALE_CLIMB -=1.\n\n\n # project out the constraints\n gc = g.copy()\n for c in molecule.constraints.T:\n gc -= np.dot(gc.T,c[:,np.newaxis])*c[:,np.newaxis]\n g_prim = utilities.block_matrix.dot(molecule.coord_basis, gc)\n\n dE = molecule.difference_energy\n if dE < 100.:\n print(\" difference energy is %5.4f\" % dE)\n molecule.gradrms = np.sqrt(np.dot(gc.T,gc)/num_coords)\n\n\n # control step size NEW FEB 2020\n if flag:\n if ls['status']==0: # passed\n dgradrms = molecule.gradrms - pgradrms\n print(\"dgradrms \",dgradrms)\n if ls['step'] > self.DMAX:\n if ls['step']<= self.options['abs_max_step']: # absolute max\n print(\" Increasing DMAX to {}\".format(ls['step']))\n self.DMAX = ls['step']\n else:\n self.DMAX =self.options['abs_max_step']\n elif ls['step']<self.DMAX:\n if ls['step']>=self.DMIN: # absolute min\n print(\" Decreasing DMAX to {}\".format(ls['step']))\n self.DMAX = ls['step']\n elif ls['step']<=self.DMIN:\n self.DMAX = self.DMIN\n print(\" Decreasing DMAX to {}\".format(self.DMIN))\n elif ratio>0.85 and ratio<1.1 and actual_step>self.DMAX and dgradrms<-0.00005:\n print(\" HERE increasing DMAX\")\n self.DMAX *= 1.1\n if self.DMAX>self.options['abs_max_step']:\n self.DMAX=self.options['abs_max_step']\n else:\n print(\"status not zero\")\n\n\n if ostep % xyzframerate==0:\n geoms.append(molecule.geometry)\n energies.append(molecule.energy-refE)\n utilities.manage_xyz.write_xyzs_w_comments('{}/opt_{}.xyz'.format(path, molecule.node_id), geoms, energies, scale=1.)\n\n if self.options['print_level']>0:\n print(\" Node: %d Opt step: %d E: %5.4f predE: %5.4f ratio: %1.3f gradrms: %1.5f ss: %1.3f DMAX: %1.3f\" % (molecule.node_id,ostep+1,fx-refE,dEpre,ratio,molecule.gradrms,step,self.DMAX))\n self.buf.write(u' Node: %d Opt step: %d E: %5.4f predE: %5.4f ratio: %1.3f gradrms: %1.5f ss: %1.3f DMAX: %1.3f\\n' % (molecule.node_id,ostep+1,fx-refE,dEpre,ratio,molecule.gradrms,step,self.DMAX))\n\n gmax = float(np.max(np.absolute(gc)))\n disp = float(np.linalg.norm((xyz-self.xyzp).flatten()))\n print(\" gmax %5.4f disp %5.4f dEstep %5.4f gradrms %5.4f\\n\" % (gmax,disp,dEstep,molecule.gradrms))\n self.converged=False\n if self.opt_cross and abs(dE)<self.conv_dE and molecule.gradrms < self.conv_grms and abs(gmax) 
< self.conv_gmax and abs(dEstep) < self.conv_Ediff and abs(disp) < self.conv_disp and ls['status']==0:\n\n # TODO Seam Climb\n if opt_type==\"TS-SEAM\":\n gts = np.dot(g.T,molecule.constraints[:,0])\n if abs(gts)<self.conv_grms*5:\n self.converged=True\n else:\n self.converged=True\n\n elif not self.opt_cross and molecule.gradrms < self.conv_grms and abs(gmax) < self.conv_gmax and abs(dEstep) < self.conv_Ediff and abs(disp) < self.conv_disp:\n if opt_type==\"CLIMB\":\n gts = np.dot(g.T,molecule.constraints[:,0])\n if abs(gts)<self.conv_grms*5:\n self.converged=True\n else:\n self.converged=True\n\n if self.converged:\n print(\" converged\")\n if ostep % xyzframerate!=0:\n geoms.append(molecule.geometry)\n energies.append(molecule.energy-refE)\n utilities.manage_xyz.write_xyzs_w_comments('{}/opt_{}.xyz'.format(path, molecule.node_id), geoms, energies, scale=1.)\n break\n #print \" ########## DONE WITH TOTAL STEP #########\"\n\n #update DLC --> this changes q, g, Hint\n if not molecule.coord_obj.__class__.__name__=='CartesianCoordinates':\n if opt_type == 'SEAM' or opt_type==\"MECI\":\n constraints = self.get_constraint_vectors(molecule,opt_type,ictan)\n molecule.update_coordinate_basis(constraints=constraints)\n x = np.copy(molecule.coordinates)\n g = molecule.gradient.copy()\n # project out the constraint\n gc = g.copy()\n for c in molecule.constraints.T:\n gc -= np.dot(gc.T,c[:,np.newaxis])*c[:,np.newaxis]\n g_prim = utilities.block_matrix.dot(molecule.coord_basis, gc)\n sys.stdout.flush()\n\n print(\" opt-summary\")\n print(self.buf.getvalue())\n return geoms,energies\n\n"
] |
[
[
"numpy.zeros_like",
"numpy.linalg.norm",
"numpy.dot",
"numpy.reshape",
"numpy.copy",
"scipy.optimize.lbfgsb.LbfgsInvHessProduct",
"numpy.absolute"
]
] |
MC-kit/mckit-meshes
|
[
"8aa06ff95ffd1dabac95f399d45047325c265f78"
] |
[
"src/mckit_meshes/mesh/geometry_spec.py"
] |
[
"\"\"\"Common mesh geometry specification classes and functions.\n\n## Relative or absolute coordinates\n\n There are variations when coordinates are presented as relative to origin\n or absolute. This depends on is output for specification, or input/output\n to Weight of Meshtal files and is it cartesian or cylinder mesh.\n\n Cartesian:\n\n | | wwinp | meshtal |\n | ===== | ======= | ======== |\n | spec | relative | absolute (but origin is extracted to separate item) |\n | ----- | ------- | -------- |\n | file | relative | absolute |\n\n Cylinder:\n\n | | wwinp | meshtal |\n | ===== | ======= | ======== |\n | spec | relative | relative |\n | ----- | ------- | -------- |\n | file | relative | relative |\n\n The new callers are to use local_coordinates converter to avoid difficulties.\n For the old callers we will use ZERO_ORIGIN for Geometry Specification being\n used in FMesh.\n\n\"\"\"\nfrom typing import Iterable, List, Optional, Sequence, TextIO, Tuple, Union, cast\n\nimport abc\n\nfrom dataclasses import dataclass\n\nimport mckit_meshes.utils as ut\nimport numpy as np\nimport numpy.linalg as linalg\n\nfrom mckit_meshes.utils.cartesian_product import cartesian_product\n\n_2PI = 2.0 * np.pi\n_1_TO_2PI = 1 / _2PI\n__DEG_2_RAD = np.pi / 180.0\nCARTESIAN_BASIS = np.eye(3, dtype=np.double)\nNX, NY, NZ = CARTESIAN_BASIS\n\nDEFAULT_AXIS = NZ\nDEFAULT_VEC = NX\n\n\nZERO_ORIGIN: np.ndarray = np.zeros((3,), dtype=np.double)\n\n\ndef as_float_array(array) -> np.ndarray:\n \"\"\"Convert any sequence of numbers to numpy array of floats.\n\n Args:\n array: Anything that can be converted to numpy ndarray.\n\n Returns:\n np.ndarray: either original or conversion.\n\n \"\"\"\n return np.asarray(array, dtype=float)\n\n\n@dataclass(eq=False)\nclass AbstractGeometrySpecData:\n ibins: np.ndarray\n jbins: np.ndarray\n kbins: np.ndarray\n origin: np.ndarray = ZERO_ORIGIN\n\n def __post_init__(self) -> None:\n \"\"\"Check if a caller provided data in numpy format.\"\"\"\n for b in self.bins:\n if not isinstance(b, np.ndarray):\n raise ValueError(f\"Expected numpy array, actual {b[0]}...{b[-1]}\")\n\n def __hash__(self) -> int:\n return hash(self.bins)\n\n def __eq__(self, other: \"AbstractGeometrySpecData\") -> bool:\n if not isinstance(other, AbstractGeometrySpecData):\n return False\n a, b = self.bins, other.bins\n return len(a) == len(b) and arrays_equal(zip(a, b))\n\n @property\n def bins(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Return tuple of origin and bins.\"\"\"\n return self.origin, self.ibins, self.jbins, self.kbins\n\n\nclass AbstractGeometrySpec(AbstractGeometrySpecData, abc.ABC):\n @property\n @abc.abstractmethod\n def cylinder(self) -> bool:\n ...\n\n @abc.abstractmethod\n def local_coordinates(self, points: np.ndarray) -> np.ndarray:\n ...\n\n @abc.abstractmethod\n def get_mean_square_distance_weights(self, point):\n ...\n\n @abc.abstractmethod\n def calc_cell_centers(self):\n ...\n\n @abc.abstractmethod\n def print_geom(self, io: TextIO, indent: str) -> None:\n ...\n\n # Generic methods\n\n @property\n def bins_shape(self) -> Tuple[int, int, int]:\n return (self.ibins.size - 1), (self.jbins.size - 1), (self.kbins.size - 1)\n\n @property\n def bins_size(self) -> int:\n return (self.ibins.size - 1) * (self.jbins.size - 1) * (self.kbins.size - 1)\n\n @property\n def boundaries(self) -> np.ndarray:\n return np.vstack(\n (\n self.ibins[[0, -1]],\n self.jbins[[0, -1]],\n self.kbins[[0, -1]],\n ),\n )\n\n @property\n def boundaries_shape(self) -> Tuple[int, int, 
int]:\n return self.ibins.size, self.jbins.size, self.kbins.size\n\n def surrounds_point(self, x, y, z, local: bool = True) -> bool:\n \"\"\"\n Check if the point is within the volume of mesh.\n\n By default, assumes that the point is given in local coordinates.\n \"\"\"\n if not local:\n x, y, z = self.local_coordinates(np.array([x, y, z], dtype=float))\n (xmin, xmax), (ymin, ymax), (zmin, zmax) = self.boundaries\n return cast(bool, (xmin < x < xmax) and (ymin < y < ymax) and (zmin < z < zmax))\n\n def select_indexes(\n self, *, i_values=None, j_values=None, k_values=None\n ) -> Tuple[\n Union[int, slice, np.ndarray],\n Union[int, slice, np.ndarray],\n Union[int, slice, np.ndarray],\n ]:\n return (\n select_indexes(self.ibins, i_values),\n select_indexes(self.jbins, j_values),\n select_indexes(self.kbins, k_values),\n )\n\n def print(self, io: TextIO, columns: int = 6):\n indent = \" \" * 8\n self.print_geom(io, indent)\n print(indent, \"origin=\", \" \".join(format_floats(self.origin)), sep=\"\", file=io)\n _print_bins(indent, \"i\", self.ibins, io, columns=columns)\n _print_bins(indent, \"j\", self.jbins, io, columns=columns)\n _print_bins(indent, \"k\", self.kbins, io, columns=columns)\n\n\nclass CartesianGeometrySpec(AbstractGeometrySpec):\n\n # TODO dvp: add transformation\n\n @property\n def cylinder(self) -> bool:\n return False\n\n @property\n def x(self) -> np.ndarray:\n return self.ibins\n\n @property\n def y(self) -> np.ndarray:\n return self.jbins\n\n @property\n def z(self) -> np.ndarray:\n return self.kbins\n\n def local_coordinates(self, points: np.ndarray) -> np.ndarray:\n assert points.shape[-1] == 3, \"Expected cartesian point array or single point\"\n if self.origin is not ZERO_ORIGIN:\n return cast(np.ndarray, points - ZERO_ORIGIN)\n else:\n return points\n\n def print_geom(self, io: TextIO, indent: str) -> None:\n pass # Defaults will do for cartesian mesh\n\n def get_mean_square_distance_weights(self, point):\n ni, nj, nk = self.bins_shape\n\n def calc_sum(bins):\n bins_square = np.square(bins)\n bins_mult = bins[:-1] * bins[1:]\n bins_square = bins_square[:-1] + bins_square[1:] + bins_mult\n return bins_square\n\n x_square, y_square, z_square = [\n calc_sum(x - px)\n for x, px in zip((self.ibins, self.jbins, self.kbins), point)\n ]\n w = np.zeros((ni, nj, nk), dtype=float)\n for i in range(ni):\n for j in range(nj):\n for k in range(nk):\n w[i, j, k] = x_square[i] + y_square[j] + z_square[k]\n w = (1.0 / 3.0) * w\n\n w = w * (1024.0 / np.max(w))\n\n return w\n\n def calc_cell_centers(self):\n raise NotImplementedError(\n f\"{self.__class__.__name__} has not implemented method calc_cell_centers\"\n )\n\n\n@dataclass(eq=False)\nclass CylinderGeometrySpec(AbstractGeometrySpec):\n axs: np.ndarray = DEFAULT_AXIS\n vec: np.ndarray = DEFAULT_VEC\n\n def __post_init__(self):\n super().__post_init__()\n\n if self.axs is not DEFAULT_AXIS:\n if not isinstance(self.axs, np.ndarray):\n # self.axs = as_float_array(self.axs)\n raise ValueError(f\"Expected axs as numpy array, actual {self.axs}\")\n if np.array_equal(self.axs, DEFAULT_AXIS):\n self.axs = DEFAULT_AXIS # internalize on default value\n\n if self.vec is not DEFAULT_VEC:\n if not isinstance(self.vec, np.ndarray):\n # self.vec = as_float_array(self.vec)\n raise ValueError(f\"Expected vec as numpy array, actual {self.vec}\")\n if np.array_equal(self.vec, DEFAULT_VEC):\n self.axs = DEFAULT_VEC # internalize on default value\n\n if not (self.theta[0] == 0.0 and self.theta[-1] == 1.0):\n raise ValueError(\"Theta is expected 
in rotations only\")\n\n @property\n def bins(self):\n return super().bins + (self.axs, self.vec)\n\n @property\n def cylinder(self) -> bool:\n return True\n\n @property\n def r(self) -> np.ndarray:\n return self.ibins\n\n @property\n def z(self) -> np.ndarray:\n return self.jbins\n\n @property\n def theta(self) -> np.ndarray:\n return self.kbins\n\n def local_coordinates(self, points: np.ndarray) -> np.ndarray:\n assert points.shape[-1] == 3, \"Expected cartesian point array or single point\"\n assert np.array_equal(self.axs, DEFAULT_AXIS) and (\n np.array_equal(self.vec, DEFAULT_VEC)\n or self.vec[1] == 0.0 # vec is in xz plane\n ), \"Tilted cylinder meshes are not implemented yet\"\n # TODO dvp: implement tilted cylinder meshes\n # ez = self.axs / np.linalg.norm(self.axs)\n # ey = np.cross(ez, self.vec)\n # ey /= np.linalg.norm(ey)\n # ex = np.cross(ey, ez)\n # ex /= np.linalg.norm(ex)\n local_points: np.ndarray = points - self.origin\n local_points[..., :] = (\n np.sqrt(local_points[..., 0] ** 2 + local_points[..., 1] ** 2), # r\n local_points[..., 2], # z\n np.arctan2(local_points[..., 1], local_points[..., 0]) * _1_TO_2PI, # theta\n )\n return local_points\n\n # TODO dvp: add opposite method global_coordinates\n\n def print_geom(self, io: TextIO, indent: str) -> None:\n print(indent, \"geom=cyl\", sep=\"\", file=io)\n print(\n indent,\n \"axs=\",\n \" \".join(format_floats(self.axs)),\n \"\\n\",\n indent,\n \"vec=\",\n \" \".join(format_floats(self.vec)),\n sep=\"\",\n file=io,\n )\n\n # noinspection SpellCheckingInspection\n def get_mean_square_distance_weights(self, point: np.ndarray) -> np.ndarray:\n ni, nj, nk = self.bins_shape\n assert self.vec is not None\n # Define synonyms for cylinder coordinates\n r = self.ibins # radius\n phi = self.kbins\n assert phi[-1] == 1.0\n phi = phi * _2PI\n z = self.jbins\n px, py, pz = (\n point - self.origin\n ) # TODO dvp: apply local_coordinates instead of the following\n l1_square = px**2 + py**2\n l1 = np.sqrt(l1_square) # distance to origin from point projection on z=0 plane\n assert 0.0 < l1\n # Terms of integration of L^2 in cylindrical coordinates\n # r^2\n gamma = np.arcsin(py / l1)\n r_square = np.square(r)\n r_square = 0.5 * (r_square[1:] + r_square[:-1])\n r_sum = r[1:] + r[:-1]\n r_mult = r[1:] * r[:-1]\n dphi = phi[1:] - phi[:-1]\n dsins = np.sin(phi - gamma)\n dsins = dsins[1:] - dsins[:-1]\n dsins = dsins / dphi\n z_minus_pz = z - pz\n z_minus_pz_square = np.square(z_minus_pz)\n z_sum = (1.0 / 3.0) * (\n z_minus_pz_square[1:]\n + z_minus_pz_square[:-1]\n + z_minus_pz[1:] * z_minus_pz[:-1]\n )\n w = np.zeros((ni, nj, nk), dtype=float)\n\n for i in range(ni):\n for j in range(nj):\n for k in range(nk):\n a = r_square[i]\n b = (-4.0 / 3.0) * l1 * (r_sum[i] - r_mult[i] / r_sum[i]) * dsins[k]\n d = z_sum[j]\n w[i, j, k] = a + b + d\n w = w + l1_square\n w = w * (1024.0 / np.max(w))\n\n return w\n\n def calc_cell_centers(self) -> np.ndarray:\n _x0, _y0, _z0 = self.origin\n r_mids = (self.ibins[1:] + self.ibins[:-1]) * 0.5\n z_mids = (self.jbins[1:] + self.jbins[:-1]) * 0.5\n t_mids = (self.kbins[1:] + self.kbins[:-1]) * 0.5\n if self.kbins[-1] == 1.0:\n t_mids = t_mids * _2PI\n v2 = np.cross(self.axs, self.vec)\n v1 = np.cross(v2, self.axs)\n v2 /= linalg.norm(v2)\n v1 /= linalg.norm(v1)\n axs = self.axs / linalg.norm(self.axs)\n axs_z = np.dot(axs, NZ)\n\n def aggregator(elements):\n r, z, fi = elements\n x, y = r * (v1 * np.cos(fi) + v2 * np.sin(fi))[0:2]\n x += _x0\n y += _y0\n z = axs_z * z + _z0\n return np.array([x, y, z], 
dtype=float)\n\n cell_centers: np.ndarray = cartesian_product(\n r_mids, z_mids, t_mids, aggregator=aggregator\n )\n\n return cell_centers\n\n # def __hash__(self):\n # return hash((super().__hash__(), self.axs, self.vec))\n #\n # def __eq__(self, other):\n # if not isinstance(other, CylinderGeometrySpec):\n # return False\n # return (\n # super().__eq__(other)\n # and np.array_equal(self.axs, other.axs)\n # and np.array_equal(self.vec, other.vec)\n # )\n\n def adjust_axs_vec_for_mcnp(self) -> \"CylinderGeometrySpec\":\n \"\"\"Set `axs` and `vec` attributes to the values, which MCNP considers orthogonal.\n\n Assumptions\n -----------\n\n Cylinder mesh is not tilted:\n - `self.vec` is in PY=0 plane\n - `self.axs` is vertical\n\n\n Returns\n -------\n gs:\n new CylinderGeometrySpec with adjusted `axs` and `vec` attributes.\n \"\"\"\n # TODO dvp: fix for arbitrary axs and vec\n axs = self.origin + DEFAULT_AXIS * self.z[-1]\n vec = self.origin + DEFAULT_VEC * self.r[-1]\n return CylinderGeometrySpec(\n self.r, self.z, self.theta, origin=self.origin, axs=axs, vec=vec\n )\n\n\ndef _print_bins(indent, prefix, _ibins, io, columns: int = 6):\n intervals, coarse_mesh = compute_intervals_and_coarse_bins(_ibins)\n coarse_mesh = coarse_mesh[1:] # drop the first value - it's presented with origin\n print(indent, f\"{prefix}mesh=\", sep=\"\", end=\"\", file=io)\n second_indent = indent + \" \" * 5\n ut.print_n(\n map(\"{:.6g}\".format, coarse_mesh), io=io, indent=second_indent, columns=columns\n )\n print(indent, f\"{prefix}ints=\", sep=\"\", end=\"\", file=io)\n ut.print_n(intervals, io=io, indent=second_indent, columns=columns)\n\n\ndef select_indexes(\n a: np.ndarray, x: Optional[Union[float, List[float], np.ndarray]]\n) -> Union[int, slice, np.ndarray]:\n \"\"\"Find indexes for a mesh bin, corresponding given coordinates.\n\n Assumes that `a` is sorted.\n\n Examples:\n\n >>> r = np.arange(5)\n >>> r\n array([0, 1, 2, 3, 4])\n\n For x is None return slice over all `a` indexes.\n\n >>> select_indexes(r, None)\n slice(0, 5, None)\n\n For non specified x, if input array represents just one bin,\n then return index 0 to squeeze results.\n >>> select_indexes(np.array([10,20]), None)\n 0\n\n For x = 1.5, we have 1 < 1.5 < 2, so the bin index is to be 1\n >>> select_indexes(r, 1.5)\n 1\n\n For x = 0, it's the first bin, and index is to be 0\n >>> select_indexes(r, 0)\n 0\n\n For coordinates below r[0] return -1.\n >>> select_indexes(r, -1)\n -1\n\n For coordinates above r[-1] return a.size-1.\n >>> select_indexes(r, 5)\n 4\n\n And for array of coordinates\n >>> select_indexes(r, np.array([1.5, 0, -1, 5])) # doctest: +SKIP\n array([ 1, 0, -1, 4])\n\n Args:\n a: bin boundaries\n x: one or more coordinates along `a`-boundaries\n\n Returns:\n out: index or indices for each given coordinate\n \"\"\"\n assert 1 < a.size, \"Parameter a doesn't represent binning\"\n\n if x is None:\n return slice(0, a.size) if 2 < a.size else 0 # squeeze if there's only one bin\n\n i: np.ndarray = a.searchsorted(x) - 1\n\n if np.isscalar(i):\n if i < 0:\n if x == a[0]:\n return 0\n else:\n neg = i < 0\n if np.any(neg):\n eq_to_min = a[0] == x\n i[np.logical_and(neg, eq_to_min)] = 0\n\n return i\n\n\ndef format_floats(floats: Iterable[float], _format=\"{:.6g}\") -> Iterable[str]:\n yield from map(_format.format, floats)\n\n\ndef compute_intervals_and_coarse_bins(\n arr: Sequence[float], tolerance: float = 1.0e-4\n) -> Tuple[List[int], List[float]]:\n \"\"\"Compute fine intervals and coarse binning.\n\n Examples:\n\n Find equidistant 
bins and report as intervals\n >>> arr = np.array([1, 2, 3, 4], dtype=float)\n >>> arr\n array([1., 2., 3., 4.])\n >>> intervals, coarse = compute_intervals_and_coarse_bins(arr)\n >>> intervals\n [3]\n >>> coarse\n [1.0, 4.0]\n\n A bins with two interval values.\n >>> arr = np.array([1, 2, 3, 6, 8, 10], dtype=float)\n >>> intervals, coarse = compute_intervals_and_coarse_bins(arr)\n >>> intervals\n [2, 1, 2]\n >>> coarse\n [1.0, 3.0, 6.0, 10.0]\n\n On zero (or negative tolerance) just use 1 intervals and return original array.\n >>> intervals, coarse = compute_intervals_and_coarse_bins(arr, tolerance=0.0)\n >>> intervals\n [1, 1, 1, 1, 1]\n >>> coarse is arr\n True\n\n\n Args:\n arr: actual bins\n tolerance: precision to distinguish intervals with\n\n Returns:\n Tuple: numbers of fine intervals between coarse bins, coarse binning\n \"\"\"\n if tolerance <= 0.0:\n return [1] * (len(arr) - 1), arr\n fine_intervals = []\n coarse_bins = [arr[0]]\n d_old = arr[1] - arr[0]\n count = 0\n for i in range(1, len(arr)):\n d = arr[i] - arr[i - 1]\n if abs(d - d_old) < tolerance:\n count += 1\n else:\n d_old = d\n fine_intervals.append(count)\n coarse_bins.append(arr[i - 1])\n count = 1\n fine_intervals.append(count)\n coarse_bins.append(arr[-1])\n return fine_intervals, coarse_bins\n\n\ndef arrays_equal(arrays: Iterable[Tuple[np.ndarray, np.ndarray]]) -> bool:\n for a, b in arrays:\n if not (a is b or np.array_equal(a, b)):\n return False\n return True\n"
] |
[
[
"numpy.dot",
"numpy.array_equal",
"numpy.cos",
"numpy.max",
"numpy.sin",
"numpy.linalg.norm",
"numpy.arcsin",
"numpy.logical_and",
"numpy.eye",
"numpy.sqrt",
"numpy.cross",
"numpy.vstack",
"numpy.square",
"numpy.array",
"numpy.zeros",
"numpy.isscalar",
"numpy.arctan2",
"numpy.asarray",
"numpy.any"
]
] |
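The numpy calls listed for this row come from the binning helpers in the snippet above (`select_indexes`, `compute_intervals_and_coarse_bins`). A minimal standalone sketch of the same bin-lookup idea follows, assuming sorted bin edges; the function and variable names are illustrative and not part of the dataset row.

import numpy as np

def lookup_bin(edges: np.ndarray, x):
    """Return the bin index (or indices) of x within sorted bin edges.

    Mirrors the select_indexes logic above: values equal to edges[0] map
    to bin 0, values below edges[0] stay at -1, values above edges[-1]
    land in the last bin.
    """
    i = edges.searchsorted(x) - 1
    if np.isscalar(i):
        # scalar coordinate: only the lower-edge equality needs fixing
        if i < 0 and x == edges[0]:
            return 0
        return i
    # array of coordinates: fix only entries sitting exactly on edges[0]
    neg = i < 0
    if np.any(neg):
        i[np.logical_and(neg, edges[0] == x)] = 0
    return i

edges = np.arange(5, dtype=float)                           # edges 0..4 -> four bins
print(lookup_bin(edges, 1.5))                               # 1
print(lookup_bin(edges, np.array([0.0, 1.5, -1.0, 5.0])))   # [ 0  1 -1  4]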
crcresearch/GOS
|
[
"a359f54f9477c1785501bd07a05ccbb8d122a0c0"
] |
[
"examples/migration/migration.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport data as data\nfrom constants import POPULATION_SCALE, MIGRATION_THRESHOLD, PROCESSES, SPLITS, BRAIN_DRAIN_THRESHOLD\nfrom gos import Globe\nimport sys\n\n# The attributes for each agent.\nworld_columns = [\"Country\", \"Income\", \"High Income\", \"Employed\", \"Attachment\",\n \"Location\", \"Neighborhood\", \"Migration\"]\n\nagentdt = np.dtype([('country', np.object),\n ('income', np.float32),\n ('high income', np.bool),\n ('employed', np.bool),\n ('attachment', np.float32),\n ('location', np.object),\n ('neighborhood', np.uint8),\n ('migration', np.float32)])\n\ndef generate_agents(df, country, population):\n \"\"\"\n Generate a dataframe of agents for a country where population\n is the number of agents to be created.\n \"\"\"\n def max_value(attribute):\n return df[attribute].max()\n # Turn this on for truly random output from each process.\n # pid = mp.current_process()._identity[0]\n rand = np.random.mtrand.RandomState(0)\n country_data = df[df.index == country].to_dict(\"records\")[0]\n gdp = country_data[\"GDP\"]\n income_array = gdp / 10 * rand.chisquare(10, (population,1)).astype('float32')\n unemployment_rate = float(country_data[\"Unemployment\"] / 100.0)\n employment_array = rand.choice([True, False], (population,1),\n p=[1 - unemployment_rate, unemployment_rate])\n attachment_array = (country_data[\"Fertility\"] *\n rand.triangular(0.0, 0.5, 1.0, (population,1)) /\n max_value(\"Fertility\")).astype('float32')\n frame = np.empty([population,1], dtype=agentdt, order='F')\n frame[\"country\"] = country\n frame[\"income\"] = income_array\n frame[\"high income\"] = income_array > gdp * BRAIN_DRAIN_THRESHOLD\n frame[\"employed\" ] = employment_array.astype('bool')\n frame[\"attachment\"] = attachment_array\n frame[\"location\"] = frame[\"country\"]\n frame[\"neighborhood\"] = np.random.randint(10, size=(population,1)).astype('uint8')\n \"\"\"\n frame = pd.DataFrame({\n \"Country\": pd.Categorical([country] * population, list(df.index)),\n \"Income\": income_array,\n \"High Income\": income_array > gdp * BRAIN_DRAIN_THRESHOLD,\n \"Employed\": employment_array.astype('bool'),\n \"Attachment\": attachment_array,\n \"Location\": pd.Categorical([country] * population, list(df.index)),\n \"Neighborhood\": np.random.randint(10, size=population).astype('uint8'),\n \"Migration\": 0,\n }, columns=world_columns)\n \"\"\"\n return frame\n\n\ndef migrate_array(a, **kwargs):\n if len(a[a.Migration > MIGRATION_THRESHOLD]) == 0:\n return a.Location\n np.random.seed(1000)\n migration_map = kwargs[\"migration_map\"]\n countries = kwargs[\"countries\"]\n for country, population in a.groupby(\"Location\"):\n local_attraction = migration_map[country]\n local_attraction /= local_attraction.sum()\n migrants_num = len(population[population.Migration > MIGRATION_THRESHOLD])\n a.loc[(a.Country == country) & (a.Migration > MIGRATION_THRESHOLD),\n \"Location\"] = np.random.choice(countries,\n p=local_attraction,\n size=migrants_num,\n replace=True)\n return a.Location\n\n\ndef migrate_score(a, **kwargs):\n max_income = kwargs[\"max_income\"]\n conflict_scores = kwargs[\"conflict\"]\n max_conflict = kwargs[\"max_conflict\"]\n conflict = conflict_scores.merge(a, left_index=True,\n right_on='Location')[\"Conflict\"] / max_conflict\n gdp = kwargs[\"gdp\"]\n # Brain drain\n a.loc[a[\"High Income\"] == True, \"Income\"] = 0\n return ((10 * (1 + a.Income / -max_income) +\n 10 * a.Attachment +\n (5 * conflict) +\n 3 + a.Employed * 4) / 32).astype('float32')\n\n\ndef 
main(proc=PROCESSES):\n np.random.seed(1000)\n globe = Globe(data.all(), processes=proc, splits=SPLITS)\n\n globe.create_agents(generate_agents)\n print(globe.agents)\n \"\"\"\n globe.agents.Migration = globe.run_par(migrate_score, max_income=globe.agents.Income.max(),\n conflict=globe.df[[\"Conflict\"]].sort_index(),\n gdp=globe.df[[\"GDP\"]].sort_index(),\n max_conflict=globe.df.Conflict.max(),\n columns=[\"Income\", \"High Income\", \"Employed\", \"Attachment\", \"Location\"])\n print(\"The potential migrants came from\")\n migrants = globe.agents[globe.agents.Migration > MIGRATION_THRESHOLD]\n print(migrants.Country.value_counts()[migrants.Country.value_counts().gt(0)])\n attractiveness = ((1 - globe.df[\"Conflict\"] / globe.max_value(\"Conflict\")) +\n (globe.df[\"GDP\"] / globe.max_value(\"GDP\")) +\n (1 - globe.df[\"Unemployment\"] / globe.max_value(\"Unemployment\")) +\n (1 - globe.df[\"Fertility\"] / globe.max_value(\"Fertility\")))\n\n def neighbors(country):\n return globe.df[globe.df.index == country].iloc[0].neighbors\n\n migration_map = {}\n for country in globe.df.index:\n local_attraction = attractiveness.copy()\n local_attraction[local_attraction.index.isin(neighbors(country))] += 1\n migration_map[country] = local_attraction\n\n globe.agents[\"Location\"] = globe.run_par(migrate_array, migration_map=migration_map,\n countries=globe.df.index,\n columns=[\"Country\", \"Location\", \"Migration\"])\n\n print(\"Migration model completed at a scale of {}:1.\".format(int(1 / POPULATION_SCALE)))\n print(\"The model used {} child processes\".format(globe.processes))\n migrants = globe.agents[globe.agents.Country != globe.agents.Location]\n print(\"There were a total of {} migrants.\".format(len(migrants)))\n print(\"There were a total of {} agents.\".format(len(globe.agents)))\n changes = (globe.agents.Location.value_counts() -\n globe.agents.Country.value_counts()).sort_values()\n print(changes.head())\n print(changes.tail())\n print(\"The potential migrants came from\")\n migrants = globe.agents[globe.agents.Migration > MIGRATION_THRESHOLD]\n print(migrants.Country.value_counts()[migrants.Country.value_counts().gt(0)])\n return globe\n \"\"\"\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n main(int(sys.argv[1]))\n else:\n main()\n"
] |
[
[
"numpy.random.choice",
"numpy.empty",
"numpy.random.seed",
"numpy.random.mtrand.RandomState",
"numpy.random.randint",
"numpy.dtype"
]
] |
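The numpy.random calls listed for this row drive the agent generation in `generate_agents` above. A small self-contained sketch of that draw pattern, with illustrative names of my own; the original uses the module-level `np.random.randint` for neighborhoods, while this sketch keeps everything on one seeded `RandomState` for determinism.

import numpy as np

def draw_agent_attributes(population: int, gdp: float, unemployment_rate: float, seed: int = 0):
    """Reproduce the core random draws from generate_agents above.

    Returns income, employment and neighborhood arrays of shape (population, 1).
    """
    rand = np.random.mtrand.RandomState(seed)   # deterministic per-process stream
    # chi-square income draws scaled by GDP, as in the snippet
    income = gdp / 10 * rand.chisquare(10, (population, 1)).astype('float32')
    # Bernoulli employment draws weighted by the unemployment rate
    employed = rand.choice([True, False], (population, 1),
                           p=[1 - unemployment_rate, unemployment_rate])
    # ten neighborhoods per country
    neighborhood = rand.randint(10, size=(population, 1)).astype('uint8')
    return income, employed, neighborhood

income, employed, neighborhood = draw_agent_attributes(5, gdp=30000.0, unemployment_rate=0.07)
print(income.ravel())
print(employed.ravel())
print(neighborhood.ravel())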
JinheonBaek/pytorch_geometric
|
[
"dfd32d08a3d8191d6290e53458d4eda515d04fd6"
] |
[
"torch_geometric/utils/convert.py"
] |
[
"from typing import Optional, Union, Tuple, List\n\nfrom collections import defaultdict\n\nimport torch\nimport scipy.sparse\nfrom torch import Tensor\nfrom torch.utils.dlpack import to_dlpack, from_dlpack\n\nimport torch_geometric.data\n\nfrom .num_nodes import maybe_num_nodes\n\n\ndef to_scipy_sparse_matrix(edge_index, edge_attr=None, num_nodes=None):\n r\"\"\"Converts a graph given by edge indices and edge attributes to a scipy\n sparse matrix.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor, optional): Edge weights or multi-dimensional\n edge features. (default: :obj:`None`)\n num_nodes (int, optional): The number of nodes, *i.e.*\n :obj:`max_val + 1` of :attr:`index`. (default: :obj:`None`)\n \"\"\"\n row, col = edge_index.cpu()\n\n if edge_attr is None:\n edge_attr = torch.ones(row.size(0))\n else:\n edge_attr = edge_attr.view(-1).cpu()\n assert edge_attr.size(0) == row.size(0)\n\n N = maybe_num_nodes(edge_index, num_nodes)\n out = scipy.sparse.coo_matrix(\n (edge_attr.numpy(), (row.numpy(), col.numpy())), (N, N))\n return out\n\n\ndef from_scipy_sparse_matrix(A):\n r\"\"\"Converts a scipy sparse matrix to edge indices and edge attributes.\n\n Args:\n A (scipy.sparse): A sparse matrix.\n \"\"\"\n A = A.tocoo()\n row = torch.from_numpy(A.row).to(torch.long)\n col = torch.from_numpy(A.col).to(torch.long)\n edge_index = torch.stack([row, col], dim=0)\n edge_weight = torch.from_numpy(A.data)\n return edge_index, edge_weight\n\n\ndef to_networkx(data, node_attrs=None, edge_attrs=None, to_undirected=False,\n remove_self_loops=False):\n r\"\"\"Converts a :class:`torch_geometric.data.Data` instance to a\n :obj:`networkx.Graph` if :attr:`to_undirected` is set to :obj:`True`, or\n a directed :obj:`networkx.DiGraph` otherwise.\n\n Args:\n data (torch_geometric.data.Data): The data object.\n node_attrs (iterable of str, optional): The node attributes to be\n copied. (default: :obj:`None`)\n edge_attrs (iterable of str, optional): The edge attributes to be\n copied. (default: :obj:`None`)\n to_undirected (bool, optional): If set to :obj:`True`, will return a\n a :obj:`networkx.Graph` instead of a :obj:`networkx.DiGraph`. The\n undirected graph will correspond to the upper triangle of the\n corresponding adjacency matrix. (default: :obj:`False`)\n remove_self_loops (bool, optional): If set to :obj:`True`, will not\n include self loops in the resulting graph. 
(default: :obj:`False`)\n \"\"\"\n import networkx as nx\n\n if to_undirected:\n G = nx.Graph()\n else:\n G = nx.DiGraph()\n\n G.add_nodes_from(range(data.num_nodes))\n\n node_attrs, edge_attrs = node_attrs or [], edge_attrs or []\n\n values = {}\n for key, item in data(*(node_attrs + edge_attrs)):\n if torch.is_tensor(item):\n values[key] = item.squeeze().tolist()\n else:\n values[key] = item\n if isinstance(values[key], (list, tuple)) and len(values[key]) == 1:\n values[key] = item[0]\n\n for i, (u, v) in enumerate(data.edge_index.t().tolist()):\n\n if to_undirected and v > u:\n continue\n\n if remove_self_loops and u == v:\n continue\n\n G.add_edge(u, v)\n\n for key in edge_attrs:\n G[u][v][key] = values[key][i]\n\n for key in node_attrs:\n for i, feat_dict in G.nodes(data=True):\n feat_dict.update({key: values[key][i]})\n\n return G\n\n\ndef from_networkx(G, group_node_attrs: Optional[Union[List[str], all]] = None,\n group_edge_attrs: Optional[Union[List[str], all]] = None):\n r\"\"\"Converts a :obj:`networkx.Graph` or :obj:`networkx.DiGraph` to a\n :class:`torch_geometric.data.Data` instance.\n\n Args:\n G (networkx.Graph or networkx.DiGraph): A networkx graph.\n group_node_attrs (List[str] or all, optional): The node attributes to\n be concatenated and added to :obj:`data.x`. (default: :obj:`None`)\n group_edge_attrs (List[str] or all, optional): The edge attributes to\n be concatenated and added to :obj:`data.edge_attr`.\n (default: :obj:`None`)\n\n .. note::\n\n All :attr:`group_node_attrs` and :attr:`group_edge_attrs` values must\n be numeric.\n \"\"\"\n import networkx as nx\n\n G = nx.convert_node_labels_to_integers(G)\n G = G.to_directed() if not nx.is_directed(G) else G\n edge_index = torch.LongTensor(list(G.edges)).t().contiguous()\n\n data = defaultdict(list)\n\n if G.number_of_nodes() > 0:\n node_attrs = list(next(iter(G.nodes(data=True)))[-1].keys())\n else:\n node_attrs = {}\n\n if G.number_of_edges() > 0:\n edge_attrs = list(next(iter(G.edges(data=True)))[-1].keys())\n else:\n edge_attrs = {}\n\n for i, (_, feat_dict) in enumerate(G.nodes(data=True)):\n if set(feat_dict.keys()) != set(node_attrs):\n raise ValueError('Not all nodes contain the same attributes')\n for key, value in feat_dict.items():\n data[str(key)].append(value)\n\n for i, (_, _, feat_dict) in enumerate(G.edges(data=True)):\n if set(feat_dict.keys()) != set(edge_attrs):\n raise ValueError('Not all edges contain the same attributes')\n for key, value in feat_dict.items():\n data[str(key)].append(value)\n\n for key, value in data.items():\n try:\n data[key] = torch.tensor(value)\n except ValueError:\n pass\n\n data['edge_index'] = edge_index.view(2, -1)\n data = torch_geometric.data.Data.from_dict(data)\n if data.x is None:\n data.num_nodes = G.number_of_nodes()\n\n if group_node_attrs is all:\n group_node_attrs = list(node_attrs)\n if group_node_attrs is not None:\n xs = [data[key] for key in group_node_attrs]\n xs = [x.view(-1, 1) if x.dim() <= 1 else x for x in xs]\n data.x = torch.cat(xs, dim=-1)\n\n if group_edge_attrs is all:\n group_edge_attrs = list(edge_attrs)\n if group_edge_attrs is not None:\n edge_attrs = [data[key] for key in group_edge_attrs]\n edge_attrs = [x.view(-1, 1) if x.dim() <= 1 else x for x in edge_attrs]\n data.edge_attr = torch.cat(edge_attrs, dim=-1)\n\n return data\n\n\ndef to_trimesh(data):\n r\"\"\"Converts a :class:`torch_geometric.data.Data` instance to a\n :obj:`trimesh.Trimesh`.\n\n Args:\n data (torch_geometric.data.Data): The data object.\n \"\"\"\n import trimesh\n 
return trimesh.Trimesh(vertices=data.pos.detach().cpu().numpy(),\n faces=data.face.detach().t().cpu().numpy(),\n process=False)\n\n\ndef from_trimesh(mesh):\n r\"\"\"Converts a :obj:`trimesh.Trimesh` to a\n :class:`torch_geometric.data.Data` instance.\n\n Args:\n mesh (trimesh.Trimesh): A :obj:`trimesh` mesh.\n \"\"\"\n pos = torch.from_numpy(mesh.vertices).to(torch.float)\n face = torch.from_numpy(mesh.faces).t().contiguous()\n\n return torch_geometric.data.Data(pos=pos, face=face)\n\n\ndef to_cugraph(edge_index: Tensor, edge_weight: Optional[Tensor] = None,\n relabel_nodes: bool = True):\n r\"\"\"Converts a graph given by :obj:`edge_index` and optional\n :obj:`edge_weight` into a :obj:`cugraph` graph object.\n\n Args:\n relabel_nodes (bool, optional): If set to :obj:`True`,\n :obj:`cugraph` will remove any isolated nodes, leading to a\n relabeling of nodes. (default: :obj:`True`)\n \"\"\"\n import cudf\n import cugraph\n\n df = cudf.from_dlpack(to_dlpack(edge_index.t()))\n\n if edge_weight is not None:\n assert edge_weight.dim() == 1\n df[2] = cudf.from_dlpack(to_dlpack(edge_weight))\n\n return cugraph.from_cudf_edgelist(\n df, source=0, destination=1,\n edge_attr=2 if edge_weight is not None else None,\n renumber=relabel_nodes)\n\n\ndef from_cugraph(G) -> Tuple[Tensor, Optional[Tensor]]:\n r\"\"\"Converts a :obj:`cugraph` graph object into :obj:`edge_index` and\n optional :obj:`edge_weight` tensors.\n \"\"\"\n df = G.edgelist.edgelist_df\n\n src = from_dlpack(df['src'].to_dlpack()).long()\n dst = from_dlpack(df['dst'].to_dlpack()).long()\n edge_index = torch.stack([src, dst], dim=0)\n\n edge_weight = None\n if 'weights' in df:\n edge_weight = from_dlpack(df['weights'].to_dlpack())\n\n return edge_index, edge_weight\n"
] |
[
[
"torch.utils.dlpack.to_dlpack",
"torch.cat",
"torch.stack",
"torch.is_tensor",
"torch.from_numpy",
"torch.tensor"
]
] |
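The torch calls listed for this row back the conversion helpers in `torch_geometric/utils/convert.py` above. A minimal round-trip sketch of the edge_index <-> scipy sparse pattern that `to_scipy_sparse_matrix` and `from_scipy_sparse_matrix` implement, assuming a small toy graph; this is an illustration, not the library code itself.

import torch
import scipy.sparse as sp

# Toy directed graph: 3 nodes, 4 edges, stored PyG-style as a 2 x E LongTensor.
edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]], dtype=torch.long)
edge_weight = torch.tensor([1.0, 1.0, 2.0, 2.0])

# edge_index -> scipy COO matrix (same idea as to_scipy_sparse_matrix above).
row, col = edge_index
adj = sp.coo_matrix((edge_weight.numpy(), (row.numpy(), col.numpy())), shape=(3, 3))

# scipy COO matrix -> edge_index / edge_weight (same idea as from_scipy_sparse_matrix above).
adj = adj.tocoo()
row = torch.from_numpy(adj.row).to(torch.long)
col = torch.from_numpy(adj.col).to(torch.long)
edge_index_back = torch.stack([row, col], dim=0)
edge_weight_back = torch.from_numpy(adj.data)

print(edge_index_back)
print(edge_weight_back)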
zhubonan/parsevasp
|
[
"ac929f324f33e03543f0b4cb4b11eb426aab2199"
] |
[
"parsevasp/vasprun.py"
] |
[
"#!/usr/bin/python\nimport sys\nimport os\nimport numpy as np\nimport logging\nimport mmap\nimport copy\n\nfrom parsevasp import constants\nfrom parsevasp import utils\nfrom parsevasp.base import BaseParser\n\nfrom lxml import etree\n\n# Try to import lxml, if not present fall back to\n# intrinsic ElementTree\nlxml = False\ntry:\n from lxml import etree\n lxml = True\nexcept ImportError:\n try:\n # Python 2.5\n import xml.etree.cElementTree as etree\n except ImportError:\n try:\n # Python 2.5\n import xml.etree.ElementTree as etree\n except ImportError:\n try:\n # normal cElementTree\n import cElementTree as etree\n except ImportError:\n try:\n # normal ElementTree\n import elementtree.ElementTree as etree\n except ImportError:\n logging.error('Failed to import ElementTree.')\n sys.exit('Failed to import ElementTree.')\n\n\n_SUPPORTED_TOTAL_ENERGIES = {'energy_extrapolated': 'e_0_energy',\n 'energy_free': 'e_fr_energy',\n 'energy_no_entropy': 'e_wo_entrp'}\n\n\nclass Xml(BaseParser):\n\n ERROR_MULTIPLE_ENTRIES = 500\n ERROR_NO_SPECIES = 501\n ERROR_NO_ISPIN = 502\n ERROR_NO_NBANDS = 503\n ERROR_NO_KPOINTS = 504\n ERROR_MISMATCH_KPOINTS_NBANDS = 505\n ERROR_UNKNOWN_ELEMENT = 506\n ERROR_UNSUPPORTED_STATUS = 507\n ERROR_NO_SIZE = 508\n ERROR_OVERFLOW = 509\n BaseParser.ERROR_MESSAGES.update({\n ERROR_NO_SPECIES:\n 'Please extract the species first.',\n ERROR_MULTIPLE_ENTRIES:\n 'Multiple entries of were located.',\n ERROR_NO_ISPIN:\n 'Please extract ISPIN first.',\n ERROR_NO_NBANDS:\n 'Please extract NBANDS first.',\n ERROR_NO_KPOINTS:\n 'Please extract the kpoints first.',\n ERROR_MISMATCH_KPOINTS_NBANDS:\n 'The number of kpoints and bands for the entries does not match the number of '\n 'located kpoints and NBANDS.',\n ERROR_UNKNOWN_ELEMENT:\n 'There is an atomic element present in the XML file that is unknown.',\n ERROR_UNSUPPORTED_STATUS:\n 'The supplied status is not supported.',\n ERROR_NO_SIZE:\n 'Can not calculate size.',\n ERROR_OVERFLOW:\n 'Overflow detected in the XML file.'\n })\n ERROR_MESSAGES = BaseParser.ERROR_MESSAGES\n\n def __init__(self,\n file_path=None,\n file_handler=None,\n k_before_band=False,\n extract_all=True,\n logger=None,\n event=False):\n \"\"\"Initialize the XmlParser by first trying the lxml and\n fall back to the standard ElementTree if that is not present.\n\n Parameters\n ----------\n k_before_band : bool\n If True the kpoint index runs before the bands\n index.\n extract_all : bool\n Extract data from all calculation (i.e. ionic steps)\n event : bool\n If True, force event based method.\n\n Notes\n -----\n lxml should be used and is required for large files\n \"\"\"\n\n super(Xml, self).__init__(file_path=file_path,\n file_handler=file_handler,\n logger=logger)\n\n self._sizecutoff = 500\n self._event = event\n\n if self._file_path is None and self._file_handler is None:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_ONLY_ONE_ARGUMENT])\n sys.exit(self.ERROR_ONLY_ONE_ARGUMENT)\n\n # extract data from all calculations (e.g. 
ionic steps)\n self._extract_all = extract_all\n\n # kpoint index before band index (for instance for the ordering\n # of the eigenvalue data etc.)?\n self._k_before_band = k_before_band\n\n # version\n self._version = None\n \n # dictionaries that contain the output of the parsing\n self._parameters = {\n 'symprec': None,\n 'ismear': None,\n 'sigma': None,\n 'ispin': None,\n 'nbands': None,\n 'nelect': None,\n 'system': None,\n 'nelm': None,\n 'nsw': None,\n }\n self._lattice = {\n 'unitcell': None,\n 'species': None,\n 'positions': None,\n 'kpoints': None,\n 'kpointsw': None,\n 'kpointdiv': None\n }\n self._data = {\n 'eigenvalues': None,\n 'eigenvalues_specific': None,\n 'eigenvelocities': None,\n 'kpoints': None,\n 'kpointsw': None,\n 'occupancies': None,\n 'dos': None,\n 'dos_specific': None,\n 'totens': None,\n 'forces': None,\n 'stress': None,\n 'dielectrics': None,\n 'projectors': None,\n 'hessian': None,\n 'dynmat': None,\n 'born': None\n }\n\n if lxml:\n self._logger.info('We are utilizing lxml!')\n else:\n self._logger.info('We are not uitilizing lxml!')\n\n # parse parse parse\n self._parse()\n\n @property\n def truncated(self):\n \"\"\"Return True of the xml parsed is truncated.\"\"\"\n return self._xml_recover\n \n def _parse(self):\n \"\"\"Perform the actual parsing\n\n Parameters\n ----------\n None\n\n Returns\n -------\n None\n\n \"\"\"\n\n # Check size of the XML file. For large files we need to\n # perform event driven parsing. For smaller files this is\n # not necessary and is too slow.\n file_size = self._file_size()\n if file_size is None:\n return None\n\n # Do a quick check to see if the XML file is not truncated\n self._xml_recover = self._check_xml()\n \n if ((file_size < self._sizecutoff) or self._xml_recover) and \\\n not self._event:\n # run regular method (loads file into memory) and\n # enable recovery mode if necessary\n self._parsew(self._xml_recover)\n else:\n # event based, saves a bit of memory\n self._parsee()\n\n def _parsew(self, xml_recover):\n \"\"\"Performs parsing on the whole XML files. For smaller files\n\n \"\"\"\n\n self._logger.debug('Running parsew.')\n\n # now open the complete file, we have already checked its presence when we checked the recover.\n if self._file_handler is None:\n filer = self._file_path\n else:\n filer = self._file_handler\n\n # make sure we enable the recovery mode\n # pretty sure there is a performance bottleneck running this\n # enabled at all times, so consider to add check for\n # truncated XML files and then enable\n\n if lxml and xml_recover:\n if xml_recover:\n self._logger.debug('Running LXML in recovery mode.')\n parser = etree.XMLParser(recover=True)\n vaspxml = etree.parse(filer, parser=parser)\n else:\n vaspxml = etree.parse(filer)\n\n # do we want to extract data from all calculations (e.g. 
ionic steps)\n extract_all = self._extract_all\n\n # let us start to parse the content\n self._version = self._fetch_versionw(vaspxml)\n self._parameters['symprec'] = self._fetch_symprecw(vaspxml)\n self._parameters['sigma'] = self._fetch_sigmaw(vaspxml)\n self._parameters['ismear'] = self._fetch_ismearw(vaspxml)\n self._parameters['ispin'] = self._fetch_ispinw(vaspxml)\n self._parameters['nbands'] = self._fetch_nbandsw(vaspxml)\n self._parameters['nelect'] = self._fetch_nelectw(vaspxml)\n self._parameters['system'] = self._fetch_systemw(vaspxml)\n self._parameters['nelm'] = self._fetch_nelmw(vaspxml)\n self._parameters['nsw'] = self._fetch_nsww(vaspxml)\n self._lattice['species'] = self._fetch_speciesw(vaspxml)\n self._lattice['unitcell'], self._lattice['positions'], \\\n self._data['forces'], self._data['stress'] = \\\n self._fetch_upfsw(vaspxml, extract_all=extract_all)\n self._lattice['kpoints'] = self._fetch_kpointsw(vaspxml)\n self._lattice['kpointsw'] = self._fetch_kpointsww(vaspxml)\n self._lattice['kpointdiv'] = self._fetch_kpointdivw(vaspxml)\n self._data['eigenvalues'], self._data[\n 'occupancies'] = self._fetch_eigenvaluesw(vaspxml)\n self._data['eigenvalues_specific'] = self._fetch_eigenvalues_specificw(\n vaspxml)\n self._data['eigenvelocities'] = self._fetch_eigenvelocitiesw(vaspxml)\n self._data['dos'], self._data['dos_specific'] = self._fetch_dosw(\n vaspxml)\n self._data['totens'] = self._fetch_totensw(vaspxml)\n self._data['dielectrics'] = self._fetch_dielectricsw(vaspxml)\n self._data['projectors'] = self._fetch_projectorsw(vaspxml)\n self._data['hessian'] = self._fetch_hessian(vaspxml)\n self._data['dynmat'] = self._fetch_dynmatw(vaspxml)\n self._data['born'] = self._fetch_bornw(vaspxml)\n\n def _parsee(self):\n \"\"\"Performs parsing in an event driven fashion on the XML file.\n Slower, but suitable for bigger files.\n\n \"\"\"\n\n # set logger\n self._logger.debug('Running parsee.')\n\n # helper lists\n data = []\n data2 = []\n data3 = []\n data4 = []\n data5 = []\n data6 = []\n\n # dicts\n cell = {}\n pos = {}\n force = {}\n stress = {}\n dos = {}\n totens = {}\n dynmat = {}\n _dos = {}\n _dos2 = {}\n\n # bool to control extraction of content\n extract_generator = False\n extract_parameters = False\n extract_calculation = False\n extract_latticedata = False\n extract_unitcell = False\n extract_positions = False\n extract_species = False\n extract_kpointdata = False\n extract_kpoints = False\n extract_kpointsw = False\n extract_kpointdiv = False\n extract_kpoints_specific = False\n extract_kpointsw_specific = False\n extract_eigenvalues = False\n extract_eigenvalues_specific = False\n extract_eigenvalues_spin1 = False\n extract_eigenvalues_spin2 = False\n extract_eigenvalues_specific_spin1 = False\n extract_eigenvalues_specific_spin2 = False\n extract_eigenvelocities = False\n extract_eigenvelocities_spin1 = False\n extract_eigenvelocities_spin2 = False\n extract_dos = False\n extract_dos_specific = False\n extract_total_dos = False\n extract_partial_dos = False\n extract_dos_ispin1 = False\n extract_dos_ispin2 = False\n extract_dos_specific_ispin1 = False\n extract_dos_specific_ispin2 = False\n extract_projected = False\n extract_forces = False\n extract_stress = False\n extract_energies = False\n extract_e_0_energy = False\n extract_e_fr_energy = False\n extract_e_wo_entrp = False\n extract_scstep = False\n extract_dielectrics = False\n extract_eig_proj = False\n extract_eig_proj_ispin1 = False\n extract_eig_proj_ispin2 = False\n extract_dynmat = False\n 
extract_dynmat_eigen = False\n extract_hessian = False\n extract_born = False\n\n # do we want to extract data from all calculations (e.g. ionic steps)\n extract_all = self._extract_all\n\n if self._file_handler is None:\n filer = self._file_path\n else:\n filer = self._file_handler\n\n # index that control the calculation step (e.g. ionic step)\n calc = 1\n for event, element in etree.iterparse(filer, events=('start', 'end')):\n # set extraction points (what to read and when to read it)\n # here we also set the relevant data elements when the tags\n # close when they contain more than one element\n if event == 'start' and element.tag == 'generator':\n extract_generator = True\n if event == 'end' and element.tag == 'generator':\n extract_generator = False\n if event == 'start' and element.tag == 'parameters':\n extract_parameters = True\n if event == 'end' and element.tag == 'parameters':\n extract_parameters = False\n if event == 'start' and element.tag == 'calculation':\n # Instead of needing to check the nested dicts we initialize them here\n # so that we can use update for each calculation.\n totens[calc] = {}\n extract_calculation = True\n if event == 'end' and element.tag == 'calculation':\n data3 = self._convert_array1D_f(data3)\n data4 = self._convert_array1D_f(data4)\n data5 = self._convert_array1D_f(data5)\n totens[calc].update({'energy_extrapolated': data3,\n 'energy_free': data4,\n 'energy_no_entropy': data5\n })\n data3 = []\n data4 = []\n data5 = []\n # update index for the calculation\n calc = calc + 1\n extract_calculation = False\n if event == 'start' and element.tag == 'array' \\\n and element.attrib.get('name') == 'atoms':\n extract_species = True\n if event == 'end' and element.tag == 'array' \\\n and element.attrib.get('name') == 'atoms':\n # only need every other element (element, not atomtype)\n self._lattice['species'] = self._convert_species(data[::2])\n data = []\n extract_species = False\n if event == 'start' and element.tag == 'kpoints' and not extract_calculation:\n extract_kpointdata = True\n if event == 'end' and element.tag == 'kpoints' and not extract_calculation:\n extract_kpointdata = False\n if event == 'start' and element.tag == 'projected':\n extract_projected = True\n if event == 'end' and element.tag == 'projected':\n extract_projected = False\n\n # now fetch the data\n if extract_generator:\n try:\n if event == 'start' and element.attrib['name'] == 'version':\n self._version = element.text\n except KeyError:\n pass\n if extract_parameters:\n try:\n if event == 'start' and element.attrib['name'] == 'SYMPREC':\n self._parameters['symprec'] = self._convert_f(element)\n except KeyError:\n pass\n try:\n if event == 'start' and element.attrib['name'] == 'ISPIN':\n self._parameters['ispin'] = self._convert_i(element)\n except KeyError:\n pass\n try:\n if event == 'start' and element.attrib['name'] == 'ISMEAR':\n self._parameters['ismear'] = self._convert_i(element)\n except KeyError:\n pass\n try:\n if event == 'start' and element.attrib['name'] == 'SIGMA':\n self._parameters['sigma'] = self._convert_f(element)\n except KeyError:\n pass\n try:\n if event == 'start' and element.attrib['name'] == 'NBANDS':\n self._parameters['nbands'] = self._convert_i(element)\n except KeyError:\n pass\n try:\n if event == 'start' and element.attrib['name'] == 'NELECT':\n self._parameters['nelect'] = self._convert_f(element)\n except KeyError:\n pass\n try:\n if event == 'start' and element.attrib['name'] == 'SYSTEM':\n self._parameters['system'] = element.text\n except 
KeyError:\n pass\n try:\n if event == 'start' and element.attrib['name'] == 'NELM' \\\n and element.getparent().attrib['name'] == 'electronic convergence':\n self._parameters['nelm'] = self._convert_i(element)\n except KeyError:\n pass\n try:\n if event == 'start' and element.attrib['name'] == 'NSW':\n self._parameters['nsw'] = self._convert_i(element)\n except KeyError:\n pass\n\n if extract_calculation:\n # it would be very tempting just to fill the data and disect\n # it later, would be faster, but it is not so easy since\n # we do not know how many calculations have been performed\n # or how many scteps there are per calculation\n if event == 'start' and element.tag == 'dos' and element.attrib.get(\n 'comment') is None:\n extract_dos = True\n if event == 'end' and element.tag == 'total' and extract_dos:\n if data2:\n # only store energy for one part as\n # this is the same for both\n dos_ispin = self._convert_array2D_f(data, 3)\n _dos['energy'] = dos_ispin[:, 0]\n _dos['total'] = dos_ispin[:, 1]\n _dos['integrated'] = dos_ispin[:, 2]\n _dos['partial'] = None\n dos_ispin = self._convert_array2D_f(data2, 3)\n _dos2['total'] = dos_ispin[:, 1]\n _dos2['integrated'] = dos_ispin[:, 2]\n _dos2['partial'] = None\n else:\n dos_ispin = self._convert_array2D_f(data, 3)\n _dos['energy'] = dos_ispin[:, 0]\n _dos['total'] = dos_ispin[:, 1]\n _dos['integrated'] = dos_ispin[:, 2]\n _dos['partial'] = None\n data = []\n data2 = []\n if event == 'end' and element.tag == 'partial' and extract_dos:\n num_atoms = 0\n if self._lattice['species'] is not None:\n num_atoms = self._lattice['species'].shape[0]\n else:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])\n sys.exit(self.ERROR_NO_SPECIES)\n if data2:\n dos_ispin = self._convert_array2D_f(data, 10)\n # do not need the energy term (similar to total)\n _dos['partial'] = np.asarray(\n np.split(dos_ispin[:, 1:10], num_atoms))\n dos_ispin = self._convert_array2D_f(data2, 10)\n # do not need the energy term (similar to total)\n _dos2['partial'] = np.asarray(\n np.split(dos_ispin[:, 1:10], num_atoms))\n else:\n dos_ispin = self._convert_array2D_f(data, 10)\n # do not need the energy term (similar to total)\n _dos['partial'] = np.asarray(\n np.split(dos_ispin[:, 1:10], num_atoms))\n data = []\n data2 = []\n if event == 'end' and element.tag == 'dos' and extract_dos:\n # check the Fermi level\n if len(data6) == 1:\n fermi_level = self._convert_f(data6[0])\n elif len(data6) > 1:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_MULTIPLE_ENTRIES] +\n \" The tag in question is 'efermi'.\")\n sys.exit(self.ERROR_MULTIPLE_ENTRIES)\n else:\n fermi_level = None\n\n if _dos2:\n dos['up'] = _dos\n dos['down'] = _dos2\n dos['total'] = {\n 'fermi_level': fermi_level,\n 'energy': _dos['energy']\n }\n del dos['up']['energy']\n else:\n _dos['fermi_level'] = fermi_level\n dos['total'] = _dos\n self._data['dos'] = copy.deepcopy(dos)\n data = []\n data2 = []\n data6 = []\n _dos = {}\n _dos2 = {}\n extract_dos = False\n if event == 'start' and element.tag == 'dos' and element.attrib.get(\n 'comment') == 'interpolated':\n extract_dos_specific = True\n if event == 'end' and element.tag == 'total' and extract_dos_specific:\n if data2:\n # only store energy for one part as\n # this is the same for both\n dos_ispin = self._convert_array2D_f(data, 3)\n _dos['energy'] = dos_ispin[:, 0]\n _dos['total'] = dos_ispin[:, 1]\n _dos['integrated'] = dos_ispin[:, 2]\n _dos['partial'] = None\n dos_ispin = self._convert_array2D_f(data2, 3)\n _dos2['total'] = dos_ispin[:, 
1]\n _dos2['integrated'] = dos_ispin[:, 2]\n _dos2['partial'] = None\n else:\n dos_ispin = self._convert_array2D_f(data, 3)\n _dos['energy'] = dos_ispin[:, 0]\n _dos['total'] = dos_ispin[:, 1]\n _dos['integrated'] = dos_ispin[:, 2]\n _dos['partial'] = None\n data = []\n data2 = []\n if event == 'end' and element.tag == 'partial' and extract_dos_specific:\n num_atoms = 0\n if self._lattice['species'] is not None:\n num_atoms = self._lattice['species'].shape[0]\n else:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])\n sys.exit(self.ERROR_NO_SPECIES)\n if data2:\n dos_ispin = self._convert_array2D_f(data, 10)\n # do not need the energy term (similar to total)\n _dos['partial'] = np.asarray(\n np.split(dos_ispin[:, 1:10], num_atoms))\n dos_ispin = self._convert_array2D_f(data2, 10)\n # do not need the energy term (similar to total)\n _dos2['partial'] = np.asarray(\n np.split(dos_ispin[:, 1:10], num_atoms))\n else:\n dos_ispin = self._convert_array2D_f(data, 10)\n # do not need the energy term (similar to total)\n _dos['partial'] = np.asarray(\n np.split(dos_ispin[:, 1:10], num_atoms))\n data = []\n data2 = []\n if event == 'end' and element.tag == 'dos' and extract_dos_specific:\n # check the Fermi level\n if len(data6) == 1:\n fermi_level = self._convert_f(data6[0])\n elif len(data6) > 1:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_MULTIPLE_ENTRIES] +\n \" The tag in question is 'efermi'.\")\n sys.exit(self.ERROR_MULTIPLE_ENTRIES)\n else:\n fermi_level = None\n\n if _dos2:\n dos['up'] = _dos\n dos['down'] = _dos2\n dos['total'] = {\n 'fermi_level': fermi_level,\n 'energy': _dos['energy']\n }\n del dos['up']['energy']\n else:\n _dos['fermi_level'] = fermi_level\n dos['total'] = _dos\n self._data['dos_specific'] = dos\n data = []\n data2 = []\n data6 = []\n _dos = {}\n _dos2 = {}\n extract_dos_specific = False\n\n if event == 'start' and element.tag == 'structure':\n extract_latticedata = True\n if event == 'end' and element.tag == 'structure':\n extract_latticedata = False\n if event == 'start' and element.tag == 'varray' and \\\n element.attrib['name'] == 'forces':\n extract_forces = True\n if event == 'end' and element.tag == 'varray' and \\\n element.attrib['name'] == 'forces':\n force[calc] = self._convert_array2D_f(data, 3)\n data = []\n extract_forces = False\n if event == 'start' and element.tag == 'varray' and \\\n element.attrib['name'] == 'stress':\n extract_stress = True\n if event == 'end' and element.tag == 'varray' and \\\n element.attrib['name'] == 'stress':\n stress[calc] = self._convert_array2D_f(data, 3)\n data = []\n extract_stress = False\n if event == 'start' and element.tag == 'energy' and not extract_scstep:\n extract_energies = True\n if event == 'end' and element.tag == 'energy' and not extract_scstep:\n extract_energies = False\n if event == 'start' and element.tag == 'scstep':\n extract_scstep = True\n if event == 'end' and element.tag == 'scstep':\n extract_scstep = False\n if event == 'start' and element.tag == 'eigenvalues' and not extract_eigenvelocities and element.attrib.get(\n 'comment'\n ) != 'interpolated' and not extract_eigenvalues_specific:\n extract_eigenvalues = True\n if event == 'end' and element.tag == 'eigenvalues' and extract_eigenvalues:\n num_kpoints = len(self._lattice['kpoints'])\n if not data2:\n eigenvalues, occupancies = self._extract_eigenvalues(\n data, None, num_kpoints)\n else:\n eigenvalues, occupancies = self._extract_eigenvalues(\n data, data2, num_kpoints)\n self._data['eigenvalues'] = eigenvalues\n 
self._data['occupancies'] = occupancies\n data = []\n data2 = []\n extract_eigenvalues = False\n if event == 'start' and element.tag == 'eigenvalues' and element.attrib.get(\n 'comment') == 'interpolated':\n extract_eigenvalues_specific = True\n\n if event == 'end' and element.tag == 'eigenvalues' and extract_eigenvalues_specific:\n num_kpoints = len(self._data['kpoints'])\n if not data2:\n eigenvalues_specific, _ = self._extract_eigenvalues(\n data, None, num_kpoints)\n else:\n eigenvalues_specific, _ = self._extract_eigenvalues(\n data, data2, num_kpoints)\n self._data['eigenvalues_specific'] = eigenvalues_specific\n data = []\n data2 = []\n extract_eigenvalues_specific = False\n if event == 'start' and element.tag == 'eigenvelocities':\n extract_eigenvelocities = True\n if event == 'end' and element.tag == 'eigenvelocities':\n num_kpoints = len(self._data['kpoints'])\n if not data2:\n eigenvelocities = self._extract_eigenvelocities(\n data, None, num_kpoints)\n else:\n eigenvelocities = self._extract_eigenvelocities(\n data, data2, num_kpoints)\n self._data['eigenvelocities'] = eigenvelocities\n data = []\n data2 = []\n extract_eigenvelocities = False\n if event == 'start' and element.tag == 'dielectricfunction':\n extract_dielectrics = True\n if event == 'end' and element.tag == 'dielectricfunction':\n _diel = {}\n diel = np.split(self._convert_array2D_f(data, 7), 2)\n _diel['energy'] = diel[0][:, 0]\n _diel['imag'] = diel[0][:, 1:7]\n _diel['real'] = diel[1][:, 1:7]\n self._data['dielectrics'] = _diel\n data = []\n extract_dielectrics = False\n if event == 'start' and element.tag == 'dynmat':\n extract_dynmat = True\n if event == 'end' and element.tag == 'dynmat':\n self._data['dynmat'] = dynmat\n extract_dynmat = False\n if event == 'start' and element.tag == 'array':\n # a bit of special threatment here as there is\n # an array element without attributes, so we get\n # KeyErrors\n try:\n if element.attrib['name'] == 'born_charges':\n extract_born = True\n except KeyError:\n pass\n if event == 'end' and element.tag == 'array':\n # again a bit special\n try:\n if element.attrib['name'] == 'born_charges':\n num_atoms = 0\n if self._lattice['species'] is not None:\n num_atoms = self._lattice['species'].shape[0]\n else:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])\n sys.exit(self.ERROR_NO_SPECIES)\n data = self._convert_array2D_f(data, 3)\n data = np.split(data, num_atoms)\n self._data['born'] = np.asarray(data)\n data = []\n extract_born = False\n except KeyError:\n pass\n\n # now extract data\n if extract_scstep:\n # extrapolated energy\n if event == 'start' and element.tag == 'i' and \\\n element.attrib['name'] == 'e_0_energy':\n extract_e_0_energy = True\n if event == 'end' and element.tag == 'i' and \\\n element.attrib['name'] == 'e_0_energy':\n extract_e_0_energy = False\n if extract_e_0_energy:\n data3.append(element)\n # free energy\n if event == 'start' and element.tag == 'i' and \\\n element.attrib['name'] == 'e_fr_energy':\n extract_e_fr_energy = True\n if event == 'end' and element.tag == 'i' and \\\n element.attrib['name'] == 'e_fr_energy':\n extract_e_fr_energy = False\n if extract_e_fr_energy:\n data4.append(element)\n # energy without entropy\n if event == 'start' and element.tag == 'i' and \\\n element.attrib['name'] == 'e_wo_entrp':\n extract_e_wo_entrp = True\n if event == 'end' and element.tag == 'i' and \\\n element.attrib['name'] == 'e_wo_entrp':\n extract_e_wo_entrp = False\n if extract_e_wo_entrp:\n data5.append(element)\n if 
extract_latticedata:\n if event == 'start' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'basis':\n extract_unitcell = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'basis':\n cell[calc] = self._convert_array2D_f(data, 3)\n data = []\n extract_unitcell = False\n\n if event == 'start' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'positions':\n extract_positions = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'positions':\n pos[calc] = self._convert_array2D_f(data, 3)\n data = []\n extract_positions = False\n\n if extract_unitcell:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n if extract_positions:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n\n if extract_forces:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n\n if extract_stress:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n\n if extract_energies:\n # extrapolated energy\n if event == 'start' and element.tag == 'i' and \\\n element.attrib['name'] == 'e_0_energy':\n totens[calc].update({'energy_extrapolated_final': float(element.text)})\n # free energy\n if event == 'start' and element.tag == 'i' and \\\n element.attrib['name'] == 'e_fr_energy':\n totens[calc].update({'energy_free_final': float(element.text)})\n # energy without entropy\n if event == 'start' and element.tag == 'i' and \\\n element.attrib['name'] == 'e_wo_entrp':\n totens[calc].update({'energy_no_entropy_final': float(element.text)})\n\n if extract_eigenvalues:\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_eigenvalues_spin1 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_eigenvalues_spin1 = False\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 2':\n extract_eigenvalues_spin2 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 2':\n extract_eigenvalues_spin2 = False\n if extract_eigenvalues_spin1:\n if event == 'start' and element.tag == 'r':\n data.append(element)\n if extract_eigenvalues_spin2:\n if event == 'start' and element.tag == 'r':\n data2.append(element)\n\n if extract_eigenvalues_specific:\n if event == 'start' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'kpointlist':\n extract_kpoints_specific = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'kpointlist':\n self._data['kpoints'] = self._convert_array2D_f(\n data, 3)\n data = []\n extract_kpoints_specific = False\n if event == 'start' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'weights':\n extract_kpointsw_specific = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'weights':\n self._data['kpointsw'] = self._convert_array1D_f(data)\n data = []\n extract_kpointsw_specific = False\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_eigenvalues_spin1 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_eigenvalues_spin1 = False\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 2':\n extract_eigenvalues_spin2 = True\n if event == 'end' and element.tag == 'set' \\\n and 
element.attrib.get('comment') == 'spin 2':\n extract_eigenvalues_spin2 = False\n if extract_kpoints_specific:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n if extract_kpointsw_specific:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n if extract_eigenvalues_spin1:\n if event == 'start' and element.tag == 'r':\n data.append(element)\n if extract_eigenvalues_spin2:\n if event == 'start' and element.tag == 'r':\n data2.append(element)\n\n if extract_eigenvelocities:\n if event == 'start' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'kpointlist':\n extract_kpoints_specific = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'kpointlist':\n self._data['kpoints'] = self._convert_array2D_f(\n data, 3)\n data = []\n extract_kpoints_specific = False\n if event == 'start' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'weights':\n extract_kpointsw_specific = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'weights':\n self._data['kpointsw'] = self._convert_array1D_f(data)\n data = []\n extract_kpointsw_specific = False\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_eigenvelocities_spin1 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_eigenvelocities_spin1 = False\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 2':\n extract_eigenvelocities_spin2 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 2':\n extract_eigenvelocities_spin2 = False\n if extract_kpoints_specific:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n if extract_kpointsw_specific:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n if extract_eigenvelocities_spin1:\n if event == 'start' and element.tag == 'r':\n data.append(element)\n if extract_eigenvelocities_spin2:\n if event == 'start' and element.tag == 'r':\n data2.append(element)\n\n if extract_dielectrics:\n if event == 'start' and element.tag == 'r':\n data.append(element)\n\n if extract_projected:\n # make sure we skip the first entry containing\n # the eigenvalues (already stored at this point)\n if event == 'end' and element.tag == 'eigenvalues':\n extract_eig_proj = True\n if event == 'end' and element.tag == 'array' and \\\n extract_eig_proj:\n if not data2:\n projectors = self._extract_projectors(data, None)\n else:\n projectors = self._extract_projectors(data, data2)\n self._data['projectors'] = projectors\n data = []\n data2 = []\n extract_eig_proj = False\n\n if extract_eig_proj:\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin1':\n extract_eig_proj_ispin1 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin1':\n extract_eig_proj_ispin1 = False\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin2':\n extract_eig_proj_ispin2 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin2':\n extract_eig_proj_ispin2 = False\n if extract_eig_proj_ispin1:\n if event == 'start' and element.tag == 'r':\n data.append(element)\n if extract_eig_proj_ispin2:\n if event == 'start' and element.tag == 'r':\n data2.append(element)\n\n if extract_dynmat:\n if event == 'start' and 
element.tag == 'varray' \\\n and element.attrib.get('name') == 'hessian':\n extract_hessian = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'hessian':\n num_atoms = 0\n if self._lattice['species'] is not None:\n num_atoms = self._lattice['species'].shape[0]\n else:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])\n sys.exit(self.ERROR_NO_SPECIES)\n hessian = self._convert_array2D_f(data, num_atoms * 3)\n self._data['hessian'] = hessian\n data = []\n extract_hessian = False\n if event == 'start' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'eigenvectors':\n extract_dynmat_eigen = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'eigenvectors':\n num_atoms = 0\n if self._lattice['species'] is not None:\n num_atoms = self._lattice['species'].shape[0]\n else:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_NO_SPECIES])\n sys.exit(self.ERROR_NO_SPECIES)\n eigenvec = self._convert_array2D_f(data, num_atoms * 3)\n dynmat['eigenvectors'] = eigenvec\n data = []\n extract_dynmat_eigen = False\n if extract_hessian:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n if extract_dynmat_eigen:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n try:\n if event == 'start' and \\\n element.attrib['name'] == 'eigenvalues':\n dynmat['eigenvalues'] = self._convert_array_f(\n element)\n except KeyError:\n pass\n\n if extract_born:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n\n if extract_species:\n if event == 'start' and element.tag == 'c':\n data.append(element)\n\n if extract_kpointdata:\n try:\n if event == 'start' and element.tag == 'v' and \\\n element.attrib['name'] == 'divisions':\n self._lattice['kpointdiv'] = self._convert_array_i(\n element)\n except KeyError:\n pass\n\n if event == 'start' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'kpointlist':\n extract_kpoints = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'kpointlist':\n self._lattice['kpoints'] = self._convert_array2D_f(data, 3)\n data = []\n extract_kpoints = False\n if event == 'start' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'weights':\n extract_kpointsw = True\n if event == 'end' and element.tag == 'varray' \\\n and element.attrib.get('name') == 'weights':\n self._lattice['kpointsw'] = self._convert_array1D_f(data)\n data = []\n extract_kpointsw = False\n if extract_kpoints:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n if extract_kpointsw:\n if event == 'start' and element.tag == 'v':\n data.append(element)\n\n if extract_dos:\n if event == 'start' and element.tag == 'i' and \\\n element.attrib.get('name') == 'efermi':\n data6.append(element)\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_dos_ispin1 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_dos_ispin1 = False\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 2':\n extract_dos_ispin2 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 2':\n extract_dos_ispin2 = False\n if extract_dos_ispin1:\n if event == 'start' and element.tag == 'r':\n data.append(element)\n if extract_dos_ispin2:\n if event == 'start' and element.tag == 'r':\n 
data2.append(element)\n\n if extract_dos_specific:\n if event == 'start' and element.tag == 'i' and \\\n element.attrib.get('name') == 'efermi':\n data6.append(element)\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_dos_specific_ispin1 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 1':\n extract_dos_specific_ispin1 = False\n if event == 'start' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 2':\n extract_dos_specific_ispin2 = True\n if event == 'end' and element.tag == 'set' \\\n and element.attrib.get('comment') == 'spin 2':\n extract_dos_specific_ispin2 = False\n if extract_dos_specific_ispin1:\n if event == 'start' and element.tag == 'r':\n data.append(element)\n if extract_dos_specific_ispin2:\n if event == 'start' and element.tag == 'r':\n data2.append(element)\n\n # now we need to update some elements\n # for static runs, initial is equal to last\n # if cell:\n # if len(cell) == 1:\n # cell[2] = cell[1]\n # if pos:\n # if len(pos) == 1:\n # pos[2] = pos[1]\n # if force:\n # if len(force) == 1:\n # force[2] = force[1]\n # if stress:\n # if len(stress) == 1:\n # stress[2] = stress[1]\n # if totens:\n # if len(totens) == 1:\n # totens[2] = totens[1]\n\n # if not extract_all:\n # # only save initial and last\n # if cell:\n # cell = {key: np.asarray(cell[key]) for key in {1, 2}}\n # if pos:\n # pos = {key: np.asarray(pos[key]) for key in {1, 2}}\n # if force:\n # force = {key: force[key] for key in {1, 2}}\n # if stress:\n # stress = {key: stress[key] for key in {1, 2}}\n # if totens:\n # totens = {key: totens[key] for key in {1, 2}}\n\n # if any dict is empty, set to zero\n if not cell:\n cell = None\n if not pos:\n pos = None\n if not force:\n force = None\n if not stress:\n stress = None\n if not totens:\n totens = None\n\n # store\n self._lattice['unitcell'] = cell\n self._lattice['positions'] = pos\n self._data['forces'] = force\n self._data['stress'] = stress\n self._data['totens'] = totens\n\n return\n\n def _fetch_versionw(self, xml):\n \"\"\"Fetch and set version using etree\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n version : string\n If version is found it is returned.\n\n Notes\n -----\n Used when detecting the VASP version.\n\n \"\"\"\n\n entry = self._find(\n xml, './/generator/i[@name=\"version\"]')\n\n if entry is None:\n return None\n\n return entry.text\n \n def _fetch_symprecw(self, xml):\n \"\"\"Fetch and set symprec using etree\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n symprec : float\n If SYMPREC is found it is returned.\n\n Notes\n -----\n Used when detecting symmetry.\n\n \"\"\"\n\n entry = self._find(\n xml, './/parameters/separator[@name=\"symmetry\"]/'\n 'i[@name=\"SYMPREC\"]')\n\n if entry is None:\n return None\n\n symprec = self._convert_f(entry)\n\n return symprec\n\n def _fetch_sigmaw(self, xml):\n \"\"\"Fetch and set sigma using etree\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n sigma : float\n If SIGMA is found it is returned.\n\n Notes\n -----\n Determines the smearing used etc.\n\n \"\"\"\n\n entry = self._find(\n xml, './/parameters/separator[@name=\"electronic\"]/'\n 'separator[@name=\"electronic smearing\"]/'\n 'i[@name=\"SIGMA\"]')\n\n if entry is None:\n return None\n\n sigma = 
self._convert_f(entry)\n\n return sigma\n\n def _fetch_nelmw(self, xml):\n \"\"\"Fetch and set nelm using etree\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n nelm : float\n If NELM is found it is returned.\n\n Notes\n -----\n Maximum number of eletronic steps. This is needed for checking if the eletronic\n structure is converged. \n\n \"\"\"\n\n entry = self._find(\n xml, './/parameters/separator[@name=\"electronic\"]/'\n 'separator[@name=\"electronic convergence\"]/'\n 'i[@name=\"NELM\"]')\n\n if entry is None:\n return None\n\n nelm = self._convert_i(entry)\n\n return nelm\n\n def _fetch_nsww(self, xml):\n \"\"\"Fetch and set nsw using etree\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n nsw : int\n If NSW is found it is returned.\n\n Notes\n -----\n Maximum number of eletronic steps. This is needed for checking ionic convergence. \n \"\"\"\n\n entry = self._find(\n xml, './/parameters/separator[@name=\"ionic\"]/'\n 'i[@name=\"NSW\"]')\n\n if entry is None:\n return None\n\n nsw = self._convert_i(entry)\n\n return nsw\n\n def _fetch_ispinw(self, xml):\n \"\"\"Fetch and set ispin using etree\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n ispin : int\n If ISPIN is found it is returned.\n\n Notes\n -----\n Determines if spin is included. ISPIN=2 separates the spins.\n\n \"\"\"\n\n entry = self._find(\n xml, './/parameters/separator[@name=\"electronic\"]/'\n 'separator[@name=\"electronic spin\"]/'\n 'i[@name=\"ISPIN\"]')\n if entry is None:\n return None\n\n ispin = self._convert_i(entry)\n\n return ispin\n\n def _fetch_ismearw(self, xml):\n \"\"\"Fetch and set ismear using etree\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n ismear : int\n If ISMEAR is found it is returned.\n\n Notes\n -----\n Determines which smearing factor is used on the electrons.\n\n \"\"\"\n\n entry = self._find(\n xml, './/parameters/separator[@name=\"electronic\"]/'\n 'separator[@name=\"electronic smearing\"]/'\n 'i[@name=\"ISMEAR\"]')\n\n if entry is None:\n return None\n\n ismear = self._convert_i(entry)\n\n return ismear\n\n def _fetch_nbandsw(self, xml):\n \"\"\"Fetch and set nbands using etree\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n nbands : int\n If NBANDS is found it is returned.\n\n Notes\n -----\n The number of bands used in the calculation.\n\n \"\"\"\n\n entry = self._find(\n xml, './/parameters/separator[@name=\"electronic\"]/'\n 'i[@name=\"NBANDS\"]')\n if entry is None:\n return None\n\n nbands = self._convert_i(entry)\n\n return nbands\n\n def _fetch_nelectw(self, xml):\n \"\"\"Fetch and set nelect using etree.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n nelect : float\n If NELECT is found it is returned.\n\n Notes\n -----\n The number of electrons used in the calculation.\n\n \"\"\"\n\n entry = self._find(\n xml, './/parameters/separator[@name=\"electronic\"]/'\n 'i[@name=\"NELECT\"]')\n\n if entry is None:\n return None\n\n nelect = self._convert_f(entry)\n\n return nelect\n\n def _fetch_systemw(self, xml):\n \"\"\"Fetch and set system using etree.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n system : string\n If SYSTEM 
is found it is returned.\n\n Notes\n -----\n A comment that can be specified in the INCAR file.\n\n \"\"\"\n\n entry = self._find(\n xml, './/parameters/separator[@name=\"general\"]/'\n 'i[@name=\"SYSTEM\"]')\n\n if entry is None:\n return None\n\n system = entry.text\n\n return system\n\n def _fetch_bornw(self, xml):\n \"\"\"Fetch the Born effetive charges.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n born : ndarray\n A ndarray containing the born effective charge\n tensor for each atom.\n\n \"\"\"\n\n entry = self._findall(\n xml, './/calculation/array[@name=\"born_charges\"]/'\n 'set/v')\n\n if entry is None:\n return None\n\n num_atoms = 0\n species = self._lattice['species']\n if species is None:\n # Try to fetch species again, if still none, we cannot parse it\n # and thus we cannot parse the entries in here either.\n species = self._fetch_speciesw(xml)\n if species is None:\n return None, None, None, None\n num_atoms = species.shape[0]\n\n born = self._convert_array2D_f(entry, 3)\n\n born = np.asarray(np.split(born, num_atoms))\n\n return born\n\n def _fetch_upfsw(self, xml, extract_all=False):\n \"\"\"Fetch the unitcell, atomic positions, force and stress.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n extract_all : bool\n Determines which unitcell and positions to get.\n Defaults to the initial and last. If True, extract all.\n\n Returns\n -------\n cell, pos, force, stress : dict\n An dictionary containing ndarrays of the:\n | unitcells with the vectors as rows in AA.\n\n | positions with each position as a row in direct coordinates.\n\n | forces where each row is the force in eV/AA on each atom.\n\n | stress where each row is the stress matrix for the unitcell in kB.\n\n \"\"\"\n\n cell = {}\n pos = {}\n force = {}\n stress = {}\n num_atoms = 0\n species = self._lattice['species']\n if species is None:\n # Try to fetch species again, if still none, we cannot parse it\n # and thus we cannot parse the entries in here either.\n species = self._fetch_speciesw(xml)\n if species is None:\n return None, None, None, None\n num_atoms = species.shape[0]\n\n if not extract_all:\n entry = self._findall(\n xml,\n './/structure[@name=\"finalpos\"]/crystal/varray[@name=\"basis\"]/v'\n )\n if entry is not None:\n cell[2] = self._convert_array2D_f(entry, 3)\n else:\n cell[2] = None\n entry = self._findall(\n xml,\n './/structure[@name=\"initialpos\"]/crystal/varray[@name=\"basis\"]/v'\n )\n if entry is not None:\n cell[1] = self._convert_array2D_f(entry, 3)\n else:\n cell[1] = None\n entry = self._findall(\n xml,\n './/structure[@name=\"finalpos\"]/varray[@name=\"positions\"]/v')\n if entry is not None:\n pos[2] = self._convert_array2D_f(entry, 3)\n else:\n pos[2] = None\n entry = self._findall(\n xml,\n './/structure[@name=\"initialpos\"]/varray[@name=\"positions\"]/v')\n if entry is not None:\n pos[1] = self._convert_array2D_f(entry, 3)\n else:\n pos[1] = None\n\n entry = self._findall(xml,\n './/calculation/varray[@name=\"stress\"]/v')\n\n if entry is not None:\n stress[1] = self._convert_array2D_f(entry[0:3], 3)\n stress[2] = self._convert_array2D_f(entry[-3:], 3)\n else:\n stress[1] = None\n stress[2] = None\n\n entry = self._findall(xml,\n './/calculation/varray[@name=\"forces\"]/v')\n if entry is not None:\n force[1] = self._convert_array2D_f(entry[0:num_atoms], 3)\n force[2] = self._convert_array2D_f(entry[-num_atoms:], 3)\n else:\n force[1] = None\n force[2] = None\n 
else:\n structures = self._findall(xml, './/calculation/structure')\n entrycell = self._findall(\n xml,\n './/calculation/structure/crystal/varray[@name=\"basis\"]/v')\n entrypos = self._findall(\n xml, './/calculation/structure/varray[@name=\"positions\"]/v')\n entryforce = self._findall(\n xml, './/calculation/varray[@name=\"forces\"]/v')\n entrystress = self._findall(\n xml, './/calculation/varray[@name=\"stress\"]/v')\n\n if structures is not None:\n num_calcs = len(structures)\n else:\n return None\n\n num_entrycell = 0\n num_entrypos = 0\n num_entryforce = 0\n num_entrystress = 0\n\n if entrycell is not None:\n num_entrycell = len(entrycell)\n cell[1] = self._convert_array2D_f(entrycell[0:3], 3)\n if num_entrycell > 3:\n cell[2] = self._convert_array2D_f(entrycell[-3:], 3)\n else:\n cell[2] = None\n else:\n cell[1] = None\n cell[2] = None\n\n if entrypos is not None:\n num_entrypos = len(entrypos)\n pos[1] = self._convert_array2D_f(entrypos[0:num_atoms], 3)\n if num_entrypos > 3:\n pos[2] = self._convert_array2D_f(entrypos[-num_atoms:], 3)\n else:\n pos[2] = None\n else:\n pos[1] = None\n pos[2] = None\n\n if entryforce is not None:\n num_entryforce = len(entryforce)\n force[1] = self._convert_array2D_f(entryforce[0:num_atoms], 3)\n if num_entryforce > 3:\n force[2] = self._convert_array2D_f(entryforce[-num_atoms:],\n 3)\n else:\n force[2] = None\n else:\n force[1] = None\n force[2] = None\n\n if entrystress is not None:\n num_entrystress = len(entrystress)\n stress[1] = self._convert_array2D_f(entrystress[0:3], 3)\n if num_entrystress > 3:\n stress[2] = self._convert_array2D_f(entrystress[-3:], 3)\n else:\n stress[2] = None\n else:\n stress[1] = None\n stress[2] = None\n\n max_entries = max(\n num_entrystress,\n max(num_entryforce, max(num_entrycell, num_entrypos)))\n if max_entries > 6:\n for calc in range(1, num_calcs):\n basecell = calc * 3\n basepos = calc * num_atoms\n if entrycell is not None:\n cell[calc + 1] = self._convert_array2D_f(\n entrycell[basecell:basecell + 3], 3)\n if entrypos is not None:\n pos[calc + 1] = self._convert_array2D_f(\n entrypos[basepos:basepos + num_atoms], 3)\n if entryforce is not None:\n force[calc + 1] = self._convert_array2D_f(\n entryforce[basepos:basepos + num_atoms], 3)\n if entrystress is not None:\n stress[calc + 1] = self._convert_array2D_f(\n entrystress[basecell:basecell + 3], 3)\n\n # If we still only have one entry, or number two is None, last and initial should\n # be the same, force them to be similar. 
We could do this earlier, but this is done\n # to keep it tidy, and if one in the future would like to utilize the returned None.\n if cell:\n if len(cell) == 1 or cell[2] is None:\n cell[2] = cell[1]\n if pos:\n if len(pos) == 1 or pos[2] is None:\n pos[2] = pos[1]\n if force:\n if len(force) == 1 or force[2] is None:\n force[2] = force[1]\n if stress:\n if len(stress) == 1 or stress[2] is None:\n stress[2] = stress[1]\n\n return cell, pos, force, stress\n\n def _fetch_speciesw(self, xml):\n \"\"\"Fetch the atomic species\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n spec : ndarray\n An array containing the atomic species as a number.\n Organized in the same order as the atomic positions.\n\n \"\"\"\n\n entry = self._findall(xml, './/atominfo/'\n 'array[@name=\"atoms\"]/set/rc/c')\n\n if entry is None:\n return None\n\n spec = self._convert_species(entry[::2])\n\n return spec\n\n def _fetch_hessian(self, xml):\n \"\"\"Fetch the hessian.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n hessian : ndarray\n An array containing the Hessian matrix.\n\n \"\"\"\n\n entry = self._findall(\n xml, './/calculation/dynmat/'\n 'varray[@name=\"hessian\"]/v')\n\n if entry is None:\n return None\n\n species = self._lattice['species']\n if species is None:\n # Try to fetch species again, if still none, we cannot parse it\n # and thus we cannot parse the entries in here either.\n species = self._fetch_speciesw(xml)\n if species is None:\n return None\n num_atoms = species.shape[0]\n\n hessian = self._convert_array2D_f(entry, num_atoms * 3)\n\n return hessian\n\n def _fetch_dynmatw(self, xml):\n \"\"\"Fetch the dynamical matrix data.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n dynmat : dict\n An dict containing the eigenvalues and eigenvectors.\n\n \"\"\"\n\n entry = self._find(xml, './/calculation/dynmat/'\n 'v[@name=\"eigenvalues\"]')\n\n if entry is None:\n return None\n\n species = self._lattice['species']\n if species is None:\n # Try to fetch species again, if still none, we cannot parse it\n # and thus we cannot parse the entries in here either.\n species = self._fetch_speciesw(xml)\n if species is None:\n return None\n num_atoms = species.shape[0]\n\n eigenvalues = self._convert_array_f(entry)\n\n entry = self._find(\n xml, './/calculation/dynmat/'\n 'varray[@name=\"eigenvectors\"]')\n\n if entry is None:\n return None\n\n eigenvectors = self._convert_array2D_f(entry, num_atoms * 3)\n\n dynmat = {'eigenvalues': eigenvalues, 'eigenvectors': eigenvectors}\n\n return dynmat\n\n def _fetch_kpointsw(self, xml,\n path='kpoints/varray[@name=\"kpointlist\"]/v'):\n \"\"\"Fetch the kpoints.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n path : string\n The path in the XML file containing the k-point set to be extracted.\n\n Returns\n -------\n kpoints : ndarray\n An array containing the kpoints used in the calculation\n in direct coordinates.\n\n \"\"\"\n\n entry = self._findall(xml, path)\n\n if entry is None:\n return None\n\n kpoints = self._convert_array2D_f(entry, 3)\n\n return kpoints\n\n def _fetch_kpointsww(self, xml, path='kpoints/varray[@name=\"weights\"]/v'):\n \"\"\"Fetch the kpoint weights.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n path : string\n The path in the XML file containing the k-point 
weights to be extracted.\n\n Returns\n -------\n kpointw : ndarray\n An array containing the kpoint weights used in the\n calculation.\n\n \"\"\"\n\n entry = self._findall(xml, path)\n\n if entry is None:\n return None\n\n kpointsw = self._convert_array1D_f(entry)\n\n return kpointsw\n\n def _fetch_kpointdivw(self, xml):\n \"\"\"Fetch the number of kpoints in each direction.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n kpointdiv : list\n An list containing the kpoint divisions used in the\n calculation for the full BZ.\n\n \"\"\"\n\n entry = self._find(xml, 'kpoints/generation/v[@name=\"divisions\"]')\n\n if entry is None:\n return None\n\n kpointdiv = self._convert_array_i(entry)\n\n return kpointdiv\n\n def _fetch_eigenvaluesw(self, xml):\n \"\"\"Fetch the eigenvalues.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n eigenvalues, occupancies : tupple\n An tupple of dicts containing ndarrays containing the\n eigenvalues and occupancies for each spin, band and\n kpoint index.\n\n \"\"\"\n\n # spin 1\n entry_ispin1 = self._findall(\n xml, './/calculation/eigenvalues/array/set/'\n 'set[@comment=\"spin 1\"]/set/r')\n\n # spin 2\n entry_ispin2 = self._findall(\n xml, './/calculation/eigenvalues/array/set/'\n 'set[@comment=\"spin 2\"]/set/r')\n\n # if we do not find spin 1 entries return right away\n if entry_ispin1 is None:\n return None, None\n\n eigenvalues, occupancies = self._extract_eigenvalues(\n entry_ispin1, entry_ispin2, len(self._lattice['kpoints']))\n\n return eigenvalues, occupancies\n\n def _fetch_eigenvalues_specificw(self, xml):\n \"\"\"Fetch the eigenvalues at specific k-point grids.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n eigenvalues : ndarray\n An ndarray containing the\n eigenvalues for each spin, band and\n kpoint index.\n\n \"\"\"\n\n # spin 1\n entry_ispin1 = self._findall(\n xml, './/calculation/eigenvalues/'\n 'eigenvalues/array/set/'\n 'set[@comment=\"spin 1\"]/set/r')\n # spin 2\n entry_ispin2 = self._findall(\n xml, './/calculation/eigenvalues/'\n 'eigenvalues/array/set/'\n 'set[@comment=\"spin 2\"]/set/r')\n if entry_ispin1 is not None:\n # Also extract the k-point grids\n self._data['kpoints'] = self._fetch_kpointsw(\n xml,\n path='.//calculation/eigenvalues/'\n 'kpoints/varray[@name=\"kpointlist\"]/v')\n\n self._data['kpointsw'] = self._fetch_kpointsww(\n xml,\n path='//calculation/eigenvalues/'\n 'kpoints/varray[@name=\"weights\"]/v')\n\n # if we do not find spin 1 entries return right away\n if entry_ispin1 is None:\n return None\n\n eigenvalues, _ = self._extract_eigenvalues(entry_ispin1, entry_ispin2,\n len(self._data['kpoints']))\n\n return eigenvalues\n\n def _fetch_eigenvelocitiesw(self, xml):\n \"\"\"Fetch the eigenvelocities and eigenvalues..\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n eigenvelocities : dict\n A dict with ndarrays containing the\n eigenvalues and eigenvelocities for each spin, band and\n kpoint index.\n kpoints : dict\n A dict with a ndarray containing the k-point in the full BZ\n on which the eigenvelocities were extracted.\n\n \"\"\"\n\n # spin 1\n entry_ispin1 = self._findall(\n xml, './/calculation/eigenvelocities/'\n 'eigenvalues/array/set/'\n 'set[@comment=\"spin 1\"]/set/r')\n\n # spin 2\n entry_ispin2 = self._findall(\n xml, 
'.//calculation/eigenvelocities/'\n 'eigenvalues/array/set/'\n 'set[@comment=\"spin 2\"]/set/r')\n\n # if we do not find spin 1 entries return right away\n if entry_ispin1 is None:\n return None\n\n self._data['kpoints'] = self._fetch_kpointsw(\n xml,\n path='.//calculation/eigenvelocities/'\n 'kpoints/varray[@name=\"kpointlist\"]/v')\n\n self._data['kpointsw'] = self._fetch_kpointsww(\n xml,\n path='//calculation/eigenvelocities/'\n 'kpoints/varray[@name=\"weights\"]/v')\n\n eigenvelocities = self._extract_eigenvelocities(\n entry_ispin1, entry_ispin2, len(self._data['kpoints']))\n\n return eigenvelocities\n\n def _fetch_projectorsw(self, xml):\n \"\"\"Fetch the projectors.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n projectors : dict\n An dict containing ndarrays of the projectors\n for each atomic, spin, band and kpoint index.\n\n \"\"\"\n\n # projectors spin 1\n entry_ispin1 = self._findall(\n xml, './/calculation/projected/array/set/'\n 'set[@comment=\"spin1\"]/set/set/r')\n\n # projectors spin 2\n entry_ispin2 = self._findall(\n xml, './/calculation/projected/array/set/'\n 'set[@comment=\"spin2\"]/set/set/r')\n\n # if we do not find spin 1 entries return right away\n if entry_ispin1 is None:\n return None\n\n projectors = self._extract_projectors(entry_ispin1, entry_ispin2)\n return projectors\n\n def _fetch_totensw(self, xml):\n \"\"\"Fetch the total energies\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n eigenvalues, occupancies : tupple\n An tupple of two ndarrays containing the eigenvalues\n for each spin, band and kpoint index.\n\n \"\"\"\n\n # fetch the energies for all electronic\n # steps, due to the fact that the number of steps is not the\n # same between each calculation we need to look at all the\n # children\n #\n # TODO: check in the future if it is faster to fetch all scstep\n # elements and then only how many scstep there is pr. 
calc\n # and sort from there\n #\n\n entries = self._findall(xml, './/calculation')\n\n if entries is None:\n return None\n\n # this most likely takes too long for very long fpmd calculations,\n # so consider putting in a flag that only extract the\n # energies from each step in the calculation and not the scsteps as\n # well\n energies = {}\n\n for index, calc in enumerate(entries):\n energies_pr_calc = {}\n for supported_energy, supported_key in _SUPPORTED_TOTAL_ENERGIES.items():\n data = self._findall(calc,\n './/scstep/energy/i[@name=\"' + supported_key + '\"]')\n if data is None:\n return None\n data = self._convert_array1D_f(data)\n energies_pr_calc[supported_energy] = data\n\n # now fetch the final entry outside the sc steps for each calculation\n # this term might have been corrected and scaled compared to the final sc energy\n # extrapolated energy\n data = self._findall(calc,\n './energy/i[@name=\"' + supported_key + '\"]')\n if data is None:\n return None\n data = self._convert_f(data[0])\n energies_pr_calc[supported_energy+'_final'] = data\n energies[index + 1] = energies_pr_calc\n\n return energies\n\n def _fetch_dosw(self, xml):\n \"\"\"Fetch the density of states.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n\n Returns\n -------\n dos, dos_specific : dicts\n Dictionaries with ndarrays containing the energies, total and\n integrated density of states for the regular and specific k-point grid, respectively.\n\n \"\"\"\n\n # fetch the Fermi level\n entry = self._find(xml, './/calculation/dos/i[@name=\"efermi\"]')\n\n if entry is not None:\n fermi_level = self._convert_f(entry)\n else:\n fermi_level = None\n\n # spin 1\n entry_total_ispin1 = self._findall(\n xml, './/calculation/dos/total/array/set/set[@comment=\"spin 1\"]/r')\n\n # spin 2\n entry_total_ispin2 = self._findall(\n xml, './/calculation/dos/total/array/set/set[@comment=\"spin 2\"]/r')\n\n # partial spin 1\n entry_partial_ispin1 = self._findall(\n xml,\n './/calculation/dos/partial/array/set/set/set[@comment=\"spin 1\"]/r'\n )\n\n # partial spin 2\n entry_partial_ispin2 = self._findall(\n xml,\n './/calculation/dos/partial/array/set/set/set[@comment=\"spin 2\"]/r'\n )\n\n # if no entries for spin 1, eject right away\n if entry_total_ispin1 is None:\n return None, None\n\n num_atoms = 0\n species = self._lattice['species']\n if species is None:\n # Try to fetch species again, if still none, we cannot parse it\n # and thus we cannot parse the entries in here either.\n species = self._fetch_speciesw(xml)\n if species is None:\n return None, None\n num_atoms = species.shape[0]\n\n dos = self._extract_dos(entry_total_ispin1, entry_total_ispin2,\n entry_partial_ispin1, entry_partial_ispin2,\n fermi_level, num_atoms)\n\n # Now extract the density of states for specific k-point grids\n # fetch the Fermi level again as this can be different\n entry = self._find(\n xml,\n './/calculation/dos[@comment=\"interpolated\"]/i[@name=\"efermi\"]')\n\n if entry is not None:\n fermi_level = self._convert_f(entry)\n else:\n fermi_level = None\n\n # spin 1\n entry_total_ispin1 = self._findall(\n xml,\n './/calculation/dos[@comment=\"interpolated\"]/total/array/set/set[@comment=\"spin 1\"]/r'\n )\n\n # spin 2\n entry_total_ispin2 = self._findall(\n xml,\n './/calculation/dos[@comment=\"interpolated\"]/total/array/set/set[@comment=\"spin 2\"]/r'\n )\n\n # partial spin 1\n entry_partial_ispin1 = self._findall(\n xml,\n 
'.//calculation/dos[@comment=\"interpolated\"]/partial/array/set/set/set[@comment=\"spin 1\"]/r'\n )\n\n # partial spin 2\n entry_partial_ispin2 = self._findall(\n xml,\n './/calculation/dos[@comment=\"interpolated\"]/partial/array/set/set/set[@comment=\"spin 2\"]/r'\n )\n\n # if no entries for spin 1, eject right away\n if entry_total_ispin1 is None:\n return dos, None\n\n dos_specific = self._extract_dos(entry_total_ispin1,\n entry_total_ispin2,\n entry_partial_ispin1,\n entry_partial_ispin2, fermi_level,\n num_atoms)\n\n return dos, dos_specific\n\n def _extract_dos(self, entry_total_ispin1, entry_total_ispin2,\n entry_partial_ispin1, entry_partial_ispin2, fermi_level,\n num_atoms):\n \"\"\"Extract the density of states.\n\n Parameters\n ----------\n entry_total_spin1 : list\n A list containing ElementTree objects of the total density of states entry for\n spin channel 1.\n entry_total_spin2 : list\n A list containing ElementTree objects of the total density of states entry for\n spin channel 2.\n entry_total_spin1 : list\n A list containing ElementTree objects of the partial density of states entry for\n spin channel 1.\n entry_total_spin1 : list\n A list containing ElementTree objects of the partial density of states entry for\n spin_channel 2.\n fermi_level : float\n The Fermi level in eV.\n num_atoms : int\n The number of atoms.\n\n Returns\n -------\n dos : dict\n A dict of ndarrays containing the energies, total and\n integrated density of states.\n\n \"\"\"\n if entry_total_ispin2:\n dos = {}\n dos = {'up': None, 'down': None}\n dos_ispin = self._convert_array2D_f(entry_total_ispin1, 3)\n _dosup = {}\n _dosdown = {}\n enrgy = dos_ispin[:, 0]\n _dosup['total'] = dos_ispin[:, 1]\n _dosup['integrated'] = dos_ispin[:, 2]\n # check if partial exists\n if entry_partial_ispin1:\n dos_ispin = self._convert_array2D_f(entry_partial_ispin1, 10)\n # do not need the energy term (similar to total)\n _dosup['partial'] = np.asarray(\n np.split(dos_ispin[:, 1:10], num_atoms))\n else:\n _dosup['partial'] = None\n dos['up'] = _dosup\n dos_ispin = self._convert_array2D_f(entry_total_ispin2, 3)\n _dosdown['total'] = dos_ispin[:, 1]\n _dosdown['integrated'] = dos_ispin[:, 2]\n if entry_partial_ispin2:\n dos_ispin = self._convert_array2D_f(entry_partial_ispin2, 10)\n # do not need the energy term (similar to total)\n _dosdown['partial'] = np.asarray(\n np.split(dos_ispin[:, 1:10], num_atoms))\n else:\n _dosdown['partial'] = None\n dos['down'] = _dosdown\n dos['total'] = {'fermi_level': fermi_level, 'energy': enrgy}\n else:\n dos = {}\n dos = {'total': None}\n dos_ispin = self._convert_array2D_f(entry_total_ispin1, 3)\n _dos = {}\n _dos['energy'] = dos_ispin[:, 0]\n _dos['total'] = dos_ispin[:, 1]\n _dos['integrated'] = dos_ispin[:, 2]\n # check if partial exists\n if entry_partial_ispin1:\n dos_ispin = self._convert_array2D_f(entry_partial_ispin1, 10)\n # do not need the energy term (similar to total)\n _dos['partial'] = np.asarray(\n np.split(dos_ispin[:, 1:10], num_atoms))\n else:\n _dos['partial'] = None\n _dos['fermi_level'] = fermi_level\n dos['total'] = _dos\n\n return dos\n\n def _fetch_dielectricsw(self, xml, method='dft', transfer=None):\n \"\"\" Fetch the dielectric function from the VASP XML file\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n method : {'dft', 'qp', 'bse'}, optional\n What method was used to obtain the dielectric function. 
VASP\n uses different output between DFT, QP and BSE calculations\n (defaults to 'dft').\n transfer : {'density', 'current'}, optional\n Which dielectric function do you want? Density-density or\n current-current? Defaults to the density-density.\n\n Returns\n -------\n diel_imag : (N,6) list of list of float\n If `method` is 'dft'.\n The imaginary dielectric function for N energies for the\n xx, yy, zz, xy, yz and zx component, respectively.\n diel_real : (N,6) list of float\n If `method` is 'dft'.\n The real dielectric function for N energies for the\n xx, yy, zz, xy, yz and zx component, respectively.\n diel_imag_mac : (N,6) list of list of float\n If `method` is 'qp'.\n The imaginary part of the macroscopic dielectric function.\n See `diel_imag` for layout.\n diel_real_mac : (N,6) list of list of float\n If `method` is 'qp'.\n The real part of the polarized dielectric function.\n See `diel_imag` for layout.\n diel_imag_pol : (N,6) list of list of float\n If `method` is 'qp'.\n The imaginary part of the polarized dielectric function.\n See `diel_imag` for layout.\n diel_real_pol : (N,6) list of list of floa\n If `method` is 'qp'.\n The real part of the polarized dielectric function.\n See `diel_imag` for layout.\n diel_imag_invlfrpa : (N,6) list of list of float\n If `method` is 'qp'.\n The imaginary part of the inverse dielectric function with\n local field effects on the RPA level.\n See `diel_imag` for layout.\n diel_real_invlfrpa : (N,6) list of list of float\n If `method` is 'qp'.\n The real part of the inverse dielectric function with\n local field effects on the RPA level.\n See `diel_imag` for layout.\n diel_imag : (N,6) list of list of float\n If `method` is 'bse'.\n The imaginary part of the BSE dielectric function.\n See `diel_imag` above for layout.\n diel_real : (N,6) list of list of float\n If `method` is 'bse'.\n The real part of the BSE dielectric function.\n See `diel_imag` at the top for layout.\n epsilon : (3,3) list of list of float\n\n \"\"\"\n\n if method == 'dft':\n diel = {}\n if transfer == 'density':\n tag = 'dielectricfunction[@comment=\"density-density\"]'\n elif transfer == 'current':\n tag = 'dielectricfunction[@comment=\"current-current\"]'\n else:\n tag = 'dielectricfunction'\n\n # imaginary part\n entry = self._findall(\n xml, './/calculation/' + tag + '/imag/array/set/r')\n if entry is None:\n diel['imag'] = None\n diel['energy'] = None\n else:\n data = self._convert_array2D_f(entry, 7)\n diel['energy'] = data[:, 0]\n diel['imag'] = data[:, 1:7]\n\n # real part\n entry = self._findall(\n xml, './/calculation/' + tag + '/real/array/set/r')\n if entry is None:\n diel['real'] = None\n else:\n data = self._convert_array2D_f(entry, 7)\n diel['real'] = data[:, 1:7]\n\n # epsilon part\n entry = self._findall(xml,\n './/calculation/varray[@name=\"epsilon\"]/v')\n if entry is not None:\n diel['epsilon'] = self._convert_array2D_f(entry, 3)\n else:\n diel['epsilon'] = None\n\n # ionic epsilon part\n entry = self._findall(\n xml, './/calculation/varray[@name=\"epsilon_ion\"]/v')\n if entry is not None:\n diel['epsilon_ion'] = self._convert_array2D_f(entry, 3)\n else:\n diel['epsilon_ion'] = None\n\n return diel\n\n # if method == \"qp\":\n # try:\n # dielectric_xml = root.findall('dielectricfunction')\n # except AttributeError:\n # logger.error(\n # \"Did not find <dielectricfunction> tag in the current XML.\"\n # \"Exiting.\")\n # sys.exit(1)\n\n # # first head of macroscopic\n # diel_imag_xml = dielectric_xml[0].find(\n # 'imag').find('array').find('set')\n # 
diel_imag_mac = []\n # # first imag part\n # for energy in diel_imag_xml.iter('r'):\n # diel_imag_mac.append([float(x) for x in energy.text.split()])\n # diel_real_xml = dielectric_xml[0].find(\n # 'real').find('array').find('set')\n # diel_real_mac = []\n # # then real part\n # for energy in diel_real_xml.iter('r'):\n # diel_real_mac.append([float(x) for x in energy.text.split()])\n\n # # then polarized\n # diel_imag_xml = dielectric_xml[1].find(\n # 'imag').find('array').find('set')\n # diel_imag_pol = []\n # # first imag part\n # for energy in diel_imag_xml.iter('r'):\n # diel_imag_pol.append([float(x) for x in energy.text.split()])\n # diel_real_xml = dielectric_xml[1].find(\n # 'real').find('array').find('set')\n # diel_real_pol = []\n # # then real part\n # for energy in diel_real_xml.iter('r'):\n # diel_real_pol.append([float(x) for x in energy.text.split()])\n\n # # then inverse macroscopic (including local field)\n # diel_imag_xml = dielectric_xml[2].find(\n # 'imag').find('array').find('set')\n # diel_imag_invlfrpa = []\n # # first imag part\n # for energy in diel_imag_xml.iter('r'):\n # diel_imag_invlfrpa.append([float(x) for x in energy.text.split()])\n # diel_real_xml = dielectric_xml[2].find(\n # 'real').find('array').find('set')\n # diel_real_invlfrpa = []\n # # then real part\n # for energy in diel_real_xml.iter('r'):\n # diel_real_invlfrpa.append([float(x) for x in energy.text.split()])\n # return diel_imag_mac, diel_real_mac, diel_imag_pol, diel_real_pol, \\\n # diel_imag_invlfrpa, diel_real_invlfrpa\n\n # if method == \"bse\":\n # try:\n # dielectric_xml = root.find('dielectricfunction')\n # except AttributeError:\n # logger.error(\n # \"Did not find <dielectricfunction> tag in the current XML.\"\n # \"Exiting.\")\n # sys.exit(1)\n # diel_imag_xml = dielectric_xml.find('imag').find('array').find('set')\n # diel_imag = []\n # # first imag part\n # for energy in diel_imag_xml.iter('r'):\n # diel_imag.append([float(x) for x in energy.text.split()])\n # diel_real_xml = dielectric_xml.find('real').find('array').find('set')\n # diel_real = []\n # # then real part\n # for energy in diel_real_xml.iter('r'):\n # diel_real.append([float(x) for x in energy.text.split()])\n # return diel_imag, diel_real\n\n def _extract_eigenvalues(self, spin1, spin2, num_kpoints):\n \"\"\"Extract the eigenvalues.\n\n Parameters\n ----------\n spin1 : list\n A list of ElementTree object to be used for parsing of the\n ispin=1 entries.\n spin2 : list\n A list of ElementTree object to be used for parsing of the\n ispin=2 entries.\n num_kpoints : int\n The number of k-points to extract\n\n Returns\n -------\n eigenvalues, occupancies : tupple of dicts\n An tupple of two dicts containing ndarrays with the eigenvalues\n and occupancies for each band and kpoint index.\n\n \"\"\"\n\n # then check if we have asigned ispin\n if self._parameters['ispin'] is None:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_ISPIN])\n sys.exit(self.ERROR_NO_ISPIN)\n\n # then check if we have asigned nbands\n if self._parameters['nbands'] is None:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_NBANDS])\n sys.exit(self.ERROR_NO_NBADS)\n\n # ispin\n ispin = self._parameters['ispin']\n\n # number of bands\n num_bands = self._parameters['nbands']\n\n # set dicts\n eigenvalues = {}\n occupancies = {}\n\n data = []\n\n if len(spin1) != num_bands * num_kpoints:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])\n sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)\n\n # check number of elements 
in first entry of spin1 (we assume all are equal)\n entries = len(spin1[0].text.split())\n if entries > 1:\n data.append(self._convert_array2D_f(spin1, entries))\n else:\n data.append(self._convert_array1D_f(spin1))\n data[0] = np.asarray(np.split(data[0], num_kpoints))\n if spin2 is not None:\n if len(spin2) != num_bands * num_kpoints:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])\n sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)\n if entries > 1:\n data.append(self._convert_array2D_f(spin2, entries))\n else:\n data.append(self._convert_array1D_f(spin2))\n data[1] = np.asarray(np.split(data[1], num_kpoints))\n\n # convert to numpy arrays\n data = np.asarray(data)\n # swap axis if the band index should be before the kpoint index\n if not self._k_before_band:\n data = np.swapaxes(data, 1, 2)\n if spin2 is not None:\n if entries > 1:\n eigenvalues['up'] = np.ascontiguousarray(data[0, :, :, 0])\n eigenvalues['down'] = np.ascontiguousarray(data[1, :, :, 0])\n occupancies['up'] = np.ascontiguousarray(data[0, :, :, 1])\n occupancies['down'] = np.ascontiguousarray(data[1, :, :, 1])\n else:\n eigenvalues['up'] = np.ascontiguousarray(data[0, :, :])\n eigenvalues['down'] = np.ascontiguousarray(data[1, :, :])\n else:\n if entries > 1:\n eigenvalues['total'] = np.ascontiguousarray(data[0, :, :, 0])\n occupancies['total'] = np.ascontiguousarray(data[0, :, :, 1])\n else:\n eigenvalues['total'] = np.ascontiguousarray(data[0, :, :])\n if entries > 1:\n return eigenvalues, occupancies\n else:\n return eigenvalues, None\n\n def _extract_eigenvelocities(self, spin1, spin2, num_kpoints):\n \"\"\"Extract the eigenvalues and eigenvelocities.\n\n Parameters\n ----------\n spin1 : list\n A list of ElementTree object to be used for parsing of the\n ispin=1 entries.\n spin2 : list\n A list of ElementTree object to be used for parsing of the\n ispin=2 entries.\n num_kpoints : dict\n The number of k-point in the full BZ\n on which the eigenvelocities were extracted.\n\n Returns\n -------\n eigenvelocities : dict\n A dict containing ndarrays with the eigenvalues\n and eigenvelocities for each band and kpoint index.\n\n \"\"\"\n\n # check if we have asigned ispin\n if self._parameters['ispin'] is None:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_ISPIN])\n sys.exit(self.ERROR_NO_ISPIN)\n\n # then check if we have asigned nbands\n if self._parameters['nbands'] is None:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_NBANDS])\n sys.exit(self.ERROR_NO_NBANDS)\n\n # ispin\n ispin = self._parameters['ispin']\n\n # number of bands\n num_bands = self._parameters['nbands']\n\n # set dicts\n eigenvelocities = {}\n\n data = []\n if len(spin1) != num_bands * num_kpoints:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])\n sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)\n data.append(self._convert_array2D_f(spin1, 4))\n data[0] = np.asarray(np.split(data[0], num_kpoints))\n if spin2 is not None:\n if len(spin2) != num_bands * num_kpoints:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])\n sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)\n data.append(self._convert_array2D_f(spin2, 4))\n data[1] = np.asarray(np.split(data[1], num_kpoints))\n\n # convert to numpy arrays\n data = np.asarray(data)\n # swap axis if the band index should be before the kpoint index\n if not self._k_before_band:\n data = np.swapaxes(data, 1, 2)\n if spin2 is not None:\n eigenvelocities['up'] = np.ascontiguousarray(data[0])\n eigenvelocities['down'] = 
np.ascontiguousarray(data[1])\n else:\n eigenvelocities['total'] = np.ascontiguousarray(data[0])\n\n return eigenvelocities\n\n def _extract_projectors(self, spin1, spin2):\n \"\"\"Extract the projectors.\n\n Parameters\n ----------\n spin1 : list\n A list of ElementTree object to be used for parsing of the\n ispin=1 entries. Contains the projectors.\n spin2 : list\n A list of ElementTree object to be used for parsing of the\n ispin=2 entries. Contains the projectors.\n\n\n Returns\n -------\n projectors : dict\n A dict containing ndarrays with the projectors for each atom,\n band and kpoint index.\n\n \"\"\"\n\n # first check if we have extracted the kpoints\n if self._lattice['kpoints'] is None:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_KPOINTS])\n sys.exit(self.ERROR_NO_KPOINTS)\n\n # then check if we have asigned ispin\n if self._parameters['ispin'] is None:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_ISPIN])\n sys.exit(self.ERROR_NO_ISPIN)\n\n # then check if we have asigned nbands\n if self._parameters['nbands'] is None:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_NBANDS])\n sys.exit(self.ERROR_NO_NBANDS)\n\n num_atoms = 0\n # also need the number of atoms if the projected values are supplied\n if self._lattice['species'] is None:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_NBANDS])\n sys.exit(self.ERROR_NO_NBANDS)\n else:\n num_atoms = self._lattice['species'].shape[0]\n\n # number of kpoints to disect the eigenvalue sets later\n num_kpoints = self._lattice['kpoints'].shape[0]\n\n # ispin\n ispin = self._parameters['ispin']\n\n # number of bands\n num_bands = self._parameters['nbands']\n\n # set dicts\n projectors = {}\n\n pdata = []\n if len(spin1) != num_bands * num_kpoints * num_atoms:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])\n sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)\n pdata.append(self._convert_array2D_f(spin1, 9))\n pdata[0] = np.asarray(np.split(pdata[0], num_kpoints))\n pdata[0] = np.asarray(np.split(pdata[0], num_bands, axis=1))\n if spin2 is not None:\n if len(spin2) != num_bands * num_kpoints * num_atoms:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_MISMATCH_KPOINTS_NBANDS])\n sys.exit(self.ERROR_MISMATCH_KPOINTS_NBANDS)\n pdata.append(self._convert_array2D_f(spin2, 9))\n pdata[1] = np.asarray(np.split(pdata[1], num_kpoints))\n pdata[1] = np.asarray(np.split(pdata[1], num_bands, axis=1))\n\n # convert to numpy arrays\n pdata = np.asarray(pdata)\n # swap axis if the band index should be before the kpoint index\n # make sure atomic index is first\n pdata = np.swapaxes(pdata, 0, 3)\n pdata = np.swapaxes(pdata, 1, 3)\n if not self._k_before_band:\n pdata = np.swapaxes(pdata, 2, 3)\n\n if spin2 is not None:\n projectors['up'] = np.ascontiguousarray(pdata[:, 0, :, :])\n projectors['down'] = np.ascontiguousarray(pdata[:, 1, :, :])\n else:\n projectors['total'] = np.ascontiguousarray(pdata[:, 0, :, :])\n\n return projectors\n\n def _convert_array_i(self, entry):\n \"\"\"Convert the input entry to numpy array\n\n Parameters\n ----------\n entry : string\n A string containing N integer elements separated by\n blank spaces.\n\n Returns\n -------\n data : ndarray\n | Dimension: (N)\n An array containing N integers.\n\n \"\"\"\n\n data = None\n if entry is not None:\n try:\n data = np.fromstring(entry.text, sep=' ', dtype='intc')\n except ValueError as e:\n if str(e) == 'setting an array element with a sequence.':\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_OVERFLOW])\n 
sys.exit(self.ERROR_OVERFLOW)\n\n return data\n\n def _convert_array_f(self, entry):\n \"\"\"Convert the input entry to numpy array\n\n Parameters\n ----------\n entry : string\n A string containing N float elements separated by\n blank spaces.\n\n Returns\n -------\n data : ndarray\n | Dimension: (N)\n An array containing N floats.\n\n \"\"\"\n\n data = None\n if entry is not None:\n try:\n data = np.fromstring(entry.text, sep=' ', dtype='double')\n except ValueError as e:\n if str(e) == 'setting an array element with a sequence.':\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_OVERFLOW])\n sys.exit(self.ERROR_OVERFLOW)\n\n return data\n\n def _convert_array1D_i(self, entry):\n \"\"\"Convert the input entry to numpy array\n\n Parameters\n ----------\n entry : list\n A list containing Element objects where each\n element is an integer\n\n Returns\n -------\n data : ndarray\n | Dimension: (N)\n An array containing N integers.\n\n \"\"\"\n\n data = None\n if entry is not None:\n data = np.zeros(len(entry), dtype='intc')\n for index, element in enumerate(entry):\n try:\n data[index] = np.fromstring(element.text, sep=' ')\n except ValueError as e:\n if str(e) == 'setting an array element with a sequence.':\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_OVERFLOW])\n sys.exit(self.ERROR_OVERFLOW)\n\n return data\n\n def _convert_array1D_f(self, entry):\n \"\"\"Convert the input entry to numpy array\n\n Parameters\n ----------\n entry : list\n A list containing Element objects where each\n element is a float\n\n Returns\n -------\n data : ndarray\n | Dimension: (N)\n An array containing N double elements.\n\n \"\"\"\n\n data = None\n\n if entry is not None:\n data = np.zeros(len(entry), dtype='double')\n for index, element in enumerate(entry):\n try:\n data[index] = np.fromstring(element.text, sep=' ')\n except ValueError as e:\n if str(e) == 'setting an array element with a sequence.':\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_OVERFLOW])\n sys.exit(self.ERROR_OVERFLOW)\n\n return data\n\n def _convert_array2D_f(self, entry, dim):\n \"\"\"Convert the input entry to numpy array\n\n Parameters\n ----------\n entry : list\n A list containing Element objects where each\n element is a float\n dim : int\n The dimension of the second index.\n\n Returns\n -------\n data : ndarray\n | Dimension: (N,M)\n An array containing N elements with M float\n elements.\n\n \"\"\"\n\n data = None\n if entry is not None:\n data = np.zeros((len(entry), dim), dtype='double')\n\n for index, element in enumerate(entry):\n try:\n data[index] = np.fromstring(element.text, sep=' ')\n except ValueError as e:\n if str(e) == 'setting an array element with a sequence.':\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_OVERFLOW])\n sys.exit(self.ERROR_OVERFLOW)\n\n return data\n\n def _convert_f(self, entry):\n \"\"\"Convert the input entry to a float.\n\n Parameters\n ----------\n entry : object\n An Element object containing an integer value.\n\n Returns\n -------\n data : float\n The float value.\n\n \"\"\"\n\n data = None\n if entry.text is not None:\n if '****' in entry.text:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_OVERFLOW])\n sys.exit(self.ERROR_OVERFLOW)\n data = float(entry.text)\n\n return data\n\n def _convert_i(self, entry):\n \"\"\"Convert the input entry to an integer.\n\n Parameters\n ----------\n entry : object\n An Element object containing an integer value.\n\n Returns\n -------\n data : int\n The integer value.\n\n \"\"\"\n\n data = None\n if entry.text is not 
None:\n if '****' in entry.text:\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_OVERFLOW])\n sys.exit(self.ERROR_OVERFLOW)\n data = int(entry.text)\n\n return data\n\n def _convert_species(self, entry):\n \"\"\"Set the atomic species to the correct value\n\n Parameters\n ----------\n entry : list\n A list containing Element objects, where each\n element is one atomic species.\n\n Returns\n -------\n unitcell : ndarray\n | Dimension: (N,3)\n An array containing the positions of N atoms in\n direct units.\n\n \"\"\"\n\n species = None\n if entry is not None:\n species = np.zeros(len(entry), dtype='intc')\n for index, spec in enumerate(entry):\n try:\n species[index] = constants.elements[entry[index].text.split()\n [0].lower()]\n except KeyError:\n self._logger.warning(\n self.ERROR_MESSAGES[self.ERROR_UNKNOWN_ELEMENT])\n sys.exit(self.ERROR_UNKNOWN_ELEMENT)\n\n return species\n\n def get_forces(self, status):\n\n forces = self._data['forces']\n if forces is None:\n return None\n self._check_calc_status(status)\n if status == 'initial':\n return forces[1]\n elif status == 'last':\n largest_key = max(self._data['forces'].keys())\n return forces[largest_key]\n elif status == 'all':\n return forces\n\n def get_stress(self, status):\n\n stress = self._data['stress']\n if stress is None:\n return None\n self._check_calc_status(status)\n if status == 'initial':\n return stress[1]\n elif status == 'last':\n largest_key = max(self._data['stress'].keys())\n return stress[largest_key]\n elif status == 'all':\n return stress\n\n def get_hessian(self):\n\n hessian = self._data['hessian']\n return hessian\n\n def get_dynmat(self):\n\n dynmat = self._data['dynmat']\n return dynmat\n\n def get_dielectrics(self):\n\n dielectrics = self._data['dielectrics']\n return dielectrics\n\n def get_epsilon(self):\n\n epsilon = self._data['dielectrics']['epsilon']\n return epsilon\n\n def get_epsilon_ion(self):\n\n epsilon_ion = self._data['dielectrics']['epsilon_ion']\n return epsilon_ion\n\n def get_fermi_level(self):\n\n fermi_level = self._data['dos']['total']['fermi_level']\n return fermi_level\n\n def get_fermi_level_specific(self):\n\n fermi_level_specific = self._data['dos_specific']['total'][\n 'fermi_level']\n return fermi_level_specific\n\n def get_born(self):\n\n born = self._data['born']\n return born\n\n def get_unitcell(self, status):\n\n unitcell = self._lattice['unitcell']\n if unitcell is None:\n return None\n self._check_calc_status(status)\n if status == 'initial':\n return unitcell[1]\n elif status == 'last':\n largest_key = max(self._lattice['unitcell'].keys())\n return unitcell[largest_key]\n elif status == 'all':\n return unitcell\n\n def get_positions(self, status):\n\n positions = self._lattice['positions']\n if positions is None:\n return None\n self._check_calc_status(status)\n if status == 'initial':\n return positions[1]\n elif status == 'last':\n largest_key = max(self._lattice['positions'].keys())\n return positions[largest_key]\n elif status == 'all':\n return positions\n\n def get_species(self):\n\n species = self._lattice['species']\n return species\n\n def get_lattice(self, status):\n\n species = self.get_species()\n unitcell = self.get_unitcell(status)\n positions = self.get_positions(status)\n return {\n 'unitcell': unitcell,\n 'positions': positions,\n 'species': species\n }\n\n def get_kpoints(self):\n\n kpoints = self._lattice['kpoints']\n return kpoints\n\n def get_kpointsw(self):\n\n kpointsw = self._lattice['kpointsw']\n return kpointsw\n\n def get_energies(self, status, 
etype=None, nosc=True):\n\n if etype is None:\n etype = ['energy_extrapolated']\n # Check if the supplied etype is in the support list\n for item in etype:\n if item not in _SUPPORTED_TOTAL_ENERGIES.keys():\n raise ValueError(f'The supplied total energy type: {item} is not supported.')\n\n return self._get_energies(status, etype, nosc)\n\n def _get_energies(self, status, etype, nosc):\n\n enrgies = self._data['totens']\n if enrgies is None:\n return None\n self._check_calc_status(status)\n energies = {}\n # We can have different number of electronic steps for each ionic step, so\n # the array would be staggered. In order to save on storage and still utilize\n # regular NumPy array functions, we flatten the array and store a separate array\n # that keeps track of the number of electronic steps per ionic step.\n for item in etype:\n # For the energies inside the electronic step sections.\n energies_per_etype = np.array([])\n # For the final energy available inside the calculations (ionic steps) sections after closure\n # of the electronic steps and applying corrections.\n energy_per_etype = np.array([])\n electronic_steps = np.array([], dtype=int)\n steps = 1\n if status == 'initial':\n # Initial ionic step\n energy_per_etype = np.append(energy_per_etype, enrgies[1][item+'_final'])\n if nosc:\n e = enrgies[1][item][-1]\n else:\n e = enrgies[1][item]\n steps = len(e)\n energies_per_etype = np.append(energies_per_etype, e)\n electronic_steps = np.append(electronic_steps, steps)\n elif status == 'last':\n # Last ionic step\n largest_key = max(enrgies.keys())\n energy_per_etype = np.append(energy_per_etype, enrgies[largest_key][item+'_final'])\n if nosc:\n e = enrgies[largest_key][item][-1]\n else:\n e = enrgies[largest_key][item]\n steps = len(e)\n energies_per_etype = np.append(energies_per_etype, e)\n electronic_steps = np.append(electronic_steps, steps)\n elif status == 'all':\n # For all the ionic steps\n _energies_per_etype = sorted(enrgies.items())\n for index, element in _energies_per_etype:\n energy_per_etype = np.append(energy_per_etype, element[item+'_final'])\n if nosc:\n e = element[item][-1]\n else:\n e = element[item]\n steps = len(e)\n energies_per_etype = np.append(energies_per_etype, e)\n electronic_steps = np.append(electronic_steps, steps)\n energies[item+'_final'] = energy_per_etype\n energies[item] = energies_per_etype\n\n energies['electronic_steps'] = electronic_steps\n\n return energies\n\n def get_dos(self):\n\n dos = self._data['dos']\n return dos\n\n def get_dos_specific(self):\n\n dos_specific = self._data['dos_specific']\n return dos_specific\n\n def get_eigenvalues(self):\n\n eigenvalues = self._data['eigenvalues']\n return eigenvalues\n\n def get_eigenvalues_specific(self):\n\n eigenvalues_specific = self._data['eigenvalues_specific']\n return eigenvalues_specific\n\n def get_eigenvelocities(self):\n\n eigenvelocities = self._data['eigenvelocities']\n return eigenvelocities\n\n def get_kpoints_specific(self):\n\n kpoints_specific = self._data['kpoints']\n return kpoints_specific\n\n def get_kpointsw_specific(self):\n\n kpointsw_specific = self._data['kpointsw']\n return kpointsw_specific\n\n def get_occupancies(self):\n\n occupancies = self._data['occupancies']\n return occupancies\n\n def get_projectors(self):\n\n projectors = self._data['projectors']\n return projectors\n\n def get_dict(self):\n\n dictionary = {\n 'parameters': self._parameters,\n 'lattice': self._lattice,\n 'data': self._data\n }\n\n return dictionary\n\n def get_parameters(self):\n\n 
parameters = self._parameters\n return parameters\n\n def get_version(self):\n from re import compile\n\n version = self._version.strip()\n # The version entry in the xml file is typically of the form\n # X.Y.Z or X.Y.Z.something so only keep three integers and two dots\n pattern = r'(\\d+)\\.(\\d+)\\.(\\d+)'\n match = compile(pattern)\n\n return match.search(version).group(0)\n \n def _check_calc_status(self, status):\n allowed_entries = ['initial', 'last', 'all']\n if status not in allowed_entries:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_UNSUPPORTED_STATUS] +\n ' Please use any of the following values ' +\n str(allowed_entries))\n sys.exit(self.ERROR_UNSUPPORTED_STATUS)\n\n def _find(self, xml, locator):\n \"\"\"Wrapper to check if the request returns something.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n locator : string\n The locator string to try.\n\n Returns\n -------\n entry : object\n An Element object if something is found, otherwise it\n returns None.\n\n \"\"\"\n\n entry = xml.find(locator)\n\n if entry is None:\n return None\n else:\n return entry\n\n def _findall(self, xml, locator):\n \"\"\"Wrapper to check if the request returns something.\n\n Parameters\n ----------\n xml : object\n An ElementTree object to be used for parsing.\n locator : string\n The locator string to try.\n\n Returns\n -------\n entry : object\n An Element object if something is found, otherwise it\n returns None.\n\n \"\"\"\n\n entry = xml.findall(locator)\n\n if not entry:\n return None\n else:\n return entry\n\n def _file_size(self):\n \"\"\"Returns the file size of a file.\n\n Returns\n -------\n The file size in megabytes.\n\n \"\"\"\n\n if self._file_path is None and self._file_handler is None:\n self._logger.error(\n self.ERROR_MESSAGES[self.ERROR_ONLY_ONE_ARGUMENT])\n return None\n if self._file_handler is None:\n # check if file exists\n if not utils.file_exists(self._file_path, logger=self._logger):\n self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_SIZE])\n return None\n\n file_size = os.stat(self._file_path).st_size\n else:\n file_size = os.fstat(self._file_handler.fileno()).st_size\n\n return file_size / 1048576.0\n\n def _check_xml(self):\n \"\"\"Do a primitive check of XML file to see if it is\n truncated.\n\n Returns\n -------\n bool\n True if xml is truncated, False otherwise.\n\n \"\"\"\n\n if self._file_handler is not None:\n handler = self._file_handler\n mapping = mmap.mmap(handler.fileno(), 0, prot=mmap.PROT_READ)\n else:\n handler = open(self._file_path)\n with handler as source:\n mapping = mmap.mmap(source.fileno(), 0, prot=mmap.PROT_READ)\n\n last_line = mapping[mapping.rfind(b'\\n', 0, -1) + 1:]\n if last_line == '</modeling>\\n':\n return False\n else:\n return True\n"
] |
[
[
"numpy.array",
"numpy.asarray",
"numpy.ascontiguousarray",
"numpy.split",
"numpy.swapaxes",
"numpy.append",
"numpy.fromstring"
]
] |
pdxgx/immunorx_response_pipeline
|
[
"5a3faa72370e96545b46caa790090d022eaa5ece"
] |
[
"scripts/process_netctlpan_results.py"
] |
[
"#!/usr/bin/env python\n\nfrom __future__ import print_function\nfrom collections import defaultdict\nfrom numpy import median\nimport argparse\nimport glob\nimport os\nimport pickle\n\nif __name__ == \"__main__\":\n\n\t# Parse command line options\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('-m', '--manifest', type=str, required=True,\n\t\t\t\t\t\thelp='path to tumor-normal pair manifest'\n\t)\n\tparser.add_argument('-i', '--input-dir', type=str, required=True,\n\t\t\t\t\t\thelp='path to input directory with neCTLpan result dictionaries'\n\t)\n\tparser.add_argument('-o', '--output-file', type=str, required=True,\n\t\t\t\t\t\thelp='path to output file'\n\t)\n\targs = parser.parse_args()\n\n\t# Iterate through manifest to process scores for each sample\n\ttumor_dict = defaultdict(int)\n\twith open(os.path.abspath(args.manifest)) as f:\n\t\tfor line in f:\n\t\t\ttokens = line.strip().split('\\t')\n\t\t\t# Grab all dictionaries for the sample\n\t\t\tdict_wildcard = os.path.join(\n\t\t\t\t\t\t\t\t\t\t\tos.path.abspath(args.input_dir), \n\t\t\t\t\t\t\t\t\t\t\t'.'.join([tokens[0], tokens[2], '*', 'pickle'])\n\t\t\t)\n\t\t\tdictionaries = glob.glob(dict_wildcard)\n\t\t\t# Process scores to find best rank for each epitope\n\t\t\tscore_dict = defaultdict(lambda:100.0)\n\t\t\tfor dic in dictionaries:\n\t\t\t\twith open(dic, 'rb') as p:\n\t\t\t\t\td = pickle.load(p)\n\t\t\t\tfor entry in d:\n\t\t\t\t\tfor score_set in d[entry]:\n\t\t\t\t\t\trank = float(score_set[4])\n\t\t\t\t\t\t# Score retained is minumum rank for each peptide\n\t\t\t\t\t\tscore_dict[entry[1]] = min(rank, score_dict[entry[1]])\n\t\t\tfor peptide in score_dict:\n\t\t\t\tif score_dict[peptide] < 1:\n\t\t\t\t\ttumor_dict[(tokens[0], tokens[2])] += 1\n\n\t# Get median values for multi-sample patients\n\tmultisample = set([x[0] for x in tumor_dict if len([y for y in tumor_dict if x[0] in y]) > 1])\n\tfor patient in multisample:\n\t\t# Extract relevant keys/entries\n\t\ttry:\n\t\t\t# Separate patients from Roh/Amaria cohorts (#s as pat. IDs)\n\t\t\tp = int(patient)\n\t\t\trelevant_keys = [x for x in tumor_dict if x[0] == patient and (\n\t\t\t\t x[1][-1] in ['A', 'B', 'C', 'D', 'E'] or x[1] in [''.join([patient, 'D1']), ''.join([patient, 'D2'])]\n\t\t\t\t)\n\t\t\t]\n\t\t\tif len(relevant_keys) == 1:\n\t\t\t\tcontinue\n\t\texcept ValueError:\n\t\t\trelevant_keys = [x for x in tumor_dict if x[0] == patient]\n\t\trelevant_entries = [tumor_dict[x] for x in relevant_keys]\n\t\t# Set up new combined key/entry\n\t\tnew_key = (patient, ';'.join(sorted([x[1] for x in relevant_keys])))\n\t\tnew_entry = median(relevant_entries)\n\t\ttumor_dict[new_key] = new_entry\n\t\tfor key in relevant_keys:\n\t\t\tdel tumor_dict[key]\n\n\t# Write output file\n\twith open(os.path.abspath(args.output_file), 'w') as f:\n\t\theader = ['Patient', 'Tumor_ID', 'NetCTLpan_epitopes']\n\t\tprint('\\t'.join(header), file=f)\n\t\tfor patient in tumor_dict:\n\t\t\tout_line = [patient[0], patient[1], str(tumor_dict[patient])]\n\t\t\tprint('\\t'.join(out_line), file=f)\n\n"
] |
[
[
"numpy.median"
]
] |
MW55/ChimeraMATE
|
[
"0af276fb7c89f529b18348d52d3738bcf84d671e"
] |
[
"setup.py"
] |
[
"from distutils.core import setup\nfrom Cython.Build import cythonize\nimport numpy\n\nsetup(ext_modules = cythonize('chimeramate_main.pyx'),include_dirs=[numpy.get_include()])\n"
] |
[
[
"numpy.get_include"
]
] |
travc/ddtool
|
[
"33ef02afb4ebf62823f285d1dc6ead125f39bf54"
] |
[
"temps2daily_gui.py"
] |
[
"#!/usr/bin/env python3\nimport os\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.scrolledtext import ScrolledText\nimport tkinter.filedialog\nfrom multicolumn_listbox import Multicolumn_Listbox\n\nimport numpy as np\nimport pandas as pd\nfrom collections import OrderedDict as ordereddict\n\nclass App(ttk.Frame):\n def __init__(self, parent, *args, **kwargs):\n ttk.Frame.__init__(self, parent, *args, **kwargs)\n self.root = parent\n self.tfiles = ordereddict()\n self.stations = []\n self.selected_files_strvar = tk.StringVar()\n self.root.title(\"DD Tool\")\n self.pack(fill=tk.BOTH, expand=1)\n\n ## Temperature files area\n foo = ttk.Label(self, text='Temperature Data Files', font='fixed 14 bold')\n foo.pack(fill=tk.X, expand=0, padx=3, pady=3)\n\n # multicolumnlisbox\n # Frame, for scrollbar placement\n mcf = ttk.Frame(self)\n mcf.pack(fill=tk.BOTH, expand=1, side=tk.TOP, padx=0, pady=0)\n # multicolumn listbox widget\n self.mc = Multicolumn_Listbox(mcf, [\"station\",\n \"first date\",\n \"last date\",\n \"number\",\n \"filename\"],\n stripped_rows = (\"white\",\"#f2f2f2\"),\n command=self._on_select,\n adjust_heading_to_content=True,\n cell_anchor=\"center\")\n # scrollbars\n ysb = ttk.Scrollbar(mcf, orient='vertical', command=self.mc.interior.yview)\n self.mc.interior.configure(yscrollcommand=ysb.set)\n ysb.pack(fill=tk.BOTH, expand=0, side=tk.RIGHT)\n xsb = ttk.Scrollbar(mcf, orient='horizontal', command=self.mc.interior.xview)\n self.mc.interior.configure(xscrollcommand=xsb.set)\n xsb.pack(fill=tk.BOTH, expand=0, side=tk.BOTTOM)\n # place\n self.mc.interior.pack(fill=tk.BOTH, expand=1, side=tk.TOP, padx=0, pady=0)\n self.mc.fit_width_to_content()\n\n # buttons\n mcbf = ttk.Frame(self)\n mcbf.pack(fill=tk.BOTH, expand=0, side=tk.TOP, padx=0, pady=0)\n remove_selected_files_button = ttk.Button(mcbf, text='Remove Files', command=self._remove_selected_files)\n remove_selected_files_button.pack(expand=0, side=tk.RIGHT, padx=3, pady=3)\n open_button = ttk.Button(mcbf, text='Add Files', command=self._selectFiles)\n open_button.pack(expand=0, side=tk.RIGHT, padx=3, pady=3)\n sort_button = ttk.Button(mcbf, text='Sort', command=self.sort_tfiles)\n sort_button.pack(expand=0, side=tk.LEFT, padx=3, pady=3)\n\n ## Station priority \n self.station_priority_frame = ttk.LabelFrame(self, text='Station Priority')\n self.station_priority_frame.pack(side=tk.TOP, fill=tk.BOTH, expand=0, padx=3, pady=3)\n self.stations_priority_lbs = []\n \n def update_stations(self):\n tmp = [ x[1]['station'] for x in self.tfiles.items() ]\n stations = []\n for x in tmp:\n if x not in stations:\n stations.append(x)\n print(stations)\n # @TCC should preserve selections already made if possible\n self.stations = stations\n\n for lb in self.stations_priority_lbs:\n lb.destroy()\n self.stations_priority_lbs = [] \n for i,station in enumerate(self.stations):\n lb = ttk.Combobox(self.station_priority_frame, state=\"readonly\",\n values=self.stations[i:],\n exportselection=0)\n lb.set(self.stations[i])\n lb.pack(side=tk.LEFT, padx=3, pady=3)\n self.stations_priority_lbs.append(lb)\n\n \n def _selectFiles(self):\n selected_files = tk.filedialog.askopenfilenames(\n parent=self.root,\n title='Choose Temperature Files',\n filetypes=((\"CSV files\",\"*.csv\"),(\"all files\",\"*.*\"))) \n self.update_selected_files(selected_files, replace=False)\n \n def update_tfiles_listbox(self):\n # update the multicolumn_listbox\n self.selected_files_strvar.set(str(self.tfiles.keys()))\n self.mc.clear()\n for fn, tfile in 
self.tfiles.items():\n # note: filename is assumed to be the last element by _remove_selected_files\n self.mc.insert_row([tfile['station'], \n tfile['df'].index[0],\n tfile['df'].index[-1],\n tfile['df'].shape[0],\n fn])#, index=self.mc.number_of_rows)\n self.mc.fit_width_to_content()\n\n def _on_select(self, data):\n # called when a multicolumn_listbox row is selected\n pass\n\n def _remove_selected_files(self):\n for row in self.mc.selected_rows:\n del self.tfiles[row[-1]]\n self.mc.delete_all_selected_rows()\n \n def update_selected_files(self, selected_files, replace=False):\n if replace:\n self.tfiles = ordereddict()\n for i,fn in enumerate(selected_files):\n if fn not in self.tfiles:\n print(\"Loading\", fn)\n df = pd.read_csv(fn, parse_dates=['Date']).dropna()\n tcol = [x for x in df.columns if x.startswith('Temperature ')]\n if len(tcol) < 1:\n print(\"ERROR: Temperature column not found\", file=sts.stderr)\n else:\n tmp = [x.strip() for x in tcol[0].split(',')]\n station = tmp[-1] \n t = df.loc[:,['Date',tcol[0]]]\n t.set_index('Date', inplace=True)\n t.columns = ['temperature']\n t.sort_index(inplace=True)\n #t['station'] = station\n first = t.index[0]\n last = t.index[-1]\n self.tfiles[fn] = dict()\n self.tfiles[fn]['df'] = t \n self.tfiles[fn]['station'] = station\n self.tfiles[fn]['tcol'] = tcol[0]\n self.sort_tfiles()\n self.update_stations()\n\n def sort_tfiles(self):\n # sort by station, first date, last date\n self.tfiles = ordereddict(sorted(self.tfiles.items(), \n key=lambda x: (x[1]['df'].index[-1], \n x[1]['df'].index[0],\n x[1]['station'])))\n print(*list(self.tfiles.keys()), sep='\\n')\n self.update_tfiles_listbox()\n\n\nif __name__ == '__main__':\n root = tk.Tk()\n #root.geometry(\"800x600\")\n app = App(root)\n root.mainloop()\n\n"
] |
[
[
"pandas.read_csv"
]
] |
Nintendofan885/wiki-detox
|
[
"571a3e4ae13ecf7a564a01bcf16507f0a87351a4"
] |
[
"src/analysis/load_utils.py"
] |
[
"import os\nimport pandas as pd\nimport re\n\n\ndef load_diffs(keep_diff = False):\n \n nick_map = {\n 'talk_diff_no_admin_sample.tsv': 'sample',\n 'talk_diff_no_admin_2015.tsv': '2015',\n 'all_blocked_user.tsv': 'blocked',\n 'd_annotated.tsv': 'annotated',\n }\n\n base = '../../data/samples/'\n nss = ['user', 'article']\n\n samples = [ \n 'talk_diff_no_admin_sample.tsv',\n 'talk_diff_no_admin_2015.tsv',\n 'all_blocked_user.tsv',\n 'd_annotated.tsv'\n ]\n\n d ={}\n for s in samples:\n dfs = []\n for ns in nss:\n inf = os.path.join(base, ns, 'scored', s)\n df = pd.read_csv(inf, sep = '\\t')\n if not keep_diff:\n del df['clean_diff']\n df['ns'] = ns\n dfs.append(df)\n d[nick_map[s]] = augment(pd.concat(dfs))\n\n \n d['blocked']['blocked'] = 1\n\n return d\n\ndef is_ip(x):\n pattern = r\"^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\"\n return re.match(pattern,str(x)) is not None\n\n\ndef augment(df):\n df['author_anon'] = df['user_id'].isnull()\n df['recipient_anon'] = df['page_title'].apply(is_ip)\n df['rev_timestamp'] = pd.to_datetime(df['rev_timestamp'])\n df['year'] = df['rev_timestamp'].apply(lambda x: x.year)\n df['month'] = df['rev_timestamp'].apply(lambda x: x.month)\n df['hour'] = df['rev_timestamp'].apply(lambda x: x.hour)\n df['own_page'] = df['user_text'] == df['page_title']\n return df\n\n\n\ndef load_block_events_and_users():\n \n df_events = pd.read_csv('../../data/block_events.tsv', sep = '\\t')\\\n .rename(columns= lambda x: x.split('.')[1])\\\n .assign(timestamp= lambda x: pd.to_datetime(x.timestamp),\n anon = lambda x: x.user_text.apply(is_ip))\n \n \n df_events['year'] = df_events['timestamp'].apply(lambda x: x.year)\n df_events['month'] = df_events['timestamp'].apply(lambda x: x.month)\n df_events['hour'] = df_events['timestamp'].apply(lambda x: x.hour)\n\n df_blocked_user_text = df_events[['user_text']]\\\n .drop_duplicates()\\\n .assign(blocked = 1) \n\n return df_events, df_blocked_user_text\n\n\ndef load_warn_events_and_users():\n \n df_events = pd.read_csv('../../data/npa_warnings.tsv', sep = '\\t')\\\n .rename(columns= lambda x: x.split('.')[1])\\\n .assign(timestamp= lambda x: pd.to_datetime(x.warning_timestamp),\n anon = lambda x: x.attacker_user_text.apply(is_ip),\n user_text = lambda x: x.attacker_user_text)\n \n \n df_events['year'] = df_events['timestamp'].apply(lambda x: x.year)\n df_events['month'] = df_events['timestamp'].apply(lambda x: x.month)\n df_events['hour'] = df_events['timestamp'].apply(lambda x: x.hour)\n\n df_blocked_user_text = df_events[['user_text']]\\\n .drop_duplicates()\\\n .assign(blocked = 1) \n\n return df_events, df_blocked_user_text\n\n\n"
] |
[
[
"pandas.to_datetime",
"pandas.read_csv",
"pandas.concat"
]
] |
anisotropi4/goldfinch
|
[
"eb0f3c76f193e548bbd3b37f3223002431f1cc92"
] |
[
"xl2tsv/xl2ndjson.py"
] |
[
"#!/usr/bin/env python3\n\nimport pandas as pd\nimport argparse\nimport os\nimport sys\n\nparser = argparse.ArgumentParser(description='Dump xls(x) files tab(s) to .tsv files, to the (default output) path')\n\nparser.add_argument('inputfiles', type=str, nargs='*', help='name of xls-file to process')\n\ntabgroup = parser.add_mutually_exclusive_group()\n\ntabgroup.add_argument('--tabnames', dest='tabnames', action='store_true',\n default=False, help='dump name of tabs')\n\ntabgroup.add_argument('--tab', type=str, dest='tab', default=None,\n help='name of tab to process')\n\nfilegroup = parser.add_mutually_exclusive_group()\n\nfilegroup.add_argument('--path', dest='path', type=str, default='output',\n help='output directory file')\n\nfilegroup.add_argument('--stdout', dest='stdout', action='store_true',\n default=False, help='dump a tab to stdout')\n\nparser.add_argument('--sourcename', dest='sourcename', action='store_true',\n default=False, help='prepend filename to output tab file')\n\nparser.add_argument('--headers', type=str, dest='h_list', default=None,\n help='list of header names')\n\nparser.add_argument('--skip-rows', type=str, dest='skip', default=0,\n help='skip rows')\n\nargs = parser.parse_args()\n\npath = args.path\n\nif not os.path.exists(path):\n os.makedirs(path)\n\nif args.tabnames:\n for filename in args.inputfiles:\n if len(args.inputfiles) > 1:\n print(filename)\n df = pd.read_excel(filename, None)\n print('\\t'.join(df.keys()))\n sys.exit(0)\n\nfor filename in args.inputfiles: \n if args.tab:\n tab = args.tab\n filebase = ''\n if args.sourcename:\n filebase = filename + ':'\n if '.' in filename:\n filebase = filename.rsplit('.', 1)[0] + ':'\n try:\n header_list = None\n if args.h_list:\n header_list = [i.strip() for i in args.h_list.split(',')]\n if args.skip:\n skip_list = [int(i.strip()) for i in args.skip.split(',')]\n df = pd.read_excel(filename, tab, names=header_list, skiprows=skip_list).fillna('')\n if args.stdout:\n df.to_json(sys.stdout, orient='records', lines=True)\n print()\n else:\n df.to_json('{}/{}{}.ndjson'.format(path, filebase, tab), orient='records', lines=True)\n\n except KeyError:\n pass\n else:\n df = pd.read_excel(filename, None)\n filebase = ''\n if args.sourcename:\n filebase = filename + ':'\n if '.' in filename:\n filebase = filename.rsplit('.', 1)[0] + ':'\n for tab in df.keys():\n df[tab] = df[tab].fillna('')\n df[tab].to_json('{}/{}{}.ndjson'.format(path, filebase, tab), orient='records', lines=True)\n\n"
] |
[
[
"pandas.read_excel"
]
] |
priyamDalmia/predator-prey
|
[
"0902e305f029d164966c4c65cf1498d5406fe3ab"
] |
[
"data/replay_buffer.py"
] |
[
"import random\nimport numpy as np\nimport pdb\nimport gc\n\nclass ReplayBuffer():\n def __init__(self, buffer_size, batch_size, state_size):\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.state_size = state_size\n self.counter = 0\n # memory \n self.states = np.zeros((self.buffer_size, *self.state_size), dtype=np.float32)\n self.actions = np.zeros((self.buffer_size), dtype=np.int32)\n self.rewards = np.zeros((self.buffer_size), dtype=np.float32)\n self.next_states = np.zeros((self.buffer_size, *self.state_size), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size), dtype=np.float32) \n self.probs = []\n #self.next_actions = np.zeros((self.\n #self.dones = []\n self.infos = []\n\n def store_transition(self, state, action, reward, next_state, done, **kwargs):\n index = self.counter % self.buffer_size\n self.states[index] = state\n self.actions[index] = action\n self.rewards[index] = reward\n if next_state.size > 0:\n self.next_states[index] = next_state\n else:\n self.next_states[index] = state\n self.dones[index] = 1 - int(done)\n if \"probs\" in kwargs:\n self.probs.append(kwargs[\"probs\"])\n self.counter += 1\n\n def sample_batch(self):\n max_index = min(self.counter, self.batch_size)\n batch_ids = np.random.choice(max_index, self.batch_size, replace=False)\n\n bstates = self.states[batch_ids]\n bactions = self.actions[batch_ids]\n brewards = self.rewards[batch_ids]\n bnext_states = self.next_states[batch_ids]\n bdones = self.dones[batch_ids]\n \n return bstates, bactions, brewards, bnext_states, bdones\n \n def sample_transition(self):\n actions= self.actions[:self.counter]\n states = self.states[:self.counter]\n rewards = self.rewards[:self.counter]\n next_ = self.next_states[:self.counter]\n dones = self.dones[:self.counter]\n action_probs = self.probs\n self.clear_buffer()\n return states, actions, rewards, next_, dones, action_probs\n\n def clear_buffer(self):\n self.counter = 0\n # memory \n self.states = np.zeros((self.buffer_size, *self.state_size), dtype=np.float32)\n self.actions = np.zeros((self.buffer_size), dtype=np.int32)\n self.rewards = np.zeros((self.buffer_size), dtype=np.float32)\n self.next_states = np.zeros((self.buffer_size, *self.state_size), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size), dtype=np.float32) \n self.probs = []\n #self.next_actions = np.zeros((self.\n #self.dones = []\n self.infos = []\n\nclass Critic_Buffer():\n def __init__(self, buffer_size, batch_size, state_size):\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.state_size = state_size\n self.counter = 0\n # memory \n self.states = np.zeros((self.buffer_size, *self.state_size), dtype=np.float32)\n self.rewards = np.zeros((self.buffer_size), dtype=np.float32)\n \n def clear_buffer(self):\n self.counter = 0\n # memory \n self.states = np.zeros((self.buffer_size, *self.state_size), dtype=np.float32)\n self.rewards = np.zeros((self.buffer_size), dtype=np.float32)\n \n def sample_transition(self):\n states = self.states[:self.counter]\n rewards = self.rewards[:self.counter]\n self.clear_buffer()\n return states, rewards\n\n def store_transition(self, state, reward):\n index = self.counter % self.buffer_size\n self.states[index] = state\n self.rewards[index] = reward\n self.counter += 1\n\n\n"
] |
[
[
"numpy.random.choice",
"numpy.zeros"
]
] |
YB27/GPy
|
[
"abef7b3deec593f02cfbed81929b21c660e7069c"
] |
[
"GPy/examples/regressionCensored.py"
] |
[
"\n''' temporary : Use this to use the forked version of GPy'''\nimport sys\nsys.path.insert(1, '/home/breux/GPy')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport GPy\n'''\nGaussian Processes regression with censored data example using artificial data as in \n\"Gaussian Process Regression with Censored Data Using Expectation Propagation, P. Groot, P. Lucas\" \n'''\n\ndef f(x):\n return ((6.*x - 2.)**2)*np.sin(2.*(6.*x-2.))\n\ndef plotModels(xc, yc, xc2, yc2, m_tobit, m_normal, m_normalWithoutCensoredData):\n x_gt = np.linspace(0,1,200)\n y_gt = f(x_gt)\n\n fig, ax = plt.subplots()\n plt.title(\"Tobit GP model\")\n plt.plot(x_gt, y_gt, linestyle='-', color=\"r\", label=\"GT\")\n plt.plot(xc, yc, linestyle='None', marker='+', markersize=10, color='k', label=\"Data\")\n m_tobit.plot_f(fignum=0, ax=ax)\n plt.xlim([0, 1])\n\n fig, ax = plt.subplots()\n plt.title(\"Standart GP model\")\n plt.plot(x_gt, y_gt,linestyle='-', color=\"r\", label=\"GT\")\n plt.plot(xc, yc, linestyle='None', marker='+', markersize=10, color='k', label=\"Data\")\n m_normal.plot_f(fignum=1, ax=ax)\n plt.xlim([0,1])\n\n fig, ax = plt.subplots()\n plt.title(\"Standart ignoring censured data GP model\")\n plt.plot(x_gt, y_gt, linestyle='-', color=\"r\", label=\"GT\")\n plt.plot(xc2, yc2, linestyle='None', marker='+', markersize=10, color='k', label=\"Data\")\n m_normalWithoutCensoredData.plot_f(fignum=2, ax=ax)\n plt.xlim([0, 1])\n plt.show()\n\ndef artificialExample():\n ''' Generate Data '''\n np.random.seed(4)\n n = 30\n x = np.linspace(0,1,n)\n y = f(x) + np.random.normal(0, np.sqrt(0.1), x.shape[0])\n x = x.reshape((n,1))\n l = -0.45 #-0.2265\n lowerCensoredData = np.zeros((n,), dtype='int64')\n lowerCensoredData_indexes = [idx for idx, val in np.ndenumerate(y) if val < l]\n np.put(lowerCensoredData, lowerCensoredData_indexes, 1)\n gaussianData = np.zeros((n,), dtype='int64')\n gaussianData_indexes = [idx for idx, val in np.ndenumerate(y) if val >= l]\n np.put(gaussianData, gaussianData_indexes, 1)\n\n y_metadata = {\"lowerCensored\": lowerCensoredData.reshape((n,1)), \"gaussianIndexes\": gaussianData.reshape((n,1))}\n #y_metadata = {\"lowerCensored\": np.array([idx for idx, val in np.ndenumerate(y) if val < l]),\n # \"gaussianIndexes\": np.array([idx for idx, val in np.ndenumerate(y) if val >= l])}\n\n ''' Censored data '''\n yc = y.copy()\n np.put(yc, lowerCensoredData_indexes, l)\n\n ''' Data without censored data'''\n yc2 = np.delete(yc, lowerCensoredData_indexes)\n x2 = np.delete(x,lowerCensoredData_indexes)\n yc = yc.reshape((n,1))\n yc2 = yc2.reshape(yc2.shape[0],1)\n x2 = x2.reshape(x2.shape[0],1)\n\n ''' GP models '''\n kernel_tobit = GPy.kern.RBF(input_dim=1, variance=5, lengthscale=0.1)\n kernel_normal = GPy.kern.RBF(input_dim=1, variance=5, lengthscale=0.1)\n kernel_normalCensored = GPy.kern.RBF(input_dim=1, variance=5, lengthscale=0.1)\n print(\"Create GP with tobit model ...\")\n m_tobit = GPy.models.GPRegressionCensored(x, yc, lowerThreshold=l, upperThreshold=None, kernel=kernel_tobit, noise_var = 0.1, Y_metadata=y_metadata)\n m_tobit.likelihood.variance.fix()\n print(\"Create standart GP model ...\")\n m_normal = GPy.models.GPRegression(x, yc, kernel=kernel_normal)\n m_normal.likelihood.variance.fix()\n print(\"Create standart GP model and ignoring censured data...\")\n m_normalWithoutCensoredData = GPy.models.GPRegression(x2, yc2, kernel=kernel_normalCensored)\n m_normalWithoutCensoredData.likelihood.variance.fix()\n\n ''' Optimization '''\n print(\"Optimizer with tobit model ...\")\n 
print(\"---> Model before opt : \")\n print(m_tobit[''])\n m_tobit.optimize(optimizer='lbfgs', max_iters=500, messages=True)\n print(\"---> Model after opt : \")\n print(m_tobit[''])\n print(\"Optimizer with standart model ...\")\n print(m_normal[''])\n m_normal.optimize(optimizer='lbfgs', max_iters=500, messages=True)\n print(\"Optimizer with standart model and ignoring censured data...\")\n m_normalWithoutCensoredData.optimize(optimizer='lbfgs', max_iters=500, messages=True)\n\n ''' Plots '''\n plotModels(x, yc, x2, yc2, m_tobit, m_normal, m_normalWithoutCensoredData)\n\nif __name__ == \"__main__\":\n artificialExample()\n"
] |
[
[
"numpy.delete",
"numpy.sin",
"matplotlib.pyplot.xlim",
"numpy.ndenumerate",
"numpy.zeros",
"numpy.random.seed",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots",
"numpy.sqrt",
"numpy.put",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
flowersteam/TeachMyAgent
|
[
"a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e",
"a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e",
"a8f71cbfce4cb8ca6da24d00ea690495e3afbd2e"
] |
[
"TeachMyAgent/students/openai_baselines/ppo2/model.py",
"TeachMyAgent/students/openai_baselines/common/plot_util.py",
"TeachMyAgent/teachers/utils/torch.py"
] |
[
"import tensorflow as tf\nimport functools\n\nfrom TeachMyAgent.students.openai_baselines.common.tf_util import get_session, save_variables, load_variables\nfrom TeachMyAgent.students.openai_baselines.common.tf_util import initialize\n\ntry:\n from TeachMyAgent.students.openai_baselines.common.mpi_adam_optimizer import MpiAdamOptimizer\n from mpi4py import MPI\n from TeachMyAgent.students.openai_baselines.common.mpi_util import sync_from_root\nexcept ImportError:\n MPI = None\n\nclass Model(object):\n \"\"\"\n We use this object to :\n __init__:\n - Creates the step_model\n - Creates the train_model\n\n train():\n - Make the training part (feedforward and retropropagation of gradients)\n\n save/load():\n - Save load the model\n \"\"\"\n def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,\n nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight=1, comm=None, microbatch_size=None):\n self.sess = sess = get_session()\n\n if MPI is not None and comm is None:\n comm = MPI.COMM_WORLD\n\n with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):\n # CREATE OUR TWO MODELS\n # act_model that is used for sampling\n act_model = policy(nbatch_act, 1, sess)\n\n # Train model for training\n if microbatch_size is None:\n train_model = policy(nbatch_train, nsteps, sess)\n else:\n train_model = policy(microbatch_size, nsteps, sess)\n\n # CREATE THE PLACEHOLDERS\n self.A = A = train_model.pdtype.sample_placeholder([None])\n self.ADV = ADV = tf.placeholder(tf.float32, [None])\n self.R = R = tf.placeholder(tf.float32, [None])\n # Keep track of old actor\n self.OLDNEGLOGPAC = OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])\n # Keep track of old critic\n self.OLDVPRED = OLDVPRED = tf.placeholder(tf.float32, [None])\n self.LR = LR = tf.placeholder(tf.float32, [])\n # Cliprange\n self.CLIPRANGE = CLIPRANGE = tf.placeholder(tf.float32, [])\n\n neglogpac = train_model.pd.neglogp(A)\n\n # Calculate the entropy\n # Entropy is used to improve exploration by limiting the premature convergence to suboptimal policy.\n entropy = tf.reduce_mean(train_model.pd.entropy())\n\n # CALCULATE THE LOSS\n # Total loss = Policy gradient loss - entropy * entropy coefficient + Value coefficient * value loss\n\n # Clip the value to reduce variability during Critic training\n # Get the predicted value\n vpred = train_model.vf\n vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf - OLDVPRED, - CLIPRANGE, CLIPRANGE)\n # Unclipped value\n vf_losses1 = tf.square(vpred - R)\n # Clipped value\n vf_losses2 = tf.square(vpredclipped - R)\n\n vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))\n\n # Calculate ratio (pi current policy / pi old policy)\n ratio = tf.exp(OLDNEGLOGPAC - neglogpac)\n\n # Defining Loss = - J is equivalent to max J\n pg_losses = -ADV * ratio\n\n pg_losses2 = -ADV * tf.clip_by_value(ratio, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)\n\n # Final PG loss\n pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))\n approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - OLDNEGLOGPAC))\n clipfrac = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), CLIPRANGE)))\n\n # Total loss\n loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef\n\n # UPDATE THE PARAMETERS USING LOSS\n # 1. Get the model parameters\n params = tf.trainable_variables('ppo2_model')\n # 2. 
Build our trainer\n if comm is not None and comm.Get_size() > 1:\n self.trainer = MpiAdamOptimizer(comm, learning_rate=LR, mpi_rank_weight=mpi_rank_weight, epsilon=1e-5)\n else:\n self.trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)\n # 3. Calculate the gradients\n grads_and_var = self.trainer.compute_gradients(loss, params)\n grads, var = zip(*grads_and_var)\n\n if max_grad_norm is not None:\n # Clip the gradients (normalize)\n grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)\n grads_and_var = list(zip(grads, var))\n # zip aggregate each gradient with parameters associated\n # For instance zip(ABCD, xyza) => Ax, By, Cz, Da\n\n self.grads = grads\n self.var = var\n self._train_op = self.trainer.apply_gradients(grads_and_var)\n self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']\n self.stats_list = [pg_loss, vf_loss, entropy, approxkl, clipfrac]\n\n\n self.train_model = train_model\n self.act_model = act_model\n self.step = act_model.step\n self.value = act_model.value\n self.initial_state = act_model.initial_state\n\n self.save = functools.partial(save_variables, sess=sess)\n self.load = functools.partial(load_variables, sess=sess)\n\n initialize()\n global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=\"\")\n if MPI is not None:\n sync_from_root(sess, global_variables, comm=comm) #pylint: disable=E1101\n\n def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):\n # Here we calculate advantage A(s,a) = R + yV(s') - V(s)\n # Returns = R + yV(s')\n advs = returns - values\n\n # Normalize the advantages\n advs = (advs - advs.mean()) / (advs.std() + 1e-8)\n\n td_map = {\n self.train_model.X : obs,\n self.A : actions,\n self.ADV : advs,\n self.R : returns,\n self.LR : lr,\n self.CLIPRANGE : cliprange,\n self.OLDNEGLOGPAC : neglogpacs,\n self.OLDVPRED : values\n }\n if states is not None:\n td_map[self.train_model.S] = states\n td_map[self.train_model.M] = masks\n\n return self.sess.run(\n self.stats_list + [self._train_op],\n td_map\n )[:-1]\n\n def reset(self):\n self.sess.run(tf.global_variables_initializer())\n\n",
"import matplotlib.pyplot as plt\nimport os.path as osp\nimport json\nimport os\nimport numpy as np\nimport pandas\nfrom collections import defaultdict, namedtuple\nfrom TeachMyAgent.students.openai_baselines.bench import monitor\nfrom TeachMyAgent.students.openai_baselines.logger import read_json, read_csv\n\ndef smooth(y, radius, mode='two_sided', valid_only=False):\n '''\n Smooth signal y, where radius is determines the size of the window\n\n mode='twosided':\n average over the window [max(index - radius, 0), min(index + radius, len(y)-1)]\n mode='causal':\n average over the window [max(index - radius, 0), index]\n\n valid_only: put nan in entries where the full-sized window is not available\n\n '''\n assert mode in ('two_sided', 'causal')\n if len(y) < 2*radius+1:\n return np.ones_like(y) * y.mean()\n elif mode == 'two_sided':\n convkernel = np.ones(2 * radius+1)\n out = np.convolve(y, convkernel,mode='same') / np.convolve(np.ones_like(y), convkernel, mode='same')\n if valid_only:\n out[:radius] = out[-radius:] = np.nan\n elif mode == 'causal':\n convkernel = np.ones(radius)\n out = np.convolve(y, convkernel,mode='full') / np.convolve(np.ones_like(y), convkernel, mode='full')\n out = out[:-radius+1]\n if valid_only:\n out[:radius] = np.nan\n return out\n\ndef one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):\n '''\n perform one-sided (causal) EMA (exponential moving average)\n smoothing and resampling to an even grid with n points.\n Does not do extrapolation, so we assume\n xolds[0] <= low && high <= xolds[-1]\n\n Arguments:\n\n xolds: array or list - x values of data. Needs to be sorted in ascending order\n yolds: array of list - y values of data. Has to have the same length as xolds\n\n low: float - min value of the new x grid. By default equals to xolds[0]\n high: float - max value of the new x grid. By default equals to xolds[-1]\n\n n: int - number of points in new x grid\n\n decay_steps: float - EMA decay factor, expressed in new x grid steps.\n\n low_counts_threshold: float or int\n - y values with counts less than this value will be set to NaN\n\n Returns:\n tuple sum_ys, count_ys where\n xs - array with new x grid\n ys - array of EMA of y at each point of the new x grid\n count_ys - array of EMA of y counts at each point of the new x grid\n\n '''\n\n low = xolds[0] if low is None else low\n high = xolds[-1] if high is None else high\n\n assert xolds[0] <= low, 'low = {} < xolds[0] = {} - extrapolation not permitted!'.format(low, xolds[0])\n assert xolds[-1] >= high, 'high = {} > xolds[-1] = {} - extrapolation not permitted!'.format(high, xolds[-1])\n assert len(xolds) == len(yolds), 'length of xolds ({}) and yolds ({}) do not match!'.format(len(xolds), len(yolds))\n\n\n xolds = xolds.astype('float64')\n yolds = yolds.astype('float64')\n\n luoi = 0 # last unused old index\n sum_y = 0.\n count_y = 0.\n xnews = np.linspace(low, high, n)\n decay_period = (high - low) / (n - 1) * decay_steps\n interstep_decay = np.exp(- 1. 
/ decay_steps)\n sum_ys = np.zeros_like(xnews)\n count_ys = np.zeros_like(xnews)\n for i in range(n):\n xnew = xnews[i]\n sum_y *= interstep_decay\n count_y *= interstep_decay\n while True:\n if luoi >= len(xolds):\n break\n xold = xolds[luoi]\n if xold <= xnew:\n decay = np.exp(- (xnew - xold) / decay_period)\n sum_y += decay * yolds[luoi]\n count_y += decay\n luoi += 1\n else:\n break\n sum_ys[i] = sum_y\n count_ys[i] = count_y\n\n ys = sum_ys / count_ys\n ys[count_ys < low_counts_threshold] = np.nan\n\n return xnews, ys, count_ys\n\ndef symmetric_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1., low_counts_threshold=1e-8):\n '''\n perform symmetric EMA (exponential moving average)\n smoothing and resampling to an even grid with n points.\n Does not do extrapolation, so we assume\n xolds[0] <= low && high <= xolds[-1]\n\n Arguments:\n\n xolds: array or list - x values of data. Needs to be sorted in ascending order\n yolds: array of list - y values of data. Has to have the same length as xolds\n\n low: float - min value of the new x grid. By default equals to xolds[0]\n high: float - max value of the new x grid. By default equals to xolds[-1]\n\n n: int - number of points in new x grid\n\n decay_steps: float - EMA decay factor, expressed in new x grid steps.\n\n low_counts_threshold: float or int\n - y values with counts less than this value will be set to NaN\n\n Returns:\n tuple sum_ys, count_ys where\n xs - array with new x grid\n ys - array of EMA of y at each point of the new x grid\n count_ys - array of EMA of y counts at each point of the new x grid\n\n '''\n xs, ys1, count_ys1 = one_sided_ema(xolds, yolds, low, high, n, decay_steps, low_counts_threshold=0)\n _, ys2, count_ys2 = one_sided_ema(-xolds[::-1], yolds[::-1], -high, -low, n, decay_steps, low_counts_threshold=0)\n ys2 = ys2[::-1]\n count_ys2 = count_ys2[::-1]\n count_ys = count_ys1 + count_ys2\n ys = (ys1 * count_ys1 + ys2 * count_ys2) / count_ys\n ys[count_ys < low_counts_threshold] = np.nan\n return xs, ys, count_ys\n\nResult = namedtuple('Result', 'monitor progress dirname metadata')\nResult.__new__.__defaults__ = (None,) * len(Result._fields)\n\ndef load_results(root_dir_or_dirs, enable_progress=True, enable_monitor=True, verbose=False):\n '''\n load summaries of runs from a list of directories (including subdirectories)\n Arguments:\n\n enable_progress: bool - if True, will attempt to load data from progress.csv files (data saved by logger). Default: True\n\n enable_monitor: bool - if True, will attempt to load data from monitor.csv files (data saved by Monitor environment wrapper). Default: True\n\n verbose: bool - if True, will print out list of directories from which the data is loaded. 
Default: False\n\n\n Returns:\n List of Result objects with the following fields:\n - dirname - path to the directory data was loaded from\n - metadata - run metadata (such as command-line arguments and anything else in metadata.json file\n - monitor - if enable_monitor is True, this field contains pandas dataframe with loaded monitor.csv file (or aggregate of all *.monitor.csv files in the directory)\n - progress - if enable_progress is True, this field contains pandas dataframe with loaded progress.csv file\n '''\n import re\n if isinstance(root_dir_or_dirs, str):\n rootdirs = [osp.expanduser(root_dir_or_dirs)]\n else:\n rootdirs = [osp.expanduser(d) for d in root_dir_or_dirs]\n allresults = []\n for rootdir in rootdirs:\n assert osp.exists(rootdir), \"%s doesn't exist\"%rootdir\n for dirname, dirs, files in os.walk(rootdir):\n if '-proc' in dirname:\n files[:] = []\n continue\n monitor_re = re.compile(r'(\\d+\\.)?(\\d+\\.)?monitor\\.csv')\n if set(['metadata.json', 'monitor.json', 'progress.json', 'progress.csv']).intersection(files) or \\\n any([f for f in files if monitor_re.match(f)]): # also match monitor files like 0.1.monitor.csv\n # used to be uncommented, which means do not go deeper than current directory if any of the data files\n # are found\n # dirs[:] = []\n result = {'dirname' : dirname}\n if \"metadata.json\" in files:\n with open(osp.join(dirname, \"metadata.json\"), \"r\") as fh:\n result['metadata'] = json.load(fh)\n progjson = osp.join(dirname, \"progress.json\")\n progcsv = osp.join(dirname, \"progress.csv\")\n if enable_progress:\n if osp.exists(progjson):\n result['progress'] = pandas.DataFrame(read_json(progjson))\n elif osp.exists(progcsv):\n try:\n result['progress'] = read_csv(progcsv)\n except pandas.errors.EmptyDataError:\n print('skipping progress file in ', dirname, 'empty data')\n else:\n if verbose: print('skipping %s: no progress file'%dirname)\n\n if enable_monitor:\n try:\n result['monitor'] = pandas.DataFrame(monitor.load_results(dirname))\n except monitor.LoadMonitorResultsError:\n print('skipping %s: no monitor files'%dirname)\n except Exception as e:\n print('exception loading monitor file in %s: %s'%(dirname, e))\n\n if result.get('monitor') is not None or result.get('progress') is not None:\n allresults.append(Result(**result))\n if verbose:\n print('successfully loaded %s'%dirname)\n\n if verbose: print('loaded %i results'%len(allresults))\n return allresults\n\nCOLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',\n 'brown', 'orange', 'teal', 'lightblue', 'lime', 'lavender', 'turquoise',\n 'darkgreen', 'tan', 'salmon', 'gold', 'darkred', 'darkblue']\n\n\ndef default_xy_fn(r):\n x = np.cumsum(r.monitor.l)\n y = smooth(r.monitor.r, radius=10)\n return x,y\n\ndef default_split_fn(r):\n import re\n # match name between slash and -<digits> at the end of the string\n # (slash in the beginning or -<digits> in the end or either may be missing)\n match = re.search(r'[^/-]+(?=(-\\d+)?\\Z)', r.dirname)\n if match:\n return match.group(0)\n\ndef plot_results(\n allresults, *,\n xy_fn=default_xy_fn,\n split_fn=default_split_fn,\n group_fn=default_split_fn,\n average_group=False,\n shaded_std=True,\n shaded_err=True,\n figsize=None,\n legend_outside=False,\n resample=0,\n smooth_step=1.0,\n tiling='vertical',\n xlabel=None,\n ylabel=None\n):\n '''\n Plot multiple Results objects\n\n xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.\n By default, x is cumsum of episode 
lengths, and y is episode rewards\n\n split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.\n That is, the results r for which split_fn(r) is different will be put on different sub-panels.\n By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are\n stacked vertically in the figure.\n\n group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.\n That is, the results r for which group_fn(r) is the same will be put into the same group.\n Curves in the same group have the same color (if average_group is False), or averaged over\n (if average_group is True). The default value is the same as default value for split_fn\n\n average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling\n (if resample = 0, will use 512 steps)\n\n shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be\n shown (only applicable if average_group = True)\n\n shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves\n (that is, standard deviation divided by square root of number of curves) will be\n shown (only applicable if average_group = True)\n\n figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of\n sub-panels.\n\n\n legend_outside: bool - if True, will place the legend outside of the sub-panels.\n\n resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric\n EMA smoothing (see the docstring for symmetric_ema).\n Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default\n value is 512.\n\n smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).\n See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.\n\n '''\n\n if split_fn is None: split_fn = lambda _ : ''\n if group_fn is None: group_fn = lambda _ : ''\n sk2r = defaultdict(list) # splitkey2results\n for result in allresults:\n splitkey = split_fn(result)\n sk2r[splitkey].append(result)\n assert len(sk2r) > 0\n assert isinstance(resample, int), \"0: don't resample. 
<integer>: that many samples\"\n if tiling == 'vertical' or tiling is None:\n nrows = len(sk2r)\n ncols = 1\n elif tiling == 'horizontal':\n ncols = len(sk2r)\n nrows = 1\n elif tiling == 'symmetric':\n import math\n N = len(sk2r)\n largest_divisor = 1\n for i in range(1, int(math.sqrt(N))+1):\n if N % i == 0:\n largest_divisor = i\n ncols = largest_divisor\n nrows = N // ncols\n figsize = figsize or (6 * ncols, 6 * nrows)\n\n f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)\n\n groups = list(set(group_fn(result) for result in allresults))\n\n default_samples = 512\n if average_group:\n resample = resample or default_samples\n\n for (isplit, sk) in enumerate(sorted(sk2r.keys())):\n g2l = {}\n g2c = defaultdict(int)\n sresults = sk2r[sk]\n gresults = defaultdict(list)\n idx_row = isplit // ncols\n idx_col = isplit % ncols\n ax = axarr[idx_row][idx_col]\n for result in sresults:\n group = group_fn(result)\n g2c[group] += 1\n x, y = xy_fn(result)\n if x is None: x = np.arange(len(y))\n x, y = map(np.asarray, (x, y))\n if average_group:\n gresults[group].append((x,y))\n else:\n if resample:\n x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)\n l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])\n g2l[group] = l\n if average_group:\n for group in sorted(groups):\n xys = gresults[group]\n if not any(xys):\n continue\n color = COLORS[groups.index(group) % len(COLORS)]\n origxs = [xy[0] for xy in xys]\n minxlen = min(map(len, origxs))\n def allequal(qs):\n return all((q==qs[0]).all() for q in qs[1:])\n if resample:\n low = max(x[0] for x in origxs)\n high = min(x[-1] for x in origxs)\n usex = np.linspace(low, high, resample)\n ys = []\n for (x, y) in xys:\n ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])\n else:\n assert allequal([x[:minxlen] for x in origxs]),\\\n 'If you want to average unevenly sampled data, set resample=<number of samples you want>'\n usex = origxs[0]\n ys = [xy[1][:minxlen] for xy in xys]\n ymean = np.mean(ys, axis=0)\n ystd = np.std(ys, axis=0)\n ystderr = ystd / np.sqrt(len(ys))\n l, = axarr[idx_row][idx_col].plot(usex, ymean, color=color)\n g2l[group] = l\n if shaded_err:\n ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)\n if shaded_std:\n ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)\n\n\n # https://matplotlib.org/users/legend_guide.html\n plt.tight_layout()\n if any(g2l.keys()):\n ax.legend(\n g2l.values(),\n ['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),\n loc=2 if legend_outside else None,\n bbox_to_anchor=(1,1) if legend_outside else None)\n ax.set_title(sk)\n # add xlabels, but only to the bottom row\n if xlabel is not None:\n for ax in axarr[-1]:\n plt.sca(ax)\n plt.xlabel(xlabel)\n # add ylabels, but only to left column\n if ylabel is not None:\n for ax in axarr[:,0]:\n plt.sca(ax)\n plt.ylabel(ylabel)\n\n return f, axarr\n\ndef regression_analysis(df):\n xcols = list(df.columns.copy())\n xcols.remove('score')\n ycols = ['score']\n import statsmodels.api as sm\n mod = sm.OLS(df[ycols], sm.add_constant(df[xcols]), hasconst=False)\n res = mod.fit()\n print(res.summary())\n\ndef test_smooth():\n norig = 100\n nup = 300\n ndown = 30\n xs = np.cumsum(np.random.rand(norig) * 10 / norig)\n yclean = np.sin(xs)\n ys = yclean + .1 * np.random.randn(yclean.size)\n xup, yup, _ = symmetric_ema(xs, ys, xs.min(), xs.max(), nup, decay_steps=nup/ndown)\n xdown, ydown, _ = 
symmetric_ema(xs, ys, xs.min(), xs.max(), ndown, decay_steps=ndown/ndown)\n xsame, ysame, _ = symmetric_ema(xs, ys, xs.min(), xs.max(), norig, decay_steps=norig/ndown)\n plt.plot(xs, ys, label='orig', marker='x')\n plt.plot(xup, yup, label='up', marker='x')\n plt.plot(xdown, ydown, label='down', marker='x')\n plt.plot(xsame, ysame, label='same', marker='x')\n plt.plot(xs, yclean, label='clean', marker='x')\n plt.legend()\n plt.show()\n\n\n",
"# Taken from https://github.com/psclklnk/spdl\n# Copy of the license at TeachMyAgent/teachers/LICENSES/SPDL\n\nimport torch\nimport numpy as np\n\n\ndef set_weights(parameters, weights, use_cuda):\n \"\"\"\n Function used to set the value of a set of torch parameters given a\n vector of values.\n\n Args:\n parameters (list): list of parameters to be considered;\n weights (numpy.ndarray): array of the new values for\n the parameters;\n use_cuda (bool): whether the parameters are cuda tensors or not;\n\n \"\"\"\n idx = 0\n for p in parameters:\n shape = p.data.shape\n\n c = 1\n for s in shape:\n c *= s\n\n w = np.reshape(weights[idx:idx + c], shape)\n\n if not use_cuda:\n w_tensor = torch.from_numpy(w).type(p.data.dtype)\n else:\n w_tensor = torch.from_numpy(w).type(p.data.dtype).cuda()\n\n p.data = w_tensor\n idx += c\n\n assert idx == weights.size\n\n\ndef get_weights(parameters):\n \"\"\"\n Function used to get the value of a set of torch parameters as\n a single vector of values.\n\n Args:\n parameters (list): list of parameters to be considered.\n\n Returns:\n A numpy vector consisting of all the values of the vectors.\n\n \"\"\"\n weights = list()\n\n for p in parameters:\n w = p.data.detach().cpu().numpy()\n weights.append(w.flatten())\n\n weights = np.concatenate(weights, 0)\n\n return weights\n\n\ndef zero_grad(parameters):\n \"\"\"\n Function used to set to zero the value of the gradient of a set\n of torch parameters.\n\n Args:\n parameters (list): list of parameters to be considered.\n\n \"\"\"\n\n for p in parameters:\n if p.grad is not None:\n p.grad.detach_()\n p.grad.zero_()\n\n\ndef get_gradient(params):\n \"\"\"\n Function used to get the value of the gradient of a set of\n torch parameters.\n\n Args:\n parameters (list): list of parameters to be considered.\n\n \"\"\"\n views = []\n for p in params:\n if p.grad is None:\n view = p.new(p.numel()).zero_()\n else:\n view = p.grad.view(-1)\n views.append(view)\n return torch.cat(views, 0)\n\n\ndef to_float_tensor(x, use_cuda=False):\n \"\"\"\n Function used to convert a numpy array to a float torch tensor.\n\n Args:\n x (np.ndarray): numpy array to be converted as torch tensor;\n use_cuda (bool): whether to build a cuda tensors or not.\n\n Returns:\n A float tensor build from the values contained in the input array.\n\n \"\"\"\n x = torch.tensor(x, dtype=torch.float)\n return x.cuda() if use_cuda else x\n"
] |
[
[
"tensorflow.exp",
"tensorflow.trainable_variables",
"tensorflow.abs",
"tensorflow.train.AdamOptimizer",
"tensorflow.global_variables_initializer",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.clip_by_value",
"tensorflow.maximum",
"tensorflow.clip_by_global_norm",
"tensorflow.square",
"tensorflow.get_collection"
],
[
"numpy.ones_like",
"numpy.random.rand",
"numpy.exp",
"numpy.mean",
"numpy.cumsum",
"numpy.zeros_like",
"numpy.sin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"numpy.convolve",
"matplotlib.pyplot.sca",
"numpy.random.randn",
"numpy.std",
"matplotlib.pyplot.show",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"numpy.linspace"
],
[
"numpy.concatenate",
"torch.cat",
"numpy.reshape",
"torch.from_numpy",
"torch.tensor"
]
] |
Shafaq-Siddiqi/systemml
|
[
"5cc523971854cdf4f22e6199987a86e213fae4e2"
] |
[
"src/main/python/tests/frame/test_transform_apply.py"
] |
[
"# -------------------------------------------------------------\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n# -------------------------------------------------------------\n\nimport json\nimport os\nimport shutil\nimport sys\nimport unittest\n\nimport numpy as np\nimport pandas as pd\nfrom systemds.context import SystemDSContext\n\n\nclass TestTransformApply(unittest.TestCase):\n\n sds: SystemDSContext = None\n HOMES_PATH = \"../../test/resources/datasets/homes/homes.csv\"\n HOMES_SCHEMA = '\"int,string,int,int,double,int,boolean,int,int\"'\n JSPEC_PATH = \"../../test/resources/datasets/homes/homes.tfspec_bin2.json\"\n\n @classmethod\n def setUpClass(cls):\n cls.sds = SystemDSContext()\n\n @classmethod\n def tearDownClass(cls):\n cls.sds.close()\n\n def tearDown(self):\n pass\n\n def test_apply_recode_bin(self):\n with open(self.JSPEC_PATH) as jspec_file:\n JSPEC = json.load(jspec_file)\n F1 = self.sds.read(\n self.HOMES_PATH,\n data_type=\"frame\",\n schema=self.HOMES_SCHEMA,\n format=\"csv\",\n header=True,\n )\n pd_F1 = F1.compute()\n jspec = self.sds.read(self.JSPEC_PATH, data_type=\"scalar\", value_type=\"string\")\n X, M = F1.transform_encode(spec=jspec).compute()\n self.assertTrue(isinstance(X, np.ndarray))\n self.assertTrue(isinstance(M, pd.DataFrame))\n self.assertTrue(X.shape == pd_F1.shape)\n self.assertTrue(np.all(np.isreal(X)))\n relevant_columns = set()\n for col_name in JSPEC[\"recode\"]:\n relevant_columns.add(pd_F1.columns.get_loc(col_name))\n self.assertTrue(M[col_name].nunique() == pd_F1[col_name].nunique())\n for binning in JSPEC[\"bin\"]:\n col_name = binning[\"name\"]\n relevant_columns.add(pd_F1.columns.get_loc(col_name))\n self.assertTrue(M[col_name].nunique() == binning[\"numbins\"])\n\n X2 = F1.transform_apply(spec=jspec, meta=self.sds.from_pandas(M)).compute()\n self.assertTrue(X.shape == X2.shape)\n self.assertTrue(np.all(np.isreal(X2)))\n\n\nif __name__ == \"__main__\":\n unittest.main(exit=False)\n"
] |
[
[
"numpy.isreal"
]
] |
wenshuin/mkpy
|
[
"52d22b9bac50eede794bacd756869b1600b71ec0"
] |
[
"mkpy/events.py"
] |
[
"import re\nimport yaml\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom . import h5tools, mkh5\n\n\ndef read_excel_codemap(file, sheet_name=0):\n \"\"\"Read Excel .xlsx file, return codemap pandas DataFrame.\"\"\"\n\n codemap = pd.read_excel(file, sheet_name=sheet_name, index_col=\"Index\")\n if \"regexp\" not in codemap.columns:\n raise ValueError('\"regexp\" column must be present.')\n\n return codemap\n\n\ndef read_txt_codemap(file):\n \"\"\"Read tab-separated text file, return codemap pandas DataFrame.\"\"\"\n\n codemap = pd.read_table(file, index_col=\"Index\")\n if \"regexp\" not in codemap.columns:\n raise ValueError('\"regexp\" column must be present.')\n\n return codemap\n\n\ndef read_yaml_codemap(file):\n \"\"\"Read YAML file, return codemap pandas DataFrame.\"\"\"\n\n with open(file, \"r\") as f:\n yaml_dict = yaml.load(f)\n\n _validate_yaml_dict(yaml_dict)\n\n columns = yaml_dict[\"columns\"]\n rows = yaml_dict[\"rows\"]\n\n codemap = pd.DataFrame(data=rows, columns=columns).set_index(\"Index\")\n return codemap\n\n\ndef _validate_yaml_dict(yaml_dict):\n \"\"\"Check validity of YAML file contents.\"\"\"\n\n if not isinstance(yaml_dict, dict):\n raise ValueError(\n \"YAML file must define a dictionary-like mapping, \"\n f\"got a {type(yaml_dict)} instead.\"\n )\n\n if \"columns\" not in yaml_dict:\n raise ValueError('YAML file must have a \"columns\" entry.')\n\n columns = yaml_dict[\"columns\"]\n if not isinstance(columns, list):\n raise ValueError('\"columns\" must be a sequence (a list).')\n\n if \"Index\" not in columns or \"regexp\" not in columns:\n raise ValueError('Both \"Index\" and \"regexp\" columns must be present.')\n\n if \"rows\" not in yaml_dict:\n raise ValueError('YAML file must have a \"rows\" entry.')\n\n rows = yaml_dict[\"rows\"]\n if not isinstance(rows, list):\n raise ValueError('\"columns\" must be a sequence (a list).')\n\n ncols = len(columns)\n for row in rows:\n if not isinstance(row, list) or len(row) != ncols:\n raise ValueError(\n f\"Each row must be a list \"\n f\"and contain {ncols} items: {columns},\\n\"\n f\"but this row doesn't: {row}.\"\n )\n\n\ndef find_evcodes(pattern, ticks, evcodes):\n \"\"\"Run a regular expression search on an array of event codes.\n\n Parameters\n ----------\n\n pattern : str\n A regular expression pattern string containing exactly one anchor. For\n a detailed explanation of the format, see notes below.\n ticks, evcodes : NumPy arrays\n Arrays of the same shape containing ticks and event codes of a single\n data block from positions with nonzero event codes. 
Although the last\n requirement is not mandatory, this is the intended use.\n\n Returns\n -------\n\n df : pandas DataFrame\n DataFrame describing matches for the pattern.\n \"\"\"\n\n _validate_ticks_and_evcodes(ticks, evcodes)\n _validate_pattern(pattern)\n\n # the hash denotes the anchor group, we make it named\n pattern = pattern.replace(\"(#\", \"(?P<anchor>\")\n\n # group match should align with an alphanumeric word boundary on the right\n pattern = pattern.replace(r\")\", r\"\\b)\")\n\n compiled_pattern = re.compile(pattern)\n\n # this is necessary to identify anchor groups later\n anchor_group_id = compiled_pattern.groupindex[\"anchor\"]\n\n # convert evcodes to string so we can run regex\n sep = \" \"\n codestring = sep + sep.join(evcodes.astype(str))\n\n # map positions in code string to indices in evcodes\n sep_matches = re.finditer(sep, codestring)\n position_to_index = {match.end(): i for i, match in enumerate(sep_matches)}\n assert len(position_to_index) == len(evcodes)\n\n # run regular expression search on the codestring\n matches = list(re.finditer(compiled_pattern, codestring))\n\n # collect information about each match aligned with a code\n matches_info = [\n {\n \"group\": group.strip(),\n \"group_id\": group_id,\n \"group_position\": match.start(group_id),\n \"match_id\": match_id,\n }\n for match_id, match in enumerate(matches)\n # enumerate from index 1, since at 0 we have the universal match group\n for group_id, group in enumerate(match.groups(), 1)\n # match must align with an eventcode position\n if match.start() in position_to_index\n ]\n\n # check that no group matched more than one code\n if any(len(item[\"group\"].split(\" \")) != 1 for item in matches_info):\n raise ValueError(\"Groups must match one code.\")\n\n # further manipulations are better done in pandas\n df = pd.DataFrame(matches_info)\n if df.empty:\n return df\n\n # we need to recover indices from code positions in the code string\n indices = df[\"group_position\"].map(position_to_index)\n df[\"dblock_ticks\"] = ticks[indices]\n df[\"match_code\"] = evcodes[indices]\n df[\"is_anchor\"] = df[\"group_id\"] == anchor_group_id\n\n # verify that matched codes are equal to corresponding evcodes\n assert (df[\"group\"].astype(int) == df[\"match_code\"]).all()\n df.drop([\"group\", \"group_position\"], axis=1, inplace=True)\n\n # derive anchor information\n anchors = df[df[\"is_anchor\"]]\n anchor_data = anchors[[\"match_id\", \"dblock_ticks\", \"match_code\"]].rename(\n columns={\"dblock_ticks\": \"anchor_tick\", \"match_code\": \"anchor_code\"}\n )\n df = df.merge(anchor_data, on=\"match_id\")\n df[\"anchor_tick_delta\"] = df[\"dblock_ticks\"] - df[\"anchor_tick\"]\n\n return df\n\n\ndef _validate_pattern(pattern):\n \"\"\"Check that regex pattern conforms to type and format requirements.\"\"\"\n\n if not isinstance(pattern, str):\n raise TypeError(\"Pattern must be a string.\")\n\n if pattern.count(\"(#\") != 1:\n raise ValueError(\"Pattern must contain exactly one anchor group.\")\n\n if pattern.startswith(\" \") or pattern.endswith(\" \"):\n raise ValueError(\"Pattern cannot start or end with a whitespace.\")\n\n if 2 * \" \" in pattern:\n raise ValueError(\"Pattern cannot contain consecutive whitespaces.\")\n\n return re.compile(pattern)\n\n\ndef _validate_ticks_and_evcodes(ticks, evcodes):\n \"\"\"Ensure ticks and evcodes are NumPy arrays and have matching shapes.\"\"\"\n\n if not isinstance(ticks, np.ndarray):\n raise TypeError(f\"ticks must be a NumPy array, not {type(ticks)}.\")\n\n if not 
isinstance(evcodes, np.ndarray):\n raise TypeError(f\"evcodes must be a NumPy array, not {type(evcodes)}.\")\n\n if ticks.shape != evcodes.shape:\n raise ValueError(\n f\"ticks and evcodes should have equal shape:\\n\"\n f\"ticks is {ticks.shape}, evcodes is {evcodes.shape}\"\n )\n\n\ndef build_event_table(h5_fname, code_map, header_map_f):\n \"\"\"Construct an event table from the provided codemap and header map file.\n\n Parameters\n ----------\n h5_fname : str\n HDF5 file name\n code_map : pandas DataFrame\n DataFrame containing at least columns Index and regexp. The regexp\n column specifies regular expressions describing event code patterns.\n header_map_f : str\n header map file name, to be replaced by DataFrame\n\n Returns\n -------\n event_table : pandas DataFrame\n \"\"\"\n\n with h5py.File(h5_fname, \"r\") as h5:\n\n # dblock census\n dblocks_and_paths = [\n (h5[dblock_path], dblock_path)\n for dgroup_path in h5tools.get_data_group_paths(h5_fname)\n for dblock_path in h5tools.get_dblock_paths(h5_fname, dgroup_path)\n ]\n\n # subset every dblock for nonzero event codes\n nonzero = [\n (dblock[dblock[\"log_evcodes\"] != 0], dblock_path)\n for dblock, dblock_path in dblocks_and_paths\n ]\n\n # build three dataframes\n header_df = build_header_df(dblocks_and_paths, header_map_f)\n match_df = build_match_df(nonzero, code_map)\n dblock_df = build_dblock_df(nonzero)\n\n # merge them to get the event table\n event_table = match_df.merge(\n header_df, how=\"left\", on=\"dblock_path\"\n ).merge(dblock_df, how=\"left\", on=[\"dblock_path\", \"dblock_ticks\"])\n\n # we love pandas, but we want to make sure no information is lost\n # first, we check that no rows from the match_df are missing\n assert len(match_df) == len(event_table)\n\n # second, we want to make sure the merges were complete in the sense\n # that no values are missing\n assert event_table.notnull().values.all()\n\n # finally, set epoch information\n event_table[\"epoch_match_tick_delta\"] = 0\n event_table[\"epoch_ticks\"] = 1\n\n return event_table\n\n\ndef build_match_df(dblocks_and_paths, code_map):\n \"\"\"Run pattern matcher on dblocks using codemap.\"\"\"\n\n match_dfs = (\n (\n find_evcodes(\n row.regexp, db[\"dblock_ticks\"], db[\"log_evcodes\"]\n ).assign(Index=row.Index, dblock_path=dbp)\n )\n for db, dbp in dblocks_and_paths\n for row in code_map.itertuples()\n )\n\n nonempty_match_dfs = [\n match_df for match_df in match_dfs if not match_df.empty\n ]\n\n match_df = pd.concat(nonempty_match_dfs, ignore_index=True)\n match_df = match_df.join(code_map, on=\"Index\")\n\n return match_df\n\n\ndef build_header_df(dblocks_and_paths, header_map_f):\n \"\"\"Collect header 'slicing' data from given dblocks.\"\"\"\n\n hio = mkh5.mkh5.HeaderIO()\n hio.set_slicer(header_map_f)\n\n header_data = []\n for dblock, dblock_path in dblocks_and_paths:\n hio.get(dblock)\n data = {\n **dict(hio.get_slices()),\n \"dblock_path\": dblock_path,\n \"data_group\": dblock.parent.name.lstrip(\"/\"),\n \"dblock_srate\": hio.header[\"samplerate\"],\n }\n header_data.append(data)\n\n return pd.DataFrame(header_data)\n\n\ndef build_dblock_df(dblocks_and_paths):\n \"\"\"Make a DataFrame from a subset of dblock columns.\"\"\"\n\n dblock_dfs = [\n pd.DataFrame(dblock).assign(dblock_path=dblock_path)\n for dblock, dblock_path in dblocks_and_paths\n ]\n\n cols = [\n \"dblock_ticks\",\n \"crw_ticks\",\n \"raw_evcodes\",\n \"log_evcodes\",\n \"log_ccodes\",\n \"log_flags\",\n \"dblock_path\",\n ]\n\n dblock_df = pd.concat(dblock_dfs)[cols]\n\n return 
dblock_df\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_excel",
"pandas.read_table",
"pandas.concat"
]
] |
sandialabs/slycat
|
[
"690e1cb07a6fa990d7206265e18edb22ae3f62e7"
] |
[
"web-client/slycat/web/client/cca_random.py"
] |
[
"#!/bin/env python\n# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract\n# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government\n# retains certain rights in this software.\n\n\"\"\"Demonstrates uploading data to Slycat Web Server to compute a CCA model.\n\nThis script computes a Slycat cca model using a set of semi-random observations. Use\nthis script as a starting-point for uploading your own data to a CCA model.\n\nA Slycat CCA model requires the following data, which you will have to provide\nin your own scripts:\n\n data-table A single 1D array containing M input observations with N features (array attributes).\n\"\"\"\n\nimport numpy\nimport slycat.web.client\n\n# parse arguments\ndef parse_arguments(list_input=None):\n\n # get and parse arguments for creating CCA model\n parser = slycat.web.client.ArgumentParser(description=\"Create CCA model from randomly generated data.\")\n parser.add_argument(\"--column-prefix\", default=\"a\", help=\"Column prefix. Default: %(default)s\")\n parser.add_argument(\"--constant-input-count\", type=int, default=0, help=\"Number of input columns to make constant. Default: %(default)s\")\n parser.add_argument(\"--constant-output-count\", type=int, default=0, help=\"Number of output columns to make constant. Default: %(default)s\")\n parser.add_argument(\"--duplicate-input-count\", type=int, default=0, help=\"Number of input columns to duplicate. Default: %(default)s\")\n parser.add_argument(\"--duplicate-output-count\", type=int, default=0, help=\"Number of output columns to duplicate. Default: %(default)s\")\n parser.add_argument(\"--input-count\", type=int, default=3, help=\"Input column count. Default: %(default)s\")\n parser.add_argument(\"--marking\", default=\"\", help=\"Marking type. Default: %(default)s\")\n parser.add_argument(\"--model-name\", default=\"Sample CCA Model\", help=\"New model name. Default: %(default)s\")\n parser.add_argument(\"--output-count\", type=int, default=3, help=\"Output column count. Default: %(default)s\")\n parser.add_argument(\"--project-name\", default=\"Sample CCA Project\", help=\"New project name. Default: %(default)s\")\n parser.add_argument(\"--row-count\", type=int, default=100, help=\"Row count. Default: %(default)s\")\n parser.add_argument(\"--seed\", type=int, default=12345, help=\"Random seed. Default: %(default)s\")\n parser.add_argument(\"--unused-count\", type=int, default=3, help=\"Unused column count. 
Default: %(default)s\")\n arguments = parser.parse_args(list_input)\n\n if arguments.input_count < 1:\n raise Exception(\"Input count must be greater-than zero.\")\n if arguments.output_count < 1:\n raise Exception(\"Output count must be greater-than zero.\")\n if arguments.constant_input_count > arguments.input_count:\n raise Exception(\"Constant input count must be less than input count.\")\n if arguments.constant_output_count > arguments.output_count:\n raise Exception(\"Constant output count must be less than output count.\")\n if arguments.duplicate_input_count >= arguments.input_count:\n raise Exception(\"Duplicate input count must be less than input count.\")\n if arguments.duplicate_output_count >= arguments.output_count:\n raise Exception(\"Duplicate output count must be less than output count.\")\n\n return arguments\n\n# create CCA model\ndef main(arguments, connection):\n\n total_columns = arguments.input_count + arguments.output_count + arguments.unused_count\n\n # Create some random data using a gaussian distribution.\n numpy.random.seed(arguments.seed)\n data = numpy.random.normal(size=(arguments.row_count, total_columns))\n\n # Force a somewhat-linear relationship between the inputs and outputs.\n for i in range(arguments.input_count, arguments.input_count + min(arguments.input_count, arguments.output_count)):\n data[:, i] = data[:, 0] ** i\n\n # Optionally make some columns constant.\n for i in range(arguments.constant_input_count):\n data[:,i] = data[0,i]\n for i in range(arguments.input_count, arguments.input_count + arguments.constant_output_count):\n data[:,i] = data[0,i]\n\n # Optionally duplicate some columns to create rank-deficient data.\n for i in range(1, 1 + arguments.duplicate_input_count):\n data[:,i] = data[:,0]\n for i in range(1 + arguments.input_count, 1 + arguments.input_count + arguments.duplicate_output_count):\n data[:,i] = data[:, arguments.input_count]\n\n # Create a new project to contain our model.\n pid = connection.find_or_create_project(arguments.project_name)\n\n # Create the new, empty model.\n mid = connection.post_project_models(pid, \"cca\", arguments.model_name, arguments.marking)\n\n # Upload our observations as \"data-table\".\n connection.put_model_arrayset(mid, \"data-table\")\n\n # Start our single \"data-table\" array.\n dimensions = [dict(name=\"row\", end=arguments.row_count)]\n attributes = [dict(name=\"%s%s\" % (arguments.column_prefix, column), type=\"float64\") for column in range(total_columns)]\n connection.put_model_arrayset_array(mid, \"data-table\", 0, dimensions, attributes)\n\n # Upload data into the array.\n for i in range(total_columns):\n connection.put_model_arrayset_data(mid, \"data-table\", \"0/%s/...\" % i, [data.T[i]])\n\n # Store the remaining parameters.\n connection.put_model_parameter(mid, \"input-columns\", list(range(0, arguments.input_count)))\n connection.put_model_parameter(mid, \"output-columns\", \n list(range(arguments.input_count, arguments.input_count + arguments.output_count)))\n connection.put_model_parameter(mid, \"scale-inputs\", False)\n\n # Signal that we're done uploading data to the model. 
This lets Slycat Web\n # Server know that it can start computation.\n connection.post_model_finish(mid)\n # Wait until the model is ready.\n connection.join_model(mid)\n\n # Supply the user with a direct link to the new model.\n host = arguments.host\n if arguments.port:\n host = host + \":\" + arguments.port\n slycat.web.client.log.info(\"Your new model is located at %s/models/%s\" % (host, mid))\n\n return mid\n\n# command line entry point\nif __name__ == \"__main__\":\n \n # set up parser and get arguments\n arguments = parse_arguments()\n\n # connect and get projects\n connection = slycat.web.client.connect(arguments)\n\n # list projects\n main(arguments, connection)\n"
] |
[
[
"numpy.random.seed",
"numpy.random.normal"
]
] |
ZhaoyangLyu/Point_Diffusion_Refinement
|
[
"9747265a5f141e5546fd4f862bfa66aa59f1bd33"
] |
[
"pointnet2/models/pvd/modules/pointnet.py"
] |
[
"import torch\nimport torch.nn as nn\n\nimport modules.functional as F\nfrom modules.ball_query import BallQuery\nfrom modules.shared_mlp import SharedMLP\n\n__all__ = ['PointNetAModule', 'PointNetSAModule', 'PointNetFPModule']\n\n\nclass PointNetAModule(nn.Module):\n def __init__(self, in_channels, out_channels, include_coordinates=True):\n super().__init__()\n if not isinstance(out_channels, (list, tuple)):\n out_channels = [[out_channels]]\n elif not isinstance(out_channels[0], (list, tuple)):\n out_channels = [out_channels]\n\n mlps = []\n total_out_channels = 0\n for _out_channels in out_channels:\n mlps.append(\n SharedMLP(in_channels=in_channels + (3 if include_coordinates else 0),\n out_channels=_out_channels, dim=1)\n )\n total_out_channels += _out_channels[-1]\n\n self.include_coordinates = include_coordinates\n self.out_channels = total_out_channels\n self.mlps = nn.ModuleList(mlps)\n\n def forward(self, inputs):\n features, coords = inputs\n if self.include_coordinates:\n features = torch.cat([features, coords], dim=1)\n coords = torch.zeros((coords.size(0), 3, 1), device=coords.device)\n if len(self.mlps) > 1:\n features_list = []\n for mlp in self.mlps:\n features_list.append(mlp(features).max(dim=-1, keepdim=True).values)\n return torch.cat(features_list, dim=1), coords\n else:\n return self.mlps[0](features).max(dim=-1, keepdim=True).values, coords\n\n def extra_repr(self):\n return f'out_channels={self.out_channels}, include_coordinates={self.include_coordinates}'\n\n\nclass PointNetSAModule(nn.Module):\n def __init__(self, num_centers, radius, num_neighbors, in_channels, out_channels, include_coordinates=True):\n super().__init__()\n if not isinstance(radius, (list, tuple)):\n radius = [radius]\n if not isinstance(num_neighbors, (list, tuple)):\n num_neighbors = [num_neighbors] * len(radius)\n assert len(radius) == len(num_neighbors)\n if not isinstance(out_channels, (list, tuple)):\n out_channels = [[out_channels]] * len(radius)\n elif not isinstance(out_channels[0], (list, tuple)):\n out_channels = [out_channels] * len(radius)\n assert len(radius) == len(out_channels)\n\n groupers, mlps = [], []\n total_out_channels = 0\n for _radius, _out_channels, _num_neighbors in zip(radius, out_channels, num_neighbors):\n groupers.append(\n BallQuery(radius=_radius, num_neighbors=_num_neighbors, include_coordinates=include_coordinates)\n )\n mlps.append(\n SharedMLP(in_channels=in_channels + (3 if include_coordinates else 0),\n out_channels=_out_channels, dim=2)\n )\n total_out_channels += _out_channels[-1]\n\n self.num_centers = num_centers\n self.out_channels = total_out_channels\n self.groupers = nn.ModuleList(groupers)\n self.mlps = nn.ModuleList(mlps)\n\n def forward(self, inputs):\n features, coords, temb = inputs\n centers_coords = F.furthest_point_sample(coords, self.num_centers)\n features_list = []\n for grouper, mlp in zip(self.groupers, self.mlps):\n features, temb = mlp(grouper(coords, centers_coords, temb, features))\n features_list.append(features.max(dim=-1).values)\n if len(features_list) > 1:\n return features_list[0], centers_coords, temb.max(dim=-1).values if temb.shape[1] > 0 else temb\n else:\n return features_list[0], centers_coords, temb.max(dim=-1).values if temb.shape[1] > 0 else temb\n\n def extra_repr(self):\n return f'num_centers={self.num_centers}, out_channels={self.out_channels}'\n\n\nclass PointNetFPModule(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.mlp = SharedMLP(in_channels=in_channels, 
out_channels=out_channels, dim=1)\n\n def forward(self, inputs):\n if len(inputs) == 3:\n points_coords, centers_coords, centers_features, temb = inputs\n points_features = None\n else:\n points_coords, centers_coords, centers_features, points_features, temb = inputs\n interpolated_features = F.nearest_neighbor_interpolate(points_coords, centers_coords, centers_features)\n interpolated_temb = F.nearest_neighbor_interpolate(points_coords, centers_coords, temb)\n if points_features is not None:\n interpolated_features = torch.cat(\n [interpolated_features, points_features], dim=1\n )\n return self.mlp(interpolated_features), points_coords, interpolated_temb\n"
] |
[
[
"torch.cat",
"torch.nn.ModuleList"
]
] |
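The `apis` cell of the row above lists `torch.cat` and `torch.nn.ModuleList`, which the embedded `PointNetAModule` uses to concatenate xyz coordinates onto per-point features before max-pooling over the point dimension. The following is a minimal, self-contained sketch of that aggregation pattern only; `TinyPointNetA` is a hypothetical name, and the 1x1 `nn.Conv1d` stands in for the repo's `SharedMLP`, which is an assumption on my part rather than the repo's actual layer.

```python
# Sketch of the PointNetAModule aggregation pattern: concat coords, shared
# point-wise MLP, then max-pool over points. Not the repo's implementation.
import torch
import torch.nn as nn


class TinyPointNetA(nn.Module):
    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        # +3 input channels because xyz coordinates are concatenated in forward().
        self.mlp = nn.Sequential(
            nn.Conv1d(in_channels + 3, out_channels, kernel_size=1),
            nn.ReLU(),
        )

    def forward(self, features: torch.Tensor, coords: torch.Tensor) -> torch.Tensor:
        # features: (B, C, N), coords: (B, 3, N)
        x = torch.cat([features, coords], dim=1)   # (B, C + 3, N)
        x = self.mlp(x)                            # (B, out_channels, N)
        return x.max(dim=-1).values                # (B, out_channels) global descriptor


if __name__ == "__main__":
    feats = torch.randn(2, 16, 1024)   # 2 point clouds, 16-dim features, 1024 points
    xyz = torch.randn(2, 3, 1024)
    print(TinyPointNetA(16, 64)(feats, xyz).shape)  # torch.Size([2, 64])
```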
cabrittin/volumetric_analysis
|
[
"82004378abae963ef02858bf4711786dad76f133",
"82004378abae963ef02858bf4711786dad76f133"
] |
[
"scripts/varshney_hierarchy.py",
"scripts/logistic_test.py"
] |
[
"\"\"\"\nvarshney_hierarchy.py\n\n\n\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfrom connectome.load import load_lite\nimport networks.hierarchy as hierarchy\nfrom figures.hierarchy import plot_varshney\nimport aux\n\nDB = 'N2U'\nGROUP = './mat/n2u_class_group.txt'\nREMOVE = './mat/n2u_remove.txt'\nTHRESH = 0.1\nL0 = ['MUBODY','MUHEAD','anal','int','intestine','sph','um','vm']\ncol = {'S':'r','I':'b','M':'#D7DF01','Mu':'#8904B1'}\nNCLASS = './mat/n2u_class_simmu.txt'\n\ndef run():\n _group = aux.read.into_map(GROUP)\n group = {'map':_group,'key':'class'}\n remove = aux.read.into_map(REMOVE)\n C = load_lite(DB,chemical=True,electrical=True,\n group=group,remove=remove,\n dataType='networkx')\n C.combine_chem_and_elec()\n H = hierarchy.hierarchy(C.D,L0,THRESH)\n print(H)\n nodes,xyz = hierarchy.varshney_modified(H,C.D)\n nclass = aux.read.into_dict(NCLASS)\n fig,ax = plt.subplots(1,1,figsize=(20,10))\n plot_varshney(ax,nodes,xyz,C.D.edges(),nclass,col)\n plt.show()\n \nif __name__ == '__main__':\n run()\n\n\n \n",
"\"\"\"\ndist_adj_weight_decision.py\n\nPredicted number of synaptic connections for each cell compared \nto the actual number. Predictions made using a logistic regression \nclassifier model. Red line indicates perfect agreement between predicted \nand actual values. The residual is the distance from the data point to \nthe line. Colors indicate the probability of observing a residual\nas large or larger. p adj is a representative probability for all data \npoints, computed using multiple hypothesis testing.\n\ncreated: Christopher Brittin\ndate: 01 November 2018\n\n\"\"\"\nimport sys\nsys.path.append(r'./volumetric_analysis')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.special import digamma\n\nimport matplotlib as mpl\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_val_score\n\n\nfrom connectome.load import from_db\nfrom networks.stats import get_corresponding_edge_attr\nfrom models.mutual_info import *\nfrom figures.stats import plot_adj_syn_mi\nfrom models.mht import *\nimport aux\n\nmpl.rcParams['xtick.labelsize'] = 32\nmpl.rcParams['ytick.labelsize'] = 32\n\ndb = 'N2U'\nremove = ['VC01','VD01','VB01','VB02']\nSCALE = 5*90*(1e-6)\nKMAX = 100\nTHETA = 0\n\ndef get_data(G1,G2):\n #Get edge weights\n N = G2.ecount()\n data = np.zeros((N,2))\n for i in range(N):\n e = G2.es[i]\n data[i,0] = e['weight']\n if G1.are_connected(e.source,e.target):\n w = G1.es[G1.get_eid(e.source,e.target)]['weight']\n if w > THETA: \n data[i,1] = 1\n data[:,0] *= SCALE\n data[:,0] = np.log(data[:,0])\n \n return data\n\ndef result_data(G1,G2,model):\n N = G2.vcount()\n data = np.zeros((N,4))\n for i in range(N):\n size = G2.degree(i)\n #actual = C.C.degree(i)\n actual = 0\n for e in G1.incident(i):\n if G1.es[e]['weight'] > THETA:\n actual += 1\n p = actual / float(size)\n var = size*p*(1-p)\n w = np.log(np.array(G2.es[G2.incident(i)]['weight']).reshape(-1,1)*SCALE)\n predict = np.sum(model.predict(w))\n #w[w >= 1.28] = 1\n #w[w < 1] = 0\n #predict = np.sum(w)\n data[i,:] = [size,actual,predict,var]\n \n \n return data\n\ndef run(fout=None,source_data = None):\n C = from_db(db,adjacency=True,chemical=True,electrical=True,remove=remove)\n C.C.reduce_to(C.A)\n C.E.reduce_to(C.A)\n N = C.C.ecount()\n\n C.C.to_undirected(combine_edges=sum)\n\n data = get_data(C.C,C.A)\n data = data[data[:,0].argsort()]\n\n X = data[:,0].reshape(-1,1)\n y = np.ravel(data[:,1])\n _x = np.linspace(-4,4,81).reshape(-1,1)\n\n # instantiate a logistic regression model, and fit with X and y\n model = LogisticRegression()\n model = model.fit(X, y)\n\n # check the accuracy on the training set\n print(model.score(X, y))\n print(y.mean())\n #print(model.predict_proba(_x))\n\n _data = result_data(C.C,C.A,model)\n if source_data:\n dout = []\n for i in range(C.A.vcount()):\n dout.append([C.A.vs[i]['name']] + _data[i,:].tolist())\n aux.write.from_list(source_data,dout)\n \n \"\"\"\n plt.figure()\n plt.plot(data[:,1],data[:,2],'bo')\n plt.plot([0,50],[0,50],'r-',linewidth=3)\n plt.xlim([0,50])\n plt.ylim([0,50])\n plt.show()\n \"\"\"\n\n fig,ax = plt.subplots(1,1,figsize=(15,10))\n plot_actual_vs_predict(ax,_data,colorbar=True)\n ax.set_xlim([0,50])\n ax.set_ylim([0,50])\n ax.set_title('Predicted number of synaptic connections per cell',\n fontsize=32,y=1.04)\n ax.set_xlabel('# actual connections',fontsize=32)\n ax.set_ylabel('# predicted connections',fontsize=32)\n 
plt.tight_layout()\n if fout: plt.savefig(fout)\n plt.show()\n\n \"\"\"\n # evaluate the model by splitting into train and test sets\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=0)\n model2 = LogisticRegression()\n model2.fit(X_train, y_train)\n\n # predict class labels for the test set\n predicted = model2.predict(X_test)\n print(predicted)\n\n # generate class probabilities\n probs = model2.predict_proba(X_test)\n print(probs)\n \n # generate evaluation metrics\n print(metrics.accuracy_score(y_test, predicted))\n print(metrics.roc_auc_score(y_test, probs[:, 1]))\n\n print(metrics.confusion_matrix(y_test, predicted))\n print(metrics.classification_report(y_test, predicted))\n \n # evaluate the model using 10-fold cross-validation\n scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)\n print(scores)\n print(scores.mean())\n \"\"\"\n\nif __name__ == \"__main__\":\n run()\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
],
[
"numpy.log",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"sklearn.linear_model.LogisticRegression",
"numpy.ravel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"numpy.linspace"
]
] |
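The second file in the row above (`scripts/logistic_test.py`) fits a one-feature `LogisticRegression` on log-scaled contact weights against a binary "synapse present" label, then sums per-cell predictions to estimate connection counts. The sketch below reproduces only that classifier usage on synthetic data; the coefficients, sample sizes, and the stand-in "contacts" array are invented for illustration, whereas the real script builds `X` and `y` from the connectome graphs.

```python
# Minimal sketch of the fit / score / predict-and-sum flow in logistic_test.py,
# run on synthetic data rather than the connectome adjacency graphs.
import numpy as np
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
log_weights = rng.normal(loc=0.0, scale=1.5, size=500)        # stand-in for log(contact area)
p_connect = 1.0 / (1.0 + np.exp(-(1.2 * log_weights - 0.3)))  # synthetic ground-truth model
labels = rng.binomial(1, p_connect)                           # 1 = synaptic connection observed

X = log_weights.reshape(-1, 1)
model = LogisticRegression().fit(X, labels)
print("training accuracy:", model.score(X, labels))

# Predicted number of connections for a hypothetical cell with 40 contacts.
cell_contacts = rng.normal(size=40).reshape(-1, 1)
print("predicted # connections:", int(model.predict(cell_contacts).sum()))
```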
pawandeep2155/FixMatch-pytorch
|
[
"d86dc6d85d3d69518c100edb93b2fc13354191cb"
] |
[
"train.py"
] |
[
"import argparse\nimport logging\nimport math\nimport os\nimport random\nimport shutil\nimport time\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nfrom dataset.cifar import DATASET_GETTERS\nfrom utils import AverageMeter, accuracy\n\nlogger = logging.getLogger(__name__)\nbest_acc = 0\n\n\ndef save_checkpoint(state, is_best, checkpoint, filename='checkpoint.pth.tar'):\n filepath = os.path.join(checkpoint, filename)\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint,\n 'model_best.pth.tar'))\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef get_cosine_schedule_with_warmup(optimizer,\n num_warmup_steps,\n num_training_steps,\n num_cycles=7. / 16.,\n last_epoch=-1):\n def _lr_lambda(current_step):\n if current_step < num_warmup_steps:\n return float(current_step) / float(max(1, num_warmup_steps))\n no_progress = float(current_step - num_warmup_steps) / \\\n float(max(1, num_training_steps - num_warmup_steps))\n return max(0., math.cos(math.pi * num_cycles * no_progress))\n\n return LambdaLR(optimizer, _lr_lambda, last_epoch)\n\n\ndef interleave(x, size):\n s = list(x.shape)\n return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])\n\n\ndef de_interleave(x, size):\n s = list(x.shape)\n return x.reshape([size, -1] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])\n\n\ndef main():\n parser = argparse.ArgumentParser(description='PyTorch FixMatch Training')\n parser.add_argument('--gpu-id', default='0', type=int,\n help='id(s) for CUDA_VISIBLE_DEVICES')\n parser.add_argument('--num-workers', type=int, default=4,\n help='number of workers')\n parser.add_argument('--dataset', default='filtered1500', type=str,\n choices=['cifar10', 'cifar100', 'filtered1500'],\n help='dataset name')\n parser.add_argument('--num-labeled', type=int, default=2000,\n help='number of labeled data')\n parser.add_argument(\"--expand-labels\", action=\"store_true\",\n help=\"expand labels to fit eval steps\")\n parser.add_argument('--arch', default='wideresnet', type=str,\n choices=['wideresnet', 'resnext'],\n help='dataset name')\n parser.add_argument('--total-steps', default=2 ** 20, type=int,\n help='number of total steps to run')\n parser.add_argument('--eval-step', default=1024, type=int,\n help='number of eval steps to run')\n parser.add_argument('--start-epoch', default=0, type=int,\n help='manual epoch number (useful on restarts)')\n parser.add_argument('--batch-size', default=64, type=int,\n help='train batchsize')\n parser.add_argument('--lr', '--learning-rate', default=0.03, type=float,\n help='initial learning rate')\n parser.add_argument('--warmup', default=0, type=float,\n help='warmup epochs (unlabeled data based)')\n parser.add_argument('--wdecay', default=5e-4, type=float,\n help='weight decay')\n parser.add_argument('--nesterov', action='store_true', default=True,\n help='use nesterov momentum')\n parser.add_argument('--use-ema', action='store_true', default=True,\n help='use EMA model')\n parser.add_argument('--ema-decay', default=0.999, type=float,\n help='EMA decay rate')\n 
parser.add_argument('--mu', default=7, type=int,\n help='coefficient of unlabeled batch size')\n parser.add_argument('--lambda-u', default=1, type=float,\n help='coefficient of unlabeled loss')\n parser.add_argument('--T', default=1, type=float,\n help='pseudo label temperature')\n parser.add_argument('--threshold', default=0.95, type=float,\n help='pseudo label threshold')\n parser.add_argument('--out', default='result',\n help='directory to output the result')\n parser.add_argument('--resume', default='', type=str,\n help='path to latest checkpoint (default: none)')\n parser.add_argument('--seed', default=None, type=int,\n help=\"random seed\")\n parser.add_argument(\"--amp\", action=\"store_true\",\n help=\"use 16-bit (mixed) precision through NVIDIA apex AMP\")\n parser.add_argument(\"--opt_level\", type=str, default=\"O1\",\n help=\"apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\")\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"For distributed training: local_rank\")\n parser.add_argument('--no-progress', action='store_true',\n help=\"don't use progress bar\")\n\n args = parser.parse_args()\n global best_acc\n\n def create_model(args):\n if args.arch == 'wideresnet':\n import models.wideresnet as models\n model = models.build_wideresnet(depth=args.model_depth,\n widen_factor=args.model_width,\n dropout=0,\n num_classes=args.num_classes)\n elif args.arch == 'resnext':\n import models.resnext as models\n model = models.build_resnext(cardinality=args.model_cardinality,\n depth=args.model_depth,\n width=args.model_width,\n num_classes=args.num_classes)\n logger.info(\"Total params: {:.2f}M\".format(\n sum(p.numel() for p in model.parameters()) / 1e6))\n return model\n\n if args.local_rank == -1:\n device = torch.device('cuda', args.gpu_id)\n args.world_size = 1\n args.n_gpu = torch.cuda.device_count()\n else:\n torch.cuda.set_device(args.local_rank)\n device = torch.device('cuda', args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.world_size = torch.distributed.get_world_size()\n args.n_gpu = 1\n\n args.device = device\n\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)\n\n logger.warning(\n f\"Process rank: {args.local_rank}, \"\n f\"device: {args.device}, \"\n f\"n_gpu: {args.n_gpu}, \"\n f\"distributed training: {bool(args.local_rank != -1)}, \"\n f\"16-bits training: {args.amp}\", )\n\n logger.info(dict(args._get_kwargs()))\n\n if args.seed is not None:\n set_seed(args)\n\n if args.local_rank in [-1, 0]:\n os.makedirs(args.out, exist_ok=True)\n args.writer = SummaryWriter(args.out)\n\n if args.dataset == 'cifar10':\n args.num_classes = 10\n if args.arch == 'wideresnet':\n args.model_depth = 28\n args.model_width = 2\n elif args.arch == 'resnext':\n args.model_cardinality = 4\n args.model_depth = 28\n args.model_width = 4\n\n elif args.dataset == 'cifar100':\n args.num_classes = 100\n if args.arch == 'wideresnet':\n args.model_depth = 28\n args.model_width = 8\n elif args.arch == 'resnext':\n args.model_cardinality = 8\n args.model_depth = 29\n args.model_width = 64\n elif args.dataset == 'filtered1500':\n args.num_classes = 8\n if args.arch == 'wideresnet':\n args.model_depth = 28\n args.model_width = 2\n elif args.arch == 'resnext':\n args.model_cardinality = 4\n args.model_depth = 28\n args.model_width = 4\n\n 
if args.local_rank not in [-1, 0]:\n torch.distributed.barrier()\n\n labeled_dataset, unlabeled_dataset, test_dataset = DATASET_GETTERS[args.dataset](\n args, './data')\n\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n train_sampler = RandomSampler if args.local_rank == -1 else DistributedSampler\n\n labeled_trainloader = DataLoader(\n labeled_dataset,\n sampler=train_sampler(labeled_dataset),\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n drop_last=True)\n\n unlabeled_trainloader = DataLoader(\n unlabeled_dataset,\n sampler=train_sampler(unlabeled_dataset),\n batch_size=args.batch_size * args.mu,\n num_workers=args.num_workers,\n drop_last=True)\n\n test_loader = DataLoader(\n test_dataset,\n sampler=SequentialSampler(test_dataset),\n batch_size=args.batch_size,\n num_workers=args.num_workers)\n\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier()\n\n model = create_model(args)\n\n if args.local_rank == 0:\n torch.distributed.barrier()\n\n model.to(args.device)\n\n no_decay = ['bias', 'bn']\n grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(\n nd in n for nd in no_decay)], 'weight_decay': args.wdecay},\n {'params': [p for n, p in model.named_parameters() if any(\n nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = optim.SGD(grouped_parameters, lr=args.lr,\n momentum=0.9, nesterov=args.nesterov)\n\n args.epochs = math.ceil(args.total_steps / args.eval_step)\n # args.epochs = 2\n scheduler = get_cosine_schedule_with_warmup(\n optimizer, args.warmup, args.total_steps)\n\n if args.use_ema:\n from models.ema import ModelEMA\n ema_model = ModelEMA(args, model, args.ema_decay)\n\n args.start_epoch = 0\n\n if args.resume:\n logger.info(\"==> Resuming from checkpoint..\")\n assert os.path.isfile(\n args.resume), \"Error: no checkpoint directory found!\"\n args.out = os.path.dirname(args.resume)\n checkpoint = torch.load(args.resume)\n best_acc = checkpoint['best_acc']\n args.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n if args.use_ema:\n ema_model.ema.load_state_dict(checkpoint['ema_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n\n if args.amp:\n from apex import amp\n model, optimizer = amp.initialize(\n model, optimizer, opt_level=args.opt_level)\n\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank],\n output_device=args.local_rank, find_unused_parameters=True)\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Task = {args.dataset}@{args.num_labeled}\")\n logger.info(f\" Num Epochs = {args.epochs}\")\n logger.info(f\" Batch size per GPU = {args.batch_size}\")\n logger.info(\n f\" Total train batch size = {args.batch_size * args.world_size}\")\n logger.info(f\" Total optimization steps = {args.total_steps}\")\n\n model.zero_grad()\n train(args, labeled_trainloader, unlabeled_trainloader, test_loader,\n model, optimizer, ema_model, scheduler)\n\n\ndef train(args, labeled_trainloader, unlabeled_trainloader, test_loader,\n model, optimizer, ema_model, scheduler):\n if args.amp:\n from apex import amp\n global best_acc\n test_accs = []\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n losses_x = AverageMeter()\n losses_u = AverageMeter()\n mask_probs = AverageMeter()\n end = time.time()\n\n if args.world_size > 1:\n labeled_epoch = 0\n unlabeled_epoch = 0\n 
labeled_trainloader.sampler.set_epoch(labeled_epoch)\n unlabeled_trainloader.sampler.set_epoch(unlabeled_epoch)\n\n labeled_iter = iter(labeled_trainloader)\n unlabeled_iter = iter(unlabeled_trainloader)\n\n model.train()\n for epoch in range(args.start_epoch, args.epochs):\n if args.world_size > 1:\n pass\n if not args.no_progress:\n p_bar = tqdm(range(args.eval_step),\n disable=args.local_rank not in [-1, 0])\n for batch_idx in range(args.eval_step):\n try:\n inputs_x, targets_x = labeled_iter.next()\n except:\n if args.world_size > 1:\n labeled_epoch += 1\n labeled_trainloader.sampler.set_epoch(labeled_epoch)\n labeled_iter = iter(labeled_trainloader)\n inputs_x, targets_x = labeled_iter.next()\n\n try:\n (inputs_u_w, inputs_u_s), _ = unlabeled_iter.next()\n except:\n if args.world_size > 1:\n unlabeled_epoch += 1\n unlabeled_trainloader.sampler.set_epoch(unlabeled_epoch)\n unlabeled_iter = iter(unlabeled_trainloader)\n (inputs_u_w, inputs_u_s), _ = unlabeled_iter.next()\n\n data_time.update(time.time() - end)\n batch_size = inputs_x.shape[0]\n inputs = interleave(\n torch.cat((inputs_x, inputs_u_w, inputs_u_s)), 2 * args.mu + 1).to(args.device)\n targets_x = targets_x.to(args.device)\n logits = model(inputs)\n logits = de_interleave(logits, 2 * args.mu + 1)\n logits_x = logits[:batch_size]\n logits_u_w, logits_u_s = logits[batch_size:].chunk(2)\n del logits\n\n Lx = F.cross_entropy(logits_x, targets_x, reduction='mean')\n\n pseudo_label = torch.softmax(logits_u_w.detach() / args.T, dim=-1)\n max_probs, targets_u = torch.max(pseudo_label, dim=-1)\n mask = max_probs.ge(args.threshold).float()\n\n Lu = (F.cross_entropy(logits_u_s, targets_u,\n reduction='none') * mask).mean()\n\n loss = Lx + args.lambda_u * Lu\n\n if args.amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n losses.update(loss.item())\n losses_x.update(Lx.item())\n losses_u.update(Lu.item())\n optimizer.step()\n scheduler.step()\n if args.use_ema:\n ema_model.update(model)\n model.zero_grad()\n\n batch_time.update(time.time() - end)\n end = time.time()\n mask_probs.update(mask.mean().item())\n if not args.no_progress:\n p_bar.set_description(\n \"Train Epoch: {epoch}/{epochs:4}. Iter: {batch:4}/{iter:4}. LR: {lr:.4f}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. Loss_x: {loss_x:.4f}. Loss_u: {loss_u:.4f}. Mask: {mask:.2f}. 
\".format(\n epoch=epoch + 1,\n epochs=args.epochs,\n batch=batch_idx + 1,\n iter=args.eval_step,\n lr=scheduler.get_last_lr()[0],\n data=data_time.avg,\n bt=batch_time.avg,\n loss=losses.avg,\n loss_x=losses_x.avg,\n loss_u=losses_u.avg,\n mask=mask_probs.avg))\n p_bar.update()\n\n if not args.no_progress:\n p_bar.close()\n\n if args.use_ema:\n test_model = ema_model.ema\n else:\n test_model = model\n\n if args.local_rank in [-1, 0]:\n test_loss, test_acc = test(args, test_loader, test_model, epoch)\n\n args.writer.add_scalar('train/1.train_loss', losses.avg, epoch)\n args.writer.add_scalar('train/2.train_loss_x', losses_x.avg, epoch)\n args.writer.add_scalar('train/3.train_loss_u', losses_u.avg, epoch)\n args.writer.add_scalar('train/4.mask', mask_probs.avg, epoch)\n args.writer.add_scalar('test/1.test_acc', test_acc, epoch)\n args.writer.add_scalar('test/2.test_loss', test_loss, epoch)\n\n is_best = test_acc > best_acc\n best_acc = max(test_acc, best_acc)\n\n model_to_save = model.module if hasattr(model, \"module\") else model\n if args.use_ema:\n ema_to_save = ema_model.ema.module if hasattr(\n ema_model.ema, \"module\") else ema_model.ema\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model_to_save.state_dict(),\n 'ema_state_dict': ema_to_save.state_dict() if args.use_ema else None,\n 'acc': test_acc,\n 'best_acc': best_acc,\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(),\n }, is_best, args.out)\n\n test_accs.append(test_acc)\n logger.info('Best top-1 acc: {:.2f}'.format(best_acc))\n logger.info('Mean top-1 acc: {:.2f}\\n'.format(\n np.mean(test_accs[-20:])))\n\n if args.local_rank in [-1, 0]:\n args.writer.close()\n\n\ndef test(args, test_loader, model, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n end = time.time()\n\n if not args.no_progress:\n test_loader = tqdm(test_loader,\n disable=args.local_rank not in [-1, 0])\n\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(test_loader):\n data_time.update(time.time() - end)\n model.eval()\n\n inputs = inputs.to(args.device)\n targets = targets.to(args.device)\n outputs = model(inputs)\n loss = F.cross_entropy(outputs, targets)\n\n prec1, prec5 = accuracy(outputs, targets, topk=(1, 3))\n losses.update(loss.item(), inputs.shape[0])\n top1.update(prec1.item(), inputs.shape[0])\n top5.update(prec5.item(), inputs.shape[0])\n batch_time.update(time.time() - end)\n end = time.time()\n if not args.no_progress:\n test_loader.set_description(\n \"Test Iter: {batch:4}/{iter:4}. Data: {data:.3f}s. Batch: {bt:.3f}s. Loss: {loss:.4f}. top1: {top1:.2f}. top5: {top5:.2f}. \".format(\n batch=batch_idx + 1,\n iter=len(test_loader),\n data=data_time.avg,\n bt=batch_time.avg,\n loss=losses.avg,\n top1=top1.avg,\n top5=top5.avg,\n ))\n if not args.no_progress:\n test_loader.close()\n\n logger.info(\"top-1 acc: {:.2f}\".format(top1.avg))\n logger.info(\"top-5 acc: {:.2f}\".format(top5.avg))\n return losses.avg, top1.avg\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.distributed.get_world_size",
"torch.cat",
"numpy.mean",
"torch.nn.functional.cross_entropy",
"torch.load",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.utils.tensorboard.SummaryWriter",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.max",
"torch.save",
"torch.optim.SGD",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.distributed.barrier",
"numpy.random.seed",
"torch.no_grad",
"torch.optim.lr_scheduler.LambdaLR"
]
] |
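The core of the `train.py` in the row above is the FixMatch unlabeled-loss step: weak-augmentation logits are sharpened into pseudo-labels, and only predictions above a confidence threshold contribute to the strong-augmentation cross-entropy. Below is a minimal sketch of just that step, using random tensors in place of model outputs; `T = 1.0`, `threshold = 0.95`, and `lambda_u = 1.0` mirror the script's argparse defaults, and the batch size and class count are arbitrary.

```python
# Sketch of the FixMatch pseudo-labeling loss computed inside train()'s loop,
# with random stand-in logits instead of a real model forward pass.
import torch
import torch.nn.functional as F

T, threshold, lambda_u = 1.0, 0.95, 1.0
logits_u_w = torch.randn(64, 8)   # weak-augmentation logits (batch of 64, 8 classes)
logits_u_s = torch.randn(64, 8)   # strong-augmentation logits for the same images

pseudo_label = torch.softmax(logits_u_w.detach() / T, dim=-1)
max_probs, targets_u = torch.max(pseudo_label, dim=-1)
mask = max_probs.ge(threshold).float()            # keep only confident pseudo-labels

Lu = (F.cross_entropy(logits_u_s, targets_u, reduction='none') * mask).mean()
print("unlabeled loss term:", (lambda_u * Lu).item())
```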
sandutsar/jax
|
[
"409684f9c2d72a93b0d242afde79fc424ede5250"
] |
[
"jax/_src/numpy/lax_numpy.py"
] |
[
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# pytype: skip-file\n\"\"\"\nImplements the NumPy API, using the primitives in :mod:`jax.lax`.\n\nNumPy operations are implemented in Python in terms of the primitive operations\nin :mod:`jax.lax`. Since NumPy operations are not primitive and instead are\nimplemented in terms of :mod:`jax.lax` operations, we do not need to define\ntransformation rules such as gradient or batching rules. Instead,\ntransformations for NumPy primitives can be derived from the transformation\nrules for the underlying :code:`lax` primitives.\n\"\"\"\n\nimport builtins\nimport collections\nimport collections.abc\nimport operator\nimport types\nfrom typing import Any, Sequence, FrozenSet, Optional, Tuple, Union, cast\nfrom textwrap import dedent as _dedent\nimport warnings\n\nimport numpy as np\nimport opt_einsum\n\nimport jax\nfrom jax import jit, custom_jvp\nfrom .vectorize import vectorize\nfrom .util import _wraps\nfrom jax import core\nfrom jax._src import dtypes\nfrom jax import errors\nfrom jax.core import UnshapedArray, ShapedArray, ConcreteArray, canonicalize_shape\nfrom jax.config import config\nfrom jax.interpreters.xla import DeviceArray, _DeviceArray, _CppDeviceArray\nfrom jax import lax\nfrom jax._src.lax.lax import _device_put_raw\nfrom jax import ops\nfrom jax._src.ops import scatter\nfrom jax._src.util import (partial, unzip2, prod as _prod, subvals, safe_zip, ceil_of_ratio,\n canonicalize_axis as _canonicalize_axis, maybe_named_axis)\nfrom jax.tree_util import tree_leaves, tree_flatten, tree_map\n\nnewaxis = None\n\n# Common docstring additions:\n\n_PRECISION_DOC = \"\"\"\\\nIn addition to the original NumPy arguments listed below, also supports\n``precision`` for extra control over matrix-multiplication precision\non supported devices. ``precision`` may be set to ``None``, which means\ndefault precision for the backend, a ``lax.Precision`` enum value\n(``Precision.DEFAULT``, ``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple\nof two ``lax.Precision`` enums indicating separate precision for each argument.\n\"\"\"\n\n# We replace some builtin names to follow Numpy's API, so we capture here.\n_abs = builtins.abs\n_all = builtins.all\n_any = builtins.any\n_max = builtins.max\n_min = builtins.min\n_sum = builtins.sum\n_divmod = builtins.divmod\n\n# NumPy constants\n\npi = np.pi\ne = np.e\neuler_gamma = np.euler_gamma\ninf = np.inf\nNINF = np.NINF\nPZERO = np.PZERO\nNZERO = np.NZERO\nnan = np.nan\n\n# And some numpy utility functions\nset_printoptions = np.set_printoptions\n\n# We want isinstance(x, np.ndarray) checks in user code to work with the our\n# array-like types, including DeviceArray and UnshapedArray (i.e. the abstract\n# array base class). 
We can override the isinstance behavior directly, without\n# having the complexity of multiple inheritance on those classes, by defining\n# the ndarray class to have a metaclass with special __instancecheck__ behavior.\n_arraylike_types = (np.ndarray, UnshapedArray, DeviceArray)\n\nclass _ArrayMeta(type(np.ndarray)): # type: ignore\n \"\"\"Metaclass for overriding ndarray isinstance checks.\"\"\"\n\n def __instancecheck__(self, instance):\n try:\n return isinstance(instance.aval, _arraylike_types)\n except AttributeError:\n return isinstance(instance, _arraylike_types)\n\nclass ndarray(np.ndarray, metaclass=_ArrayMeta):\n dtype: np.dtype\n shape: Tuple[int, ...]\n size: int\n\n def __init__(shape, dtype=None, buffer=None, offset=0, strides=None,\n order=None):\n raise TypeError(\"jax.numpy.ndarray() should not be instantiated explicitly.\"\n \" Use jax.numpy.array, or jax.numpy.zeros instead.\")\n\n\niscomplexobj = np.iscomplexobj\n\nshape = _shape = np.shape\nndim = _ndim = np.ndim\nsize = np.size\n_dtype = dtypes.result_type\n\n# At present JAX doesn't have a reason to distinguish between scalars and arrays\n# in its object system. Further, we want JAX scalars to have the same type\n# promotion behaviors as JAX arrays. Rather than introducing a new type of JAX\n# scalar object with JAX promotion behaviors, instead we make the JAX scalar\n# types return JAX arrays when instantiated.\n\nclass _ScalarMeta(type):\n def __hash__(self):\n return hash(self.dtype.type)\n\n def __eq__(self, other):\n return id(self) == id(other) or self.dtype.type == other\n\n def __ne__(self, other):\n return not (self == other)\n\n def __call__(self, x):\n return array(x, dtype=self.dtype)\n\n def __instancecheck__(self, instance):\n return isinstance(instance, self.dtype.type)\n\ndef _make_scalar_type(np_scalar_type):\n return _ScalarMeta(np_scalar_type.__name__, (object,),\n {\"dtype\": np.dtype(np_scalar_type)})\n\nbool_ = _make_scalar_type(np.bool_)\nuint8 = _make_scalar_type(np.uint8)\nuint16 = _make_scalar_type(np.uint16)\nuint32 = _make_scalar_type(np.uint32)\nuint64 = _make_scalar_type(np.uint64)\nint8 = _make_scalar_type(np.int8)\nint16 = _make_scalar_type(np.int16)\nint32 = _make_scalar_type(np.int32)\nint64 = _make_scalar_type(np.int64)\nbfloat16 = _make_scalar_type(dtypes.bfloat16)\nfloat16 = _make_scalar_type(np.float16)\nfloat32 = single = _make_scalar_type(np.float32)\nfloat64 = double = _make_scalar_type(np.float64)\ncomplex64 = csingle = _make_scalar_type(np.complex64)\ncomplex128 = cdouble = _make_scalar_type(np.complex128)\n\nint_ = int32 if dtypes.int_ == np.int32 else int64\nfloat_ = float32 if dtypes.float_ == np.float32 else float64\ncomplex_ = complex64 if dtypes.complex_ == np.complex64 else complex128\n\nnumber = np.number\ninexact = np.inexact\ncomplexfloating = np.complexfloating\nfloating = np.floating\ninteger = np.integer\nsignedinteger = np.signedinteger\nunsignedinteger = np.unsignedinteger\n\nflexible = np.flexible\ncharacter = np.character\nobject_ = np.object_\n\niinfo = dtypes.iinfo\nfinfo = dtypes.finfo\n\ndtype = np.dtype\ncan_cast = dtypes.can_cast\nissubsctype = dtypes.issubsctype\npromote_types = dtypes.promote_types\n\nComplexWarning = np.ComplexWarning\n\narray_str = np.array_str\narray_repr = np.array_repr\n\nsave = np.save\nsavez = np.savez\nload = np.load\n\n\n### utility functions\n\n_DEFAULT_TYPEMAP = {\n np.bool_: bool_,\n np.int_: int_,\n np.float_: float_,\n np.complex_: complex_\n}\n\n_INT_DTYPES = {\n 16: np.int16,\n 32: np.int32,\n 64: np.int64,\n}\n\ndef 
_np_array(obj, dtype=None, **kwargs):\n \"\"\"Return a properly-typed numpy array.\n\n `_np_array(obj, **kwds)` is equivalent to `np.array(obj, **kwds)`, with the\n exception that when obj.dtype is not defined and dtype is not specified, it\n uses Jax's default dtypes.\n \"\"\"\n arr = np.array(obj, dtype=dtype, **kwargs)\n obj_dtype = getattr(obj, 'dtype', None)\n arr_dtype = np.dtype(arr.dtype).type\n if dtype is None and obj_dtype is None and arr_dtype in _DEFAULT_TYPEMAP:\n arr = arr.astype(_DEFAULT_TYPEMAP[arr_dtype])\n return arr\n\n_np_asarray = partial(_np_array, copy=False)\n\ndef _promote_shapes(fun_name, *args):\n \"\"\"Prepend implicit leading singleton dimensions for Numpy broadcasting.\"\"\"\n if len(args) < 2:\n return args\n else:\n shapes = [shape(arg) for arg in args]\n nonscalar_ranks = [len(shp) for shp in shapes if shp]\n if not nonscalar_ranks or len(set(nonscalar_ranks)) == 1:\n return args\n else:\n if config.jax_numpy_rank_promotion != \"allow\":\n _rank_promotion_warning_or_error(fun_name, shapes)\n result_rank = len(lax.broadcast_shapes(*shapes))\n return [broadcast_to(arg, (1,) * (result_rank - len(shp)) + shp)\n for arg, shp in zip(args, shapes)]\n\ndef _rank_promotion_warning_or_error(fun_name, shapes):\n if config.jax_numpy_rank_promotion == \"warn\":\n msg = (\"Following NumPy automatic rank promotion for {} on shapes {}. \"\n \"Set the jax_numpy_rank_promotion config option to 'allow' to \"\n \"disable this warning; for more information, see \"\n \"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.\")\n warnings.warn(msg.format(fun_name, ' '.join(map(str, shapes))))\n elif config.jax_numpy_rank_promotion == \"raise\":\n msg = (\"Operands could not be broadcast together for {} on shapes {} \"\n \"and with the config option jax_numpy_rank_promotion='raise'. \"\n \"For more information, see \"\n \"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.\")\n raise ValueError(msg.format(fun_name, ' '.join(map(str, shapes))))\n\ndef _promote_dtypes(*args):\n \"\"\"Convenience function to apply Numpy argument dtype promotion.\"\"\"\n # TODO(dougalm,mattjj): This is a performance bottleneck. 
Consider memoizing.\n if len(args) < 2:\n return args\n else:\n to_dtype, weak_type = dtypes._lattice_result_type(*args)\n to_dtype = dtypes.canonicalize_dtype(to_dtype)\n return [lax._convert_element_type(x, to_dtype, weak_type) for x in args]\n\ndef _promote_dtypes_inexact(*args):\n \"\"\"Convenience function to apply Numpy argument dtype promotion.\n\n Promotes arguments to an inexact type.\"\"\"\n to_dtype, weak_type = dtypes._lattice_result_type(*args)\n to_dtype = dtypes.canonicalize_dtype(to_dtype)\n to_dtype_inexact = _to_inexact_dtype(to_dtype)\n weak_type = (weak_type and to_dtype == to_dtype_inexact)\n return [lax._convert_element_type(x, to_dtype_inexact, weak_type) for x in args]\n\ndef _to_inexact_dtype(dtype):\n \"\"\"Promotes a dtype into an inexact dtype, if it is not already one.\"\"\"\n return dtype if issubdtype(dtype, inexact) else promote_types(dtype, float_)\n\ndef _complex_elem_type(dtype):\n \"\"\"Returns the float type of the real/imaginary parts of a complex dtype.\"\"\"\n return np.abs(np.zeros((), dtype)).dtype\n\ndef _result_dtype(op, *args):\n \"\"\"Compute result dtype of applying op to arguments with given dtypes.\"\"\"\n args = [np.ones((0,) * ndim(arg), _dtype(arg)) for arg in args]\n return _dtype(op(*args))\n\n\ndef _arraylike(x):\n return isinstance(x, ndarray) or isscalar(x) or hasattr(x, '__jax_array__')\n\ndef _check_arraylike(fun_name, *args):\n \"\"\"Check if all args fit JAX's definition of arraylike.\"\"\"\n assert isinstance(fun_name, str), f\"fun_name must be a string. Got {fun_name}\"\n if _any(not _arraylike(arg) for arg in args):\n pos, arg = next((i, arg) for i, arg in enumerate(args)\n if not _arraylike(arg))\n msg = \"{} requires ndarray or scalar arguments, got {} at position {}.\"\n raise TypeError(msg.format(fun_name, type(arg), pos))\n\ndef _check_no_float0s(fun_name, *args):\n \"\"\"Check if none of the args have dtype float0.\"\"\"\n if _any(dtypes.dtype(arg) is dtypes.float0 for arg in args):\n raise TypeError(\n f\"Called {fun_name} with a float0 array. \"\n \"float0s do not support any operations by design because they \"\n \"are not compatible with non-trivial vector spaces. No implicit dtype \"\n \"conversion is done. You can use np.zeros_like(arr, dtype=np.float) \"\n \"to cast a float0 array to a regular zeros array. 
\\n\"\n \"If you didn't expect to get a float0 you might have accidentally \"\n \"taken a gradient with respect to an integer argument.\")\n\ndef _promote_args(fun_name, *args):\n \"\"\"Convenience function to apply Numpy argument shape and dtype promotion.\"\"\"\n _check_arraylike(fun_name, *args)\n _check_no_float0s(fun_name, *args)\n return _promote_shapes(fun_name, *_promote_dtypes(*args))\n\ndef _promote_args_inexact(fun_name, *args):\n \"\"\"Convenience function to apply Numpy argument shape and dtype promotion.\n\n Promotes non-inexact types to an inexact type.\"\"\"\n _check_arraylike(fun_name, *args)\n _check_no_float0s(fun_name, *args)\n return _promote_shapes(fun_name, *_promote_dtypes_inexact(*args))\n\ndef _convert_and_clip_integer(val, dtype):\n \"\"\"\n Convert integer-typed val to specified integer dtype, clipping to dtype\n range rather than wrapping.\n\n Args:\n val: value to be converted\n dtype: dtype of output\n\n Returns:\n equivalent of val in new dtype\n\n Examples\n --------\n Normal integer type conversion will wrap:\n\n >>> val = jnp.uint32(0xFFFFFFFF)\n >>> val.astype('int32')\n DeviceArray(-1, dtype=int32)\n\n This function clips to the values representable in the new type:\n\n >>> _convert_and_clip_integer(val, 'int32')\n DeviceArray(2147483647, dtype=int32)\n \"\"\"\n val = val if isinstance(val, ndarray) else asarray(val)\n dtype = dtypes.canonicalize_dtype(dtype)\n if not (issubdtype(dtype, integer) and issubdtype(val.dtype, integer)):\n raise TypeError(\"_convert_and_clip_integer only accepts integer dtypes.\")\n\n val_dtype = dtypes.canonicalize_dtype(val.dtype)\n if val_dtype != val.dtype:\n # TODO(jakevdp): this is a weird corner case; need to figure out how to handle it.\n # This happens in X32 mode and can either come from a jax value created in another\n # context, or a Python integer converted to int64.\n pass\n min_val = _constant_like(val, _max(iinfo(dtype).min, iinfo(val_dtype).min))\n max_val = _constant_like(val, _min(iinfo(dtype).max, iinfo(val_dtype).max))\n return clip(val, min_val, max_val).astype(dtype)\n\n\ndef _constant_like(x, const):\n return np.array(const, dtype=_dtype(x))\n\n### implementations of numpy functions in terms of lax\n\n@_wraps(np.fmin)\ndef fmin(x1, x2):\n return where((x1 < x2) | isnan(x2), x1, x2)\n\n@_wraps(np.fmax)\ndef fmax(x1, x2):\n return where((x1 > x2) | isnan(x2), x1, x2)\n\n@_wraps(np.issubdtype)\ndef issubdtype(arg1, arg2):\n return dtypes.issubdtype(arg1, arg2)\n\n@_wraps(np.isscalar)\ndef isscalar(element):\n if hasattr(element, '__jax_array__'):\n element = element.__jax_array__()\n return dtypes.is_python_scalar(element) or np.isscalar(element)\n\niterable = np.iterable\n\n@_wraps(np.result_type)\ndef result_type(*args):\n return dtypes.result_type(*args)\n\ndef _one_to_one_unop(numpy_fn, lax_fn, promote_to_inexact=False, lax_doc=False):\n if promote_to_inexact:\n fn = lambda x: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x))\n else:\n fn = lambda x: lax_fn(*_promote_args(numpy_fn.__name__, x))\n if lax_doc:\n doc = _dedent('\\n\\n'.join(lax_fn.__doc__.split('\\n\\n')[1:])).strip()\n return _wraps(numpy_fn, lax_description=doc)(fn)\n else:\n return _wraps(numpy_fn)(fn)\n\ndef _one_to_one_binop(numpy_fn, lax_fn, promote_to_inexact=False, lax_doc=False):\n if promote_to_inexact:\n fn = lambda x1, x2: lax_fn(*_promote_args_inexact(numpy_fn.__name__, x1, x2))\n else:\n fn = lambda x1, x2: lax_fn(*_promote_args(numpy_fn.__name__, x1, x2))\n if lax_doc:\n doc = 
_dedent('\\n\\n'.join(lax_fn.__doc__.split('\\n\\n')[1:])).strip()\n return _wraps(numpy_fn, lax_description=doc)(fn)\n else:\n return _wraps(numpy_fn)(fn)\n\ndef _maybe_bool_binop(numpy_fn, lax_fn, bool_lax_fn, lax_doc=False):\n def fn(x1, x2):\n x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)\n return lax_fn(x1, x2) if x1.dtype != bool_ else bool_lax_fn(x1, x2)\n return _wraps(numpy_fn)(fn)\n if lax_doc:\n doc = _dedent('\\n\\n'.join(lax_fn.__doc__.split('\\n\\n')[1:])).strip()\n return _wraps(numpy_fn, lax_description=doc)(fn)\n else:\n return _wraps(numpy_fn)(fn)\n\nfabs = _one_to_one_unop(np.fabs, lax.abs, True)\nbitwise_not = _one_to_one_unop(np.bitwise_not, lax.bitwise_not)\ninvert = _one_to_one_unop(np.invert, lax.bitwise_not)\nnegative = _one_to_one_unop(np.negative, lax.neg)\npositive = _one_to_one_unop(np.positive, lambda x: x)\n\nfloor = _one_to_one_unop(np.floor, lax.floor, True)\nceil = _one_to_one_unop(np.ceil, lax.ceil, True)\nexp = _one_to_one_unop(np.exp, lax.exp, True)\nlog = _one_to_one_unop(np.log, lax.log, True)\nexpm1 = _one_to_one_unop(np.expm1, lax.expm1, True)\nlog1p = _one_to_one_unop(np.log1p, lax.log1p, True)\nsin = _one_to_one_unop(np.sin, lax.sin, True)\ncos = _one_to_one_unop(np.cos, lax.cos, True)\ntan = _one_to_one_unop(np.tan, lax.tan, True)\narcsin = _one_to_one_unop(np.arcsin, lax.asin, True)\narccos = _one_to_one_unop(np.arccos, lax.acos, True)\narctan = _one_to_one_unop(np.arctan, lax.atan, True)\nsinh = _one_to_one_unop(np.sinh, lax.sinh, True)\ncosh = _one_to_one_unop(np.cosh, lax.cosh, True)\narcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)\ntanh = _one_to_one_unop(np.tanh, lax.tanh, True)\narcsinh = _one_to_one_unop(np.arcsinh, lax.asinh, True)\narctanh = _one_to_one_unop(np.arctanh, lax.atanh, True)\nsqrt = _one_to_one_unop(np.sqrt, lax.sqrt, True)\n\n\nadd = _maybe_bool_binop(np.add, lax.add, lax.bitwise_or)\nbitwise_and = _one_to_one_binop(np.bitwise_and, lax.bitwise_and)\nbitwise_or = _one_to_one_binop(np.bitwise_or, lax.bitwise_or)\nbitwise_xor = _one_to_one_binop(np.bitwise_xor, lax.bitwise_xor)\nleft_shift = _one_to_one_binop(np.left_shift, lax.shift_left)\nequal = _one_to_one_binop(np.equal, lax.eq)\nmultiply = _maybe_bool_binop(np.multiply, lax.mul, lax.bitwise_and)\nnot_equal = _one_to_one_binop(np.not_equal, lax.ne)\nsubtract = _one_to_one_binop(np.subtract, lax.sub)\narctan2 = _one_to_one_binop(np.arctan2, lax.atan2, True)\nminimum = _one_to_one_binop(np.minimum, lax.min)\nmaximum = _one_to_one_binop(np.maximum, lax.max)\nfloat_power = _one_to_one_binop(np.float_power, lax.pow, True)\nnextafter = _one_to_one_binop(np.nextafter, lax.nextafter, True, True)\n\n@_wraps(np.arccosh)\ndef arccosh(x):\n # Note: arccosh is multi-valued for complex input, and lax.acosh uses a different\n # convention than np.arccosh.\n out = lax.acosh(*_promote_args_inexact(\"arccosh\", x))\n if issubdtype(out.dtype, np.complexfloating):\n out = where(real(out) < 0, lax.neg(out), out)\n return out\n\ndef _comparison_op(numpy_fn, lax_fn):\n def fn(x1, x2):\n x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)\n # Comparison on complex types are defined as a lexicographic ordering on\n # the (real, imag) pair.\n if issubdtype(_dtype(x1), complexfloating):\n rx = lax.real(x1)\n ry = lax.real(x2)\n return lax.select(lax.eq(rx, ry), lax_fn(lax.imag(x1), lax.imag(x2)),\n lax_fn(rx, ry))\n return lax_fn(x1, x2)\n return _wraps(numpy_fn)(fn)\n\ngreater_equal = _comparison_op(np.greater_equal, lax.ge)\ngreater = _comparison_op(np.greater, lax.gt)\nless_equal = 
_comparison_op(np.less_equal, lax.le)\nless = _comparison_op(np.less, lax.lt)\n\n\ndef _logical_op(np_op, bitwise_op):\n @_wraps(np_op, update_doc=False)\n def op(*args):\n zero = lambda x: lax.full_like(x, shape=(), fill_value=0)\n args = (x if issubdtype(_dtype(x), bool_) else lax.ne(x, zero(x))\n for x in args)\n return bitwise_op(*_promote_args(np_op.__name__, *args))\n return op\n\nlogical_and = _logical_op(np.logical_and, lax.bitwise_and)\nlogical_not = _logical_op(np.logical_not, lax.bitwise_not)\nlogical_or = _logical_op(np.logical_or, lax.bitwise_or)\nlogical_xor = _logical_op(np.logical_xor, lax.bitwise_xor)\n\n\n@_wraps(np.right_shift)\ndef right_shift(x1, x2):\n x1, x2 = _promote_args(np.right_shift.__name__, x1, x2)\n lax_fn = lax.shift_right_logical if \\\n np.issubdtype(x1.dtype, np.unsignedinteger) else lax.shift_right_arithmetic\n return lax_fn(x1, x2)\n\n\n@_wraps(np.absolute)\ndef absolute(x):\n _check_arraylike('absolute', x)\n dt = _dtype(x)\n return x if dt == bool_ or issubdtype(dt, unsignedinteger) else lax.abs(x)\nabs = _wraps(np.abs)(absolute)\n\n\n@_wraps(np.rint)\ndef rint(x):\n _check_arraylike('rint', x)\n dtype = _dtype(x)\n if issubdtype(dtype, integer):\n return lax.convert_element_type(x, float_)\n if issubdtype(dtype, complexfloating):\n return lax.complex(rint(lax.real(x)), rint(lax.imag(x)))\n return lax.round(x, lax.RoundingMethod.TO_NEAREST_EVEN)\n\n\n@_wraps(np.sign)\ndef sign(x):\n _check_arraylike('sign', x)\n dtype = _dtype(x)\n if issubdtype(dtype, complexfloating):\n re = lax.real(x)\n return lax.complex(\n lax.sign(where(re != 0, re, lax.imag(x))), _constant_like(re, 0))\n return lax.sign(x)\n\n\n@_wraps(np.copysign)\ndef copysign(x1, x2):\n x1, x2 = _promote_args_inexact(\"copysign\", x1, x2)\n if issubdtype(_dtype(x1), complexfloating):\n raise TypeError(\"copysign does not support complex-valued inputs\")\n return where(signbit(x2), -lax.abs(x1), lax.abs(x1))\n\n\n@_wraps(np.true_divide)\ndef true_divide(x1, x2):\n x1, x2 = _promote_args_inexact(\"true_divide\", x1, x2)\n return lax.div(x1, x2)\n\ndivide = true_divide\n\n@_wraps(np.floor_divide)\ndef floor_divide(x1, x2):\n x1, x2 = _promote_args(\"floor_divide\", x1, x2)\n dtype = _dtype(x1)\n if issubdtype(dtype, integer):\n quotient = lax.div(x1, x2)\n select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)\n # TODO(mattjj): investigate why subtracting a scalar was causing promotion\n return where(select, quotient - np.array(1, _dtype(quotient)), quotient)\n elif issubdtype(dtype, complexfloating):\n x1r = lax.real(x1)\n x1i = lax.imag(x1)\n x2r = lax.real(x2)\n x2i = lax.imag(x2)\n which = lax.ge(lax.abs(x2r), lax.abs(x2i))\n rat1 = where(which, lax._const(x2i, 1), lax.div(x2r, x2i))\n rat2 = where(which, lax.div(x2i, x2r), lax._const(x2i, 1))\n out = lax.floor(lax.div(lax.add(lax.mul(x1r, rat1), lax.mul(x1i, rat2)),\n lax.add(lax.mul(x2r, rat1), lax.mul(x2i, rat2))))\n return lax.convert_element_type(out, dtype)\n else:\n return _float_divmod(x1, x2)[0]\n\n\n@_wraps(np.divmod)\ndef divmod(x1, x2):\n x1, x2 = _promote_args(\"divmod\", x1, x2)\n if issubdtype(_dtype(x1), integer):\n return floor_divide(x1, x2), remainder(x1, x2)\n else:\n return _float_divmod(x1, x2)\n\n\ndef _float_divmod(x1, x2):\n # see float_divmod in floatobject.c of CPython\n mod = lax.rem(x1, x2)\n div = lax.div(lax.sub(x1, mod), x2)\n\n ind = lax.bitwise_and(mod != 0, lax.sign(x2) != lax.sign(mod))\n mod = lax.select(ind, mod + x2, mod)\n div = lax.select(ind, div - _constant_like(div, 1), 
div)\n\n return lax.round(div), mod\n\n\n@_wraps(np.power)\ndef power(x1, x2):\n # Special case for concrete integer scalars: use binary exponentiation.\n # Using lax.pow may be imprecise for floating-point values; the goal of this\n # code path is to make sure we end up with a precise output for the common\n # pattern ``x ** 2`` or similar.\n if isinstance(core.get_aval(x2), ConcreteArray):\n try:\n x2 = operator.index(x2)\n except TypeError:\n pass\n else:\n return lax.integer_pow(x1, x2)\n\n x1, x2 = _promote_args(\"power\", x1, x2)\n dtype = _dtype(x1)\n if not issubdtype(dtype, integer):\n return lax.pow(x1, x2)\n\n # Integer power => use binary exponentiation.\n\n # TODO(phawkins): add integer pow support to XLA.\n bits = 6 # Anything more would overflow for any x1 > 1\n zero = _constant_like(x2, 0)\n one = _constant_like(x2, 1)\n # Initialize acc carefully such that pow(0, x2) is zero for x2 != 0\n acc = where(lax.bitwise_and(lax.eq(x1, zero), lax.ne(x2, zero)), zero, one)\n for _ in range(bits):\n acc = where(lax.bitwise_and(x2, one), lax.mul(acc, x1), acc)\n x1 = lax.mul(x1, x1)\n x2 = lax.shift_right_logical(x2, one)\n return acc\n\n\n@custom_jvp\n@_wraps(np.logaddexp)\ndef logaddexp(x1, x2):\n x1, x2 = _promote_shapes(\"logaddexp\", *_promote_dtypes_inexact(x1, x2))\n amax = lax.max(x1, x2)\n delta = lax.sub(x1, x2)\n return lax.select(isnan(delta),\n lax.add(x1, x2), # NaNs or infinities of the same sign.\n lax.add(amax, lax.log1p(lax.exp(-lax.abs(delta)))))\n\n@logaddexp.defjvp\ndef _logaddexp_jvp(primals, tangents):\n x1, x2 = primals\n t1, t2 = tangents\n x1, x2, t1, t2 = broadcast_arrays(x1, x2, t1, t2)\n primal_out = logaddexp(x1, x2)\n tangent_out = (t1 * exp(_replace_inf(x1) - _replace_inf(primal_out)) +\n t2 * exp(_replace_inf(x2) - _replace_inf(primal_out)))\n return primal_out, tangent_out\n\ndef _replace_inf(x):\n return lax.select(isposinf(x), zeros_like(x), x)\n\n\n@custom_jvp\n@_wraps(np.logaddexp2)\ndef logaddexp2(x1, x2):\n x1, x2 = _promote_shapes(\"logaddexp2\", *_promote_dtypes_inexact(x1, x2))\n amax = lax.max(x1, x2)\n delta = lax.sub(x1, x2)\n return lax.select(isnan(delta),\n lax.add(x1, x2), # NaNs or infinities of the same sign.\n lax.add(amax, lax.div(lax.log1p(exp2(-lax.abs(delta))),\n _constant_like(x1, np.log(2)))))\n@logaddexp2.defjvp\ndef _logaddexp2_jvp(primals, tangents):\n x1, x2 = primals\n t1, t2 = tangents\n x1, x2, t1, t2 = broadcast_arrays(x1, x2, t1, t2)\n primal_out = logaddexp2(x1, x2)\n tangent_out = (t1 * 2 ** (_replace_inf(x1) - _replace_inf(primal_out)) +\n t2 * 2 ** (_replace_inf(x2) - _replace_inf(primal_out)))\n return primal_out, tangent_out\n\n\n@_wraps(np.log2)\ndef log2(x):\n x, = _promote_dtypes_inexact(x)\n return lax.div(lax.log(x), lax.log(_constant_like(x, 2)))\n\n\n@_wraps(np.log10)\ndef log10(x):\n x, = _promote_dtypes_inexact(x)\n return lax.div(lax.log(x), lax.log(_constant_like(x, 10)))\n\n\n@_wraps(np.exp2)\ndef exp2(x):\n x, = _promote_dtypes_inexact(x)\n return lax.exp(lax.mul(lax.log(_constant_like(x, 2)), x))\n\n@_wraps(np.signbit)\ndef signbit(x):\n x, = _promote_shapes(\"signbit\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, integer):\n return lax.lt(x, _constant_like(x, 0))\n elif issubdtype(dtype, bool_):\n return full_like(x, False, dtype=bool_)\n elif not issubdtype(dtype, floating):\n raise ValueError(\n \"jax.numpy.signbit is not well defined for %s\" % dtype)\n\n # TPU supports BF16 but not S16 types, so as a workaround, convert BF16 to\n # F32.\n if dtype == bfloat16:\n dtype = float32\n x = 
lax.convert_element_type(x, float32)\n\n info = finfo(dtype)\n if info.bits not in _INT_DTYPES:\n raise NotImplementedError(\n \"jax.numpy.signbit only supports 16, 32, and 64-bit types.\")\n int_type = _INT_DTYPES[info.bits]\n x = lax.bitcast_convert_type(x, int_type)\n return lax.convert_element_type(x >> (info.nexp + info.nmant), np.bool_)\n\n\n@_wraps(np.trapz)\ndef trapz(y, x=None, dx=1.0, axis: int = -1):\n _check_arraylike('trapz', y)\n y = moveaxis(y, axis, -1)\n if x is not None:\n if ndim(x) == 1:\n dx = diff(x)\n else:\n dx = moveaxis(diff(x, axis=axis), axis, -1)\n return 0.5 * (dx * (y[..., 1:] + y[..., :-1])).sum(-1)\n\n\n@_wraps(np.trunc)\ndef trunc(x):\n _check_arraylike('trunc', x)\n return where(lax.lt(x, lax._const(x, 0)), ceil(x), floor(x))\n\n\n@partial(jit, static_argnums=(2, 3, 4))\ndef _conv(x, y, mode, op, precision):\n if ndim(x) != 1 or ndim(y) != 1:\n raise ValueError(f\"{op}() only support 1-dimensional inputs.\")\n x, y = _promote_dtypes_inexact(x, y)\n if len(x) == 0 or len(y) == 0:\n raise ValueError(f\"{op}: inputs cannot be empty, got shapes {x.shape} and {y.shape}.\")\n\n out_order = slice(None)\n if op == 'correlate':\n y = conj(y)\n if len(x) < len(y):\n x, y = y, x\n out_order = slice(None, None, -1)\n elif op == 'convolve':\n if len(x) < len(y):\n x, y = y, x\n y = y[::-1]\n\n if mode == 'valid':\n padding = [(0, 0)]\n elif mode == 'same':\n padding = [(y.shape[0] // 2, y.shape[0] - y.shape[0] // 2 - 1)]\n elif mode == 'full':\n padding = [(y.shape[0] - 1, y.shape[0] - 1)]\n else:\n raise ValueError(\"mode must be one of ['full', 'same', 'valid']\")\n\n result = lax.conv_general_dilated(x[None, None, :], y[None, None, :], (1,),\n padding, precision=precision)\n return result[0, 0, out_order]\n\n\n@_wraps(np.convolve, lax_description=_PRECISION_DOC)\ndef convolve(a, v, mode='full', *, precision=None):\n _check_arraylike(\"convolve\", a, v)\n return _conv(a, v, mode, 'convolve', precision)\n\n\n@_wraps(np.correlate, lax_description=_PRECISION_DOC)\ndef correlate(a, v, mode='valid', *, precision=None):\n _check_arraylike(\"correlate\", a, v)\n return _conv(a, v, mode, 'correlate', precision)\n\n\ndef _normalize_float(x):\n info = finfo(_dtype(x))\n cond = lax.abs(x) < info.tiny\n x1 = where(cond, x * lax._const(x, 1 << info.nmant), x)\n x2 = where(cond, lax._const(np.int32, -info.nmant), lax._const(np.int32, 0))\n int_type = _INT_DTYPES[info.bits]\n return lax.bitcast_convert_type(x1, int_type), x2\n\n\n@_wraps(np.ldexp)\n@jit\ndef ldexp(x1, x2):\n dtype = dtypes.canonicalize_dtype(_result_dtype(np.ldexp, x1, x2))\n x1, x2 = _promote_shapes(\"ldexp\", x1, x2)\n x1 = lax.convert_element_type(x1, dtype)\n\n info = finfo(dtype)\n mask = (1 << info.nexp) - 1\n bias = ((1 << info.nexp) - 1) >> 1\n\n int_type = _INT_DTYPES[info.bits]\n\n x, e = _normalize_float(x1)\n x2 += e + ((x >> info.nmant) & mask) - bias\n\n # find underflow/overflow before denormalization\n underflow_cond = x2 < -(bias + info.nmant)\n overflow_cond = x2 > bias\n\n m = ones_like(x, dtype=dtype)\n\n # denormals\n cond = x2 < -bias + 1\n x2 = where(cond, x2 + info.nmant, x2)\n m = where(cond, m / (1 << info.nmant), m)\n\n x2 = lax.convert_element_type(x2, np.int32)\n x &= ~(mask << info.nmant)\n x |= ((lax.convert_element_type(x2, int_type) + bias) << info.nmant)\n\n x = lax.convert_element_type(m, dtype) * lax.bitcast_convert_type(x, dtype)\n\n # underflow\n x = where(underflow_cond, zeros_like(x, dtype=dtype), x)\n # overflow\n x = where(overflow_cond, lax.sign(x1) * full_like(x, 
np.inf), x)\n # ldexp(x1, x2) = x1 for x1 = inf, -inf, nan, 0\n return where(isinf(x1) | isnan(x1) | (x1 == 0), x1, x)\n\n\n@_wraps(np.frexp)\n@jit\ndef frexp(x):\n x = asarray(x)\n if issubdtype(x.dtype, complexfloating):\n raise TypeError(\"frexp does not support complex-valued inputs\")\n elif not issubdtype(x.dtype, floating):\n x = lax.convert_element_type(x, float_)\n\n dtype = _dtype(x)\n info = finfo(dtype)\n mask = (1 << info.nexp) - 1\n bias = ((1 << info.nexp) - 1) >> 1\n\n x1, x2 = _normalize_float(x)\n x2 += ((x1 >> info.nmant) & mask) - bias + 1\n x1 &= ~(mask << info.nmant)\n x1 |= (bias - 1) << info.nmant\n x1 = lax.bitcast_convert_type(x1, dtype)\n\n cond = isinf(x) | isnan(x) | (x == 0)\n x2 = where(cond, zeros_like(x2), x2)\n return where(cond, x, x1), lax.convert_element_type(x2, int32)\n\n\n@_wraps(np.remainder)\ndef remainder(x1, x2):\n x1, x2 = _promote_args(\"remainder\", x1, x2)\n zero = _constant_like(x1, 0)\n trunc_mod = lax.rem(x1, x2)\n trunc_mod_not_zero = lax.ne(trunc_mod, zero)\n do_plus = lax.bitwise_and(\n lax.ne(lax.lt(trunc_mod, zero), lax.lt(x2, zero)), trunc_mod_not_zero)\n return lax.select(do_plus, lax.add(trunc_mod, x2), trunc_mod)\nmod = _wraps(np.mod)(remainder)\n\n\n@_wraps(np.fmod)\ndef fmod(x1, x2):\n _check_arraylike(\"fmod\", x1, x2)\n if issubdtype(_dtype(x1, x2), integer):\n x2 = where(x2 == 0, 1, x2)\n return lax.rem(*_promote_args(\"fmod\", x1, x2))\n\n\n@_wraps(np.cbrt)\ndef cbrt(x):\n _check_arraylike(\"cbrt\", x)\n x, = _promote_dtypes_inexact(x)\n return lax.sign(x) * power(lax.abs(x), _constant_like(x, 1. / 3.))\n\n\n@_wraps(np.square)\ndef square(x):\n _check_arraylike(\"square\", x)\n return lax.integer_pow(x, 2)\n\n\n@_wraps(np.deg2rad)\ndef deg2rad(x):\n _check_arraylike(\"deg2rad\", x)\n x, = _promote_dtypes_inexact(x)\n return lax.mul(x, lax._const(x, pi / 180))\n\n\n@_wraps(np.rad2deg)\ndef rad2deg(x):\n _check_arraylike(\"rad2deg\", x)\n x, = _promote_dtypes_inexact(x)\n return lax.mul(x, lax._const(x, 180 / pi))\n\n\ndegrees = rad2deg\nradians = deg2rad\n\n\n@_wraps(np.histogram_bin_edges)\ndef histogram_bin_edges(a, bins=10, range=None, weights=None):\n if isinstance(bins, str):\n raise NotImplementedError(\"string values for `bins` not implemented.\")\n a = ravel(a)\n b = asarray(bins)\n if b.ndim == 1:\n return b\n if range is None:\n range = (a.min(), a.max())\n assert len(range) == 2\n range = asarray(range)\n range = (where(ptp(range) == 0, range[0] - 0.5, range[0]),\n where(ptp(range) == 0, range[1] + 0.5, range[1]))\n dtype = _dtype(a)\n if issubdtype(dtype, integer):\n dtype = promote_types(dtype, float32)\n return linspace(range[0], range[1], bins + 1, dtype=dtype)\n\n\n@_wraps(np.histogram)\ndef histogram(a, bins=10, range=None, weights=None, density=None):\n if weights is not None and a.shape != weights.shape:\n raise ValueError(\"weights should have the same shape as a.\")\n a = ravel(a)\n if weights is not None:\n weights = ravel(weights)\n else:\n weights = ones_like(a)\n bin_edges = histogram_bin_edges(a, bins, range, weights)\n bin_idx = searchsorted(bin_edges, a, side='right')\n bin_idx = where(a == bin_edges[-1], len(bin_edges) - 1, bin_idx)\n counts = bincount(bin_idx, weights, length=len(bin_edges))[1:]\n if density:\n bin_widths = diff(bin_edges)\n counts = counts / bin_widths / counts.sum()\n return counts, bin_edges\n\n@_wraps(np.histogram2d)\ndef histogram2d(x, y, bins=10, range=None, weights=None, density=None):\n\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1 and N != 2:\n x_edges 
= y_edges = asarray(bins)\n bins = [x_edges, y_edges]\n\n sample = transpose(asarray([x, y]))\n hist, edges = histogramdd(sample, bins, range, weights, density)\n return hist, edges[0], edges[1]\n\n@_wraps(np.histogramdd)\ndef histogramdd(sample, bins=10, range=None, weights=None, density=None):\n _check_arraylike(\"histogramdd\", sample)\n N, D = shape(sample)\n\n if weights is not None and weights.shape != (N,):\n raise ValueError(\"should have one weight for each sample.\")\n\n try:\n num_bins = len(bins)\n if num_bins != D:\n raise ValueError(\"should be a bin for each dimension.\")\n except TypeError:\n # when bin_size is integer, the same bin is used for each dimension\n bins = D * [bins]\n\n bin_idx_by_dim = D*[None]\n nbins = np.empty(D, int)\n bin_edges_by_dim = D*[None]\n dedges = D*[None]\n\n for i in builtins.range(D):\n bin_edges = histogram_bin_edges(sample[:, i], bins[i], range, weights)\n bin_idx = searchsorted(bin_edges, sample[:, i], side='right')\n bin_idx = where(sample[:, i] == bin_edges[-1], bin_idx - 1, bin_idx)\n bin_idx_by_dim[i] = bin_idx\n nbins[i] = len(bin_edges) + 1\n bin_edges_by_dim[i] = bin_edges\n dedges[i] = diff(bin_edges_by_dim[i])\n\n xy = ravel_multi_index(bin_idx_by_dim, nbins, mode='clip')\n hist = bincount(xy, weights, length=nbins.prod())\n hist = reshape(hist, nbins)\n core = D*(slice(1, -1),)\n hist = hist[core]\n\n if density:\n s = sum(hist)\n for i in builtins.range(D):\n _shape = np.ones(D, int)\n _shape[i] = nbins[i] - 2\n hist = hist / reshape(dedges[i], _shape)\n\n hist /= s\n\n return hist, bin_edges_by_dim\n\n@_wraps(np.heaviside)\ndef heaviside(x1, x2):\n _check_arraylike(\"heaviside\", x1, x2)\n x1, x2 = _promote_dtypes_inexact(x1, x2)\n zero = lax._const(x1, 0)\n return where(lax.lt(x1, zero), zero,\n where(lax.gt(x1, zero), lax._const(x1, 1), x2))\n\n\n@_wraps(np.hypot)\ndef hypot(x1, x2):\n _check_arraylike(\"hypot\", x1, x2)\n x1, x2 = _promote_dtypes_inexact(x1, x2)\n x1 = lax.abs(x1)\n x2 = lax.abs(x2)\n x1, x2 = maximum(x1, x2), minimum(x1, x2)\n return lax.select(x1 == 0, x1, x1 * lax.sqrt(1 + lax.square(lax.div(x2, lax.select(x1 == 0, ones_like(x1), x1)))))\n\n\n@_wraps(np.reciprocal)\ndef reciprocal(x):\n _check_arraylike(\"reciprocal\", x)\n x, = _promote_dtypes_inexact(x)\n return lax.integer_pow(x, -1)\n\n\n@_wraps(np.sinc, update_doc=False)\ndef sinc(x):\n _check_arraylike(\"sinc\", x)\n x, = _promote_dtypes_inexact(x)\n eq_zero = lax.eq(x, lax._const(x, 0))\n pi_x = lax.mul(lax._const(x, pi), x)\n safe_pi_x = where(eq_zero, lax._const(x, 1), pi_x)\n return where(eq_zero, _sinc_maclaurin(0, pi_x),\n lax.div(lax.sin(safe_pi_x), safe_pi_x))\n\n@partial(custom_jvp, nondiff_argnums=(0,))\ndef _sinc_maclaurin(k, x):\n # compute the kth derivative of x -> sin(x)/x evaluated at zero (since we\n # compute the monomial term in the jvp rule)\n if k % 2:\n return lax.full_like(x, 0)\n else:\n return lax.full_like(x, (-1) ** (k // 2) / (k + 1))\n\n@_sinc_maclaurin.defjvp\ndef _sinc_maclaurin_jvp(k, primals, tangents):\n (x,), (t,) = primals, tangents\n return _sinc_maclaurin(k, x), _sinc_maclaurin(k + 1, x) * t\n\n_ARRAY_VIEW_DOC = \"\"\"\nThe JAX version of this function will return a copy rather than a view of the input.\n\"\"\"\n\n@_wraps(np.transpose, lax_description=_ARRAY_VIEW_DOC)\ndef transpose(a, axes=None):\n _check_arraylike(\"transpose\", a)\n axes = np.arange(ndim(a))[::-1] if axes is None else axes\n return lax.transpose(a, axes)\n\n\n@_wraps(np.rot90, lax_description=_ARRAY_VIEW_DOC)\ndef rot90(m, k=1, axes=(0, 1)):\n 
_check_arraylike(\"rot90\", m)\n ax1, ax2 = axes\n ax1 = _canonicalize_axis(ax1, ndim(m))\n ax2 = _canonicalize_axis(ax2, ndim(m))\n if ax1 == ax2:\n raise ValueError(\"Axes must be different\") # same as numpy error\n k = k % 4\n if k == 0:\n return m\n elif k == 2:\n return flip(flip(m, ax1), ax2)\n else:\n perm = list(range(m.ndim))\n perm[ax1], perm[ax2] = perm[ax2], perm[ax1]\n if k == 1:\n return transpose(flip(m, ax2), perm)\n else:\n return flip(transpose(m, perm), ax2)\n\n\n@_wraps(np.flip, lax_description=_ARRAY_VIEW_DOC)\ndef flip(m, axis: Optional[Union[int, Tuple[int, ...]]] = None):\n _check_arraylike(\"flip\", m)\n if axis is None:\n return lax.rev(m, list(range(len(shape(m)))))\n axis = _ensure_index_tuple(axis)\n return lax.rev(m, [_canonicalize_axis(ax, ndim(m)) for ax in axis])\n\n\n@_wraps(np.fliplr, lax_description=_ARRAY_VIEW_DOC)\ndef fliplr(m):\n return flip(m, 1)\n\n\n@_wraps(np.flipud, lax_description=_ARRAY_VIEW_DOC)\ndef flipud(m):\n return flip(m, 0)\n\n\n@_wraps(np.conjugate)\ndef conjugate(x):\n _check_arraylike(\"conjugate\", x)\n return lax.conj(x) if iscomplexobj(x) else x\nconj = conjugate\n\n\n@_wraps(np.imag)\ndef imag(val):\n _check_arraylike(\"imag\", val)\n return lax.imag(val) if iscomplexobj(val) else zeros_like(val)\n\n\n@_wraps(np.real)\ndef real(val):\n _check_arraylike(\"real\", val)\n return lax.real(val) if iscomplexobj(val) else val\n\n\n@_wraps(np.iscomplex)\ndef iscomplex(x):\n i = imag(x)\n return lax.ne(i, lax._const(i, 0))\n\n@_wraps(np.isreal)\ndef isreal(x):\n i = imag(x)\n return lax.eq(i, lax._const(i, 0))\n\n@_wraps(np.angle)\ndef angle(z):\n re = real(z)\n im = imag(z)\n dtype = _dtype(re)\n if not issubdtype(dtype, inexact) or (\n issubdtype(_dtype(z), floating) and ndim(z) == 0):\n dtype = dtypes.canonicalize_dtype(float_)\n re = lax.convert_element_type(re, dtype)\n im = lax.convert_element_type(im, dtype)\n return lax.atan2(im, re)\n\n\n@_wraps(np.diff)\ndef diff(a, n=1, axis: int = -1, prepend=None, append=None):\n _check_arraylike(\"diff\", a)\n n = core.concrete_or_error(operator.index, n, \"'n' argument of jnp.diff\")\n axis = core.concrete_or_error(operator.index, axis, \"'axis' argument of jnp.diff\")\n if n == 0:\n return a\n if n < 0:\n raise ValueError(f\"order must be non-negative but got {n}\")\n if ndim(a) == 0:\n raise ValueError(f\"diff requires input that is at least one dimensional; got {a}\")\n\n nd = a.ndim\n axis = _canonicalize_axis(axis, nd)\n\n combined = []\n if prepend is not None:\n _check_arraylike(\"diff\", prepend)\n if isscalar(prepend):\n shape = list(a.shape)\n shape[axis] = 1\n prepend = broadcast_to(prepend, tuple(shape))\n combined.append(prepend)\n\n combined.append(a)\n\n if append is not None:\n _check_arraylike(\"diff\", append)\n if isscalar(append):\n shape = list(a.shape)\n shape[axis] = 1\n append = broadcast_to(append, tuple(shape))\n combined.append(append)\n\n if len(combined) > 1:\n a = concatenate(combined, axis)\n\n slice1 = [slice(None)] * nd\n slice2 = [slice(None)] * nd\n slice1[axis] = slice(1, None)\n slice2[axis] = slice(None, -1)\n slice1_tuple = tuple(slice1)\n slice2_tuple = tuple(slice2)\n\n op = not_equal if a.dtype == np.bool_ else subtract\n for _ in range(n):\n a = op(a[slice1_tuple], a[slice2_tuple])\n\n return a\n\n_EDIFF1D_DOC = \"\"\"\\\nUnlike NumPy's implementation of ediff1d, :py:func:`jax.numpy.ediff1d` will not\nissue an error if casting ``to_end`` or ``to_begin`` to the type of ``ary``\nloses precision.\n\"\"\"\n\n@_wraps(np.ediff1d, 
lax_description=_EDIFF1D_DOC)\ndef ediff1d(ary, to_end=None, to_begin=None):\n ary = ravel(asarray(ary))\n result = lax.sub(ary[1:], ary[:-1])\n if to_begin is not None:\n result = concatenate((ravel(asarray(to_begin, dtype=ary.dtype)), result))\n if to_end is not None:\n result = concatenate((result, ravel(asarray(to_end, dtype=ary.dtype))))\n return result\n\n\n@partial(jit, static_argnums=2)\ndef _gradient(a, varargs, axis):\n def gradient_along_axis(a, h, axis):\n sliced = partial(lax.slice_in_dim, a, axis=axis)\n a_grad = concatenate((\n (sliced(1, 2) - sliced(0, 1)), # upper edge\n (sliced(2, None) - sliced(None, -2)) * 0.5, # inner\n (sliced(-1, None) - sliced(-2, -1)), # lower edge\n ), axis)\n return a_grad / h\n\n if axis is None:\n axis = range(a.ndim)\n else:\n if isinstance(axis, int):\n axis = (axis,)\n if not isinstance(axis, tuple) and not isinstance(axis, list):\n raise ValueError("Give `axis` either as int or iterable")\n elif len(axis) == 0:\n return []\n axis = [_canonicalize_axis(i, a.ndim) for i in axis]\n\n if _min([s for i, s in enumerate(a.shape) if i in axis]) < 2:\n raise ValueError("Shape of array too small to calculate "\n "a numerical gradient, "\n "at least 2 elements are required.")\n len_axes = len(axis)\n n = len(varargs)\n if n == 0 or varargs is None:\n # no spacing\n dx = [1.0] * len_axes\n elif n == 1:\n # single value for all axes\n dx = varargs * len_axes\n elif n == len_axes:\n dx = varargs\n else:\n raise TypeError("Invalid number of spacing arguments %d" % n)\n\n if ndim(dx[0]) != 0:\n raise NotImplementedError("Non-constant spacing not implemented")\n\n # TODO: use jax.lax loop tools if possible\n a_grad = [gradient_along_axis(a, h, ax) for ax, h in zip(axis, dx)]\n\n if len(axis) == 1:\n a_grad = a_grad[0]\n\n return a_grad\n\n\n@_wraps(np.gradient, skip_params=['edge_order'])\ndef gradient(f, *varargs, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n edge_order=None):\n if edge_order is not None:\n raise NotImplementedError("The 'edge_order' argument to jnp.gradient is not supported.")\n return _gradient(f, varargs, axis)\n\n\n@_wraps(np.isrealobj)\ndef isrealobj(x):\n return not iscomplexobj(x)\n\n\n@_wraps(np.reshape, lax_description=_ARRAY_VIEW_DOC)\ndef reshape(a, newshape, order="C"):\n _check_arraylike("reshape", a)\n try:\n return a.reshape(newshape, order=order) # forward to method for ndarrays\n except AttributeError:\n return _reshape(a, newshape, order=order)\n\ndef _compute_newshape(a, newshape):\n """Fixes a -1 value in newshape, if present."""\n # other errors, like having more than one -1, are caught downstream, in\n # reshape_shape_rule.\n try: iter(newshape)\n except: iterable = False\n else: iterable = True\n newshape = core.canonicalize_shape(newshape if iterable else [newshape])\n return tuple(- core.divide_shape_sizes(np.shape(a), newshape)\n if core.symbolic_equal_dim(d, -1) else d\n for d in newshape)\n\n\ndef _reshape(a, *args, order="C"):\n newshape = _compute_newshape(a, args[0] if len(args) == 1 else args)\n if order == "C":\n return lax.reshape(a, newshape, None)\n elif order == "F":\n dims = np.arange(ndim(a))[::-1]\n return lax.reshape(a, newshape[::-1], dims).T\n elif order == "A":\n raise NotImplementedError("np.reshape order=A is not implemented.")\n else:\n raise ValueError("Unexpected value for 'order' argument: {}.".format(order))\n\ndef _ensure_index_tuple(x: Any) -> Tuple[int, ...]:\n """Convert x to a tuple of indices."""\n try:\n return (operator.index(x),)\n 
except TypeError:\n return tuple(map(operator.index, x))\n\ndef _transpose(a, *args):\n if not args:\n axis = None\n elif len(args) == 1:\n axis = args[0] if args[0] is None else _ensure_index_tuple(args[0])\n else:\n axis = _ensure_index_tuple(args)\n return transpose(a, axis)\n\n@_wraps(np.ravel, lax_description=_ARRAY_VIEW_DOC)\ndef ravel(a, order=\"C\"):\n _check_arraylike(\"ravel\", a)\n if order == \"K\":\n raise NotImplementedError(\"Ravel not implemented for order='K'.\")\n return reshape(a, (size(a),), order)\n\n\n@_wraps(np.ravel_multi_index)\ndef ravel_multi_index(multi_index, dims, mode='raise', order='C'):\n assert len(multi_index) == len(dims), f\"len(multi_index)={len(multi_index)} != len(dims)={len(dims)}\"\n dims = tuple(core.concrete_or_error(int, d, \"in `dims` argument of ravel_multi_index().\") for d in dims)\n _check_arraylike(\"ravel_multi_index\", *multi_index)\n for index in multi_index:\n if mode == 'raise':\n core.concrete_or_error(array, index,\n \"The error occurred because ravel_multi_index was jit-compiled\"\n \" with mode='raise'. Use mode='wrap' or mode='clip' instead.\")\n if not issubdtype(_dtype(index), integer):\n raise TypeError(\"only int indices permitted\")\n if mode == \"raise\":\n if _any(any((i < 0) | (i >= d)) for i, d in zip(multi_index, dims)):\n raise ValueError(\"invalid entry in coordinates array\")\n elif mode == \"clip\":\n multi_index = [clip(i, 0, d - 1) for i, d in zip(multi_index, dims)]\n elif mode == \"wrap\":\n multi_index = [i % d for i, d in zip(multi_index, dims)]\n else:\n raise ValueError(f\"invalid mode={mode!r}. Expected 'raise', 'wrap', or 'clip'\")\n\n if order == \"F\":\n strides = np.cumprod((1,) + dims[:-1])\n elif order == \"C\":\n strides = np.cumprod((1,) + dims[1:][::-1])[::-1]\n else:\n raise ValueError(f\"invalid order={order!r}. 
Expected 'C' or 'F'\")\n\n result = 0\n for i, s in zip(multi_index, strides):\n result = result + i * s\n return result\n\n\n_UNRAVEL_INDEX_DOC = \"\"\"\\\nUnlike numpy's implementation of unravel_index, negative indices are accepted\nand out-of-bounds indices are clipped.\n\"\"\"\n\n@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)\ndef unravel_index(indices, shape):\n indices = asarray(indices)\n sizes = array(tuple(shape) + (1,))\n cumulative_sizes = cumprod(sizes[::-1])[::-1]\n total_size = cumulative_sizes[0]\n # Clip so raveling and unraveling an oob index will not change the behavior\n clipped_indices = clip(indices, -total_size, total_size - 1)\n # Add enough trailing dims to avoid conflict with clipped_indices\n cumulative_sizes = cumulative_sizes.reshape([-1] + [1] * indices.ndim)\n clipped_indices = expand_dims(clipped_indices, axis=0)\n idx = clipped_indices % cumulative_sizes[:-1] // cumulative_sizes[1:]\n # TODO(jakevdp): return tuple(idx) once it behaves properly (#3821)\n return tuple(lax.index_in_dim(idx, i, keepdims=False) for i in range(idx.shape[0]))\n\n@_wraps(np.resize)\ndef resize(a, new_shape):\n new_shape = _ensure_index_tuple(new_shape)\n\n if _any(dim_length < 0 for dim_length in new_shape):\n raise ValueError(\"all elements of `new_shape` must be non-negative\")\n\n a = ravel(a)\n\n new_size = _prod(new_shape)\n if a.size == 0 or new_size == 0:\n return zeros_like(a, shape=new_shape)\n\n repeats = ceil_of_ratio(new_size, a.size)\n a = tile(a, repeats)[:new_size]\n\n return reshape(a, new_shape)\n\n@_wraps(np.squeeze, lax_description=_ARRAY_VIEW_DOC)\ndef squeeze(a, axis: Optional[Union[int, Tuple[int, ...]]] = None):\n _check_arraylike(\"squeeze\", a)\n if axis is None:\n a_shape = shape(a)\n axis = tuple(i for i, d in enumerate(a_shape) if d == 1)\n elif not isinstance(axis, tuple):\n axis = (axis,)\n return lax.squeeze(a, axis)\n\n\n@_wraps(np.expand_dims)\ndef expand_dims(a, axis: Union[int, Tuple[int, ...]]):\n _check_arraylike(\"expand_dims\", a)\n if not isinstance(axis, tuple):\n axis = (axis,)\n return lax.expand_dims(a, axis)\n\n\n@_wraps(np.swapaxes, lax_description=_ARRAY_VIEW_DOC)\ndef swapaxes(a, axis1: int, axis2: int):\n _check_arraylike(\"swapaxes\", a)\n perm = np.arange(ndim(a))\n perm[axis1], perm[axis2] = perm[axis2], perm[axis1]\n return lax.transpose(a, perm)\n\n\n@_wraps(np.moveaxis, lax_description=_ARRAY_VIEW_DOC)\ndef moveaxis(a, source: Union[int, Sequence[int]],\n destination: Union[int, Sequence[int]]):\n _check_arraylike(\"moveaxis\", a)\n source_axes: Tuple[int, ...]\n destination_axes: Tuple[int, ...]\n try:\n source_axes = (operator.index(source),)\n except TypeError:\n source_axes = tuple(cast(Sequence[int], source))\n try:\n destination_axes = (operator.index(destination),)\n except TypeError:\n destination_axes = tuple(cast(Sequence[int], destination))\n source_axes = tuple(_canonicalize_axis(i, ndim(a)) for i in source_axes)\n destination_axes = tuple(_canonicalize_axis(i, ndim(a))\n for i in destination_axes)\n if len(source_axes) != len(destination_axes):\n raise ValueError(\"Inconsistent number of elements: {} vs {}\"\n .format(len(source_axes), len(destination_axes)))\n perm = [i for i in range(ndim(a)) if i not in source_axes]\n for dest, src in sorted(zip(destination_axes, source_axes)):\n perm.insert(dest, src)\n return lax.transpose(a, perm)\n\n\n@_wraps(np.isclose)\ndef isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n a, b = _promote_args(\"isclose\", asarray(a), asarray(b))\n dtype = 
_dtype(a)\n if issubdtype(dtype, inexact):\n if issubdtype(dtype, complexfloating):\n dtype = _complex_elem_type(dtype)\n rtol = lax.convert_element_type(rtol, dtype)\n atol = lax.convert_element_type(atol, dtype)\n out = lax.le(\n lax.abs(lax.sub(a, b)),\n lax.add(atol, lax.mul(rtol, lax.abs(b))))\n # This corrects the comparisons for infinite and nan values\n a_inf = isinf(a)\n b_inf = isinf(b)\n any_inf = logical_or(a_inf, b_inf)\n both_inf = logical_and(a_inf, b_inf)\n # Make all elements where either a or b are infinite to False\n out = logical_and(out, logical_not(any_inf))\n # Make all elements where both a or b are the same inf to True\n same_value = lax.eq(a, b)\n same_inf = logical_and(both_inf, same_value)\n out = logical_or(out, same_inf)\n\n # Make all elements where either a or b is NaN to False\n a_nan = isnan(a)\n b_nan = isnan(b)\n any_nan = logical_or(a_nan, b_nan)\n out = logical_and(out, logical_not(any_nan))\n if equal_nan:\n # Make all elements where both a and b is NaN to True\n both_nan = logical_and(a_nan, b_nan)\n out = logical_or(out, both_nan)\n return out\n else:\n return lax.eq(a, b)\n\n\n@_wraps(np.interp)\ndef interp(x, xp, fp, left=None, right=None, period=None):\n if shape(xp) != shape(fp) or ndim(xp) != 1:\n raise ValueError(\"xp and fp must be one-dimensional arrays of equal size\")\n x, xp, fp = map(asarray, _promote_dtypes_inexact(x, xp, fp))\n if period is not None:\n if period == 0:\n raise ValueError(f\"period must be a non-zero value; got {period}\")\n period = abs(period)\n x = x % period\n xp = xp % period\n xp, fp = lax.sort_key_val(xp, fp)\n xp = concatenate([xp[-1:] - period, xp, xp[:1] + period])\n fp = concatenate([fp[-1:], fp, fp[:1]])\n\n i = clip(searchsorted(xp, x, side='right'), 1, len(xp) - 1)\n df = fp[i] - fp[i - 1]\n dx = xp[i] - xp[i - 1]\n delta = x - xp[i - 1]\n f = where((dx == 0), fp[i], fp[i - 1] + (delta / dx) * df)\n\n if period is None:\n f = where(x < xp[0], fp[0] if left is None else left, f)\n f = where(x > xp[-1], fp[-1] if right is None else right, f)\n return f\n\n\n@_wraps(np.in1d, lax_description=\"\"\"\nIn the JAX version, the `assume_unique` argument is not referenced.\n\"\"\")\ndef in1d(ar1, ar2, assume_unique=False, invert=False):\n ar1 = ravel(ar1)\n ar2 = ravel(ar2)\n # Note: an algorithm based on searchsorted has better scaling, but in practice\n # is very slow on accelerators because it relies on lax control flow. If XLA\n # ever supports binary search natively, we should switch to this:\n # ar2 = jnp.sort(ar2)\n # ind = jnp.searchsorted(ar2, ar1)\n # if invert:\n # return ar1 != ar2[ind]\n # else:\n # return ar1 == ar2[ind]\n if invert:\n return (ar1[:, None] != ar2).all(-1)\n else:\n return (ar1[:, None] == ar2).any(-1)\n\n@_wraps(np.setdiff1d, lax_description=\"\"\"\nIn the JAX version, the `assume_unique` argument is not referenced.\n\"\"\")\ndef setdiff1d(ar1, ar2, assume_unique=False):\n ar1 = core.concrete_or_error(asarray, ar1, \"The error arose in setdiff1d()\")\n ar2 = core.concrete_or_error(asarray, ar2, \"The error arose in setdiff1d()\")\n\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n\n idx = in1d(ar1, ar2, invert=True)\n return ar1[idx]\n\n\n_UNION1D_DOC = \"\"\"\\\nBecause the size of the output of ``union1d`` is data-dependent, the function is not\ntypically compatible with JIT. The JAX version adds the optional `size` argument which\nspecifies the size of the output array: it must be specified statically for ``jnp.union1d``\nto be traced. 
If specified, the first `size` unique elements will be returned; if there are\nfewer unique elements than `size` indicates, the return value will be padded with\nthe minimum value of the union.\"\"\"\n\n@_wraps(np.union1d, lax_description=_UNION1D_DOC)\ndef union1d(ar1, ar2, *, size=None):\n # TODO(jakevdp): call _check_arraylike on inputs\n ar1 = asarray(ar1)\n ar2 = asarray(ar2)\n if size is None:\n ar1 = core.concrete_or_error(None, ar1, \"The error arose in union1d()\")\n ar2 = core.concrete_or_error(None, ar2, \"The error arose in union1d()\")\n else:\n size = core.concrete_or_error(operator.index, size, \"The error arose in union1d()\")\n return unique(concatenate((ar1, ar2), axis=None), size=size)\n\n\n@_wraps(np.setxor1d, lax_description=\"\"\"\nIn the JAX version, the input arrays are explicilty flattened regardless\nof assume_unique value.\n\"\"\")\ndef setxor1d(ar1, ar2, assume_unique=False):\n ar1 = core.concrete_or_error(asarray, ar1, \"The error arose in setxor1d()\")\n ar2 = core.concrete_or_error(asarray, ar2, \"The error arose in setxor1d()\")\n\n ar1 = ravel(ar1)\n ar2 = ravel(ar2)\n\n if not assume_unique:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n\n aux = concatenate((ar1, ar2))\n if aux.size == 0:\n return aux\n\n aux = sort(aux)\n flag = concatenate((array([True]), aux[1:] != aux[:-1], array([True])))\n return aux[flag[1:] & flag[:-1]]\n\n\n@partial(jit, static_argnums=2)\ndef _intersect1d_sorted_mask(ar1, ar2, return_indices=False):\n \"\"\"\n Helper function for intersect1d which is jit-able\n \"\"\"\n ar = concatenate((ar1, ar2))\n if return_indices:\n iota = lax.broadcasted_iota(np.int64, shape(ar), dimension=0)\n aux, indices = lax.sort_key_val(ar, iota)\n else:\n aux = sort(ar)\n\n mask = aux[1:] == aux[:-1]\n if return_indices:\n return aux, mask, indices\n else:\n return aux, mask\n\n\n@_wraps(np.intersect1d)\ndef intersect1d(ar1, ar2, assume_unique=False, return_indices=False):\n ar1 = core.concrete_or_error(asarray, ar1, \"The error arose in intersect1d()\")\n ar2 = core.concrete_or_error(asarray, ar2, \"The error arose in intersect1d()\")\n\n if not assume_unique:\n if return_indices:\n ar1, ind1 = unique(ar1, return_index=True)\n ar2, ind2 = unique(ar2, return_index=True)\n else:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n else:\n ar1 = ravel(ar1)\n ar2 = ravel(ar2)\n\n if return_indices:\n aux, mask, aux_sort_indices = _intersect1d_sorted_mask(ar1, ar2, return_indices)\n else:\n aux, mask = _intersect1d_sorted_mask(ar1, ar2, return_indices)\n\n int1d = aux[:-1][mask]\n\n if return_indices:\n ar1_indices = aux_sort_indices[:-1][mask]\n ar2_indices = aux_sort_indices[1:][mask] - ar1.size\n if not assume_unique:\n ar1_indices = ind1[ar1_indices]\n ar2_indices = ind2[ar2_indices]\n\n return int1d, ar1_indices, ar2_indices\n else:\n return int1d\n\n\n@_wraps(np.isin, lax_description=\"\"\"\nIn the JAX version, the `assume_unique` argument is not referenced.\n\"\"\")\ndef isin(element, test_elements, assume_unique=False, invert=False):\n result = in1d(element, test_elements, assume_unique=assume_unique, invert=invert)\n return result.reshape(shape(element))\n\n\n# The `jit` on `where` exists to avoid materializing constants in cases like\n# `np.where(np.zeros(1000), 7, 4)`. 
In op-by-op mode, we don't want to\n# materialize the broadcast forms of scalar arguments.\n@jit\ndef _where(condition, x=None, y=None):\n if x is None or y is None:\n raise ValueError(\"Either both or neither of the x and y arguments should \"\n \"be provided to jax.numpy.where, got {} and {}.\"\n .format(x, y))\n if not issubdtype(_dtype(condition), bool_):\n condition = lax.ne(condition, zeros_like(condition))\n x, y = _promote_dtypes(x, y)\n condition, x, y = broadcast_arrays(condition, x, y)\n return lax.select(condition, x, y) if not core.is_empty_shape(np.shape(x)) else x\n\n\n_WHERE_DOC = \"\"\"\\\nAt present, JAX does not support JIT-compilation of the single-argument form\nof :py:func:`jax.numpy.where` because its output shape is data-dependent. The\nthree-argument form does not have a data-dependent shape and can be JIT-compiled\nsuccessfully. Alternatively, you can specify the optional ``size`` keyword:\nif specified, the first ``size`` True elements will be returned; if there\nare fewer True elements than ``size`` indicates, the index arrays will be\npadded with zeros.\n\"\"\"\n\n@_wraps(np.where, update_doc=False, lax_description=_WHERE_DOC)\ndef where(condition, x=None, y=None, *, size=None):\n if x is None and y is None:\n return nonzero(asarray(condition), size=size)\n else:\n if size is not None:\n raise ValueError(\"size argument cannot be used in three-term where function.\")\n return _where(condition, x, y)\n\n\n@_wraps(np.select)\ndef select(condlist, choicelist, default=0):\n if len(condlist) != len(choicelist):\n msg = \"condlist must have length equal to choicelist ({} vs {})\"\n raise ValueError(msg.format(len(condlist), len(choicelist)))\n if len(condlist) == 0:\n raise ValueError(\"condlist must be non-empty\")\n choices = _promote_dtypes(default, *choicelist)\n choicelist = choices[1:]\n output = choices[0]\n for cond, choice in zip(condlist[::-1], choicelist[::-1]):\n output = where(cond, choice, output)\n return output\n\n\n@_wraps(np.bincount, lax_description=\"\"\"\\\nJax adds the optional `length` parameter which specifies the output length, and\ndefaults to ``x.max() + 1``. It must be specified for bincount to be compilable.\nValues larger than the specified length will be discarded.\n\nAdditionally, while ``np.bincount`` raises an error if the input array contains\nnegative values, ``jax.numpy.bincount`` treats negative values as zero.\n\"\"\")\ndef bincount(x, weights=None, minlength=0, *, length=None):\n _check_arraylike(\"bincount\", x)\n if not issubdtype(_dtype(x), integer):\n msg = f\"x argument to bincount must have an integer type; got {x.dtype}\"\n raise TypeError(msg)\n if ndim(x) != 1:\n raise ValueError(\"only 1-dimensional input supported.\")\n minlength = core.concrete_or_error(operator.index, minlength,\n \"The error occurred because of argument 'minlength' of jnp.bincount.\")\n if length is None:\n x = core.concrete_or_error(asarray, x,\n \"The error occured because of argument 'x' of jnp.bincount. 
\"\n \"To avoid this error, pass a static `length` argument.\")\n length = max(x, initial=-1) + 1\n else:\n length = core.concrete_or_error(operator.index, length,\n \"The error occurred because of argument 'length' of jnp.bincount.\")\n length = _max(length, minlength)\n if weights is None:\n weights = 1\n elif shape(x) != shape(weights):\n raise ValueError(\"shape of weights must match shape of x.\")\n return zeros(length, _dtype(weights)).at[clip(x, 0)].add(weights)\n\n@_wraps(getattr(np, \"broadcast_shapes\", None))\ndef broadcast_shapes(*shapes):\n if not shapes:\n return ()\n shapes = [(shape,) if np.ndim(shape) == 0 else tuple(shape) for shape in shapes]\n return lax.broadcast_shapes(*shapes)\n\ndef broadcast_arrays(*args):\n \"\"\"Like Numpy's broadcast_arrays but doesn't return views.\"\"\"\n shapes = [shape(arg) for arg in args]\n if len(set(shapes)) == 1:\n return [arg if isinstance(arg, ndarray) or isscalar(arg) else array(arg)\n for arg in args]\n result_shape = lax.broadcast_shapes(*shapes)\n return [broadcast_to(arg, result_shape) for arg in args]\n\n\n@_wraps(np.broadcast_to, lax_description=\"\"\"\\\nThe JAX version does not necessarily return a view of the input.\n\"\"\")\ndef broadcast_to(arr, shape):\n arr = arr if isinstance(arr, ndarray) else array(arr)\n shape = (shape,) if ndim(shape) == 0 else shape\n shape = canonicalize_shape(shape) # check that shape is concrete\n arr_shape = _shape(arr)\n if core.symbolic_equal_shape(arr_shape, shape):\n return arr\n else:\n nlead = len(shape) - len(arr_shape)\n shape_tail = shape[nlead:]\n compatible = _all(core.symbolic_equal_one_of_dim(arr_d, [1, shape_d])\n for arr_d, shape_d in safe_zip(arr_shape, shape_tail))\n if nlead < 0 or not compatible:\n msg = \"Incompatible shapes for broadcasting: {} and requested shape {}\"\n raise ValueError(msg.format(arr_shape, shape))\n diff, = np.where(tuple(not core.symbolic_equal_dim(arr_d, shape_d)\n for arr_d, shape_d in safe_zip(arr_shape, shape_tail)))\n new_dims = tuple(range(nlead)) + tuple(nlead + diff)\n kept_dims = tuple(np.delete(np.arange(len(shape)), new_dims))\n return lax.broadcast_in_dim(squeeze(arr, tuple(diff)), shape, kept_dims)\n\n\ndef _split(op, ary, indices_or_sections, axis=0):\n axis = core.concrete_or_error(int, axis, f\"in jax.numpy.{op} argument `axis`\")\n size = ary.shape[axis]\n if isinstance(indices_or_sections, (tuple, list) + _arraylike_types):\n indices_or_sections = np.array(\n [core.concrete_or_error(np.int64, i_s, f\"in jax.numpy.{op} argument 1\")\n for i_s in indices_or_sections], np.int64)\n split_indices = np.concatenate([[np.int64(0)], indices_or_sections,\n [np.int64(size)]])\n else:\n indices_or_sections = core.concrete_or_error(np.int64, indices_or_sections,\n f\"in jax.numpy.{op} argument 1\")\n part_size, r = _divmod(size, indices_or_sections)\n if r == 0:\n split_indices = np.arange(indices_or_sections + 1,\n dtype=np.int64) * part_size\n elif op == \"array_split\":\n split_indices = np.concatenate(\n [np.arange(r + 1, dtype=np.int64) * (part_size + 1),\n np.arange(indices_or_sections - r, dtype=np.int64) * part_size\n + ((r + 1) * (part_size + 1) - 1)])\n else:\n raise ValueError(\"array split does not result in an equal division\")\n starts, ends = [0] * ndim(ary), shape(ary)\n _subval = lambda x, i, v: subvals(x, [(i, v)])\n return [lax.slice(ary, _subval(starts, axis, start), _subval(ends, axis, end))\n for start, end in zip(split_indices[:-1], split_indices[1:])]\n\n@_wraps(np.split, lax_description=_ARRAY_VIEW_DOC)\ndef split(ary, 
indices_or_sections, axis: int = 0):\n return _split(\"split\", ary, indices_or_sections, axis=axis)\n\ndef _split_on_axis(np_fun, axis):\n @_wraps(np_fun, update_doc=False)\n def f(ary, indices_or_sections):\n return split(ary, indices_or_sections, axis=axis)\n return f\n\nvsplit = _split_on_axis(np.vsplit, axis=0)\nhsplit = _split_on_axis(np.hsplit, axis=1)\ndsplit = _split_on_axis(np.dsplit, axis=2)\n\n@_wraps(np.array_split)\ndef array_split(ary, indices_or_sections, axis: int = 0):\n return _split(\"array_split\", ary, indices_or_sections, axis=axis)\n\n@_wraps(np.clip, skip_params=['out'])\ndef clip(a, a_min=None, a_max=None, out=None):\n _check_arraylike(\"clip\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.clip is not supported.\")\n if a_min is None and a_max is None:\n raise ValueError(\"At most one of a_min and a_max may be None\")\n if a_min is not None:\n a = maximum(a_min, a)\n if a_max is not None:\n a = minimum(a_max, a)\n return a\n\n@_wraps(np.around, skip_params=['out'])\ndef round(a, decimals=0, out=None):\n _check_arraylike(\"round\", a)\n decimals = core.concrete_or_error(operator.index, decimals, \"'decimals' argument of jnp.round\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.round is not supported.\")\n dtype = _dtype(a)\n if issubdtype(dtype, integer):\n if decimals < 0:\n raise NotImplementedError(\n \"integer np.round not implemented for decimals < 0\")\n return a # no-op on integer types\n\n def _round_float(x):\n if decimals == 0:\n return lax.round(x, lax.RoundingMethod.TO_NEAREST_EVEN)\n\n # TODO(phawkins): the strategy of rescaling the value isn't necessarily a\n # good one since we may be left with an incorrectly rounded value at the\n # end due to precision problems. 
As a workaround for float16, convert to\n # float32,\n x = lax.convert_element_type(x, np.float32) if dtype == np.float16 else x\n factor = _constant_like(x, 10 ** decimals)\n out = lax.div(lax.round(lax.mul(x, factor),\n lax.RoundingMethod.TO_NEAREST_EVEN), factor)\n return lax.convert_element_type(out, dtype) if dtype == np.float16 else out\n\n if issubdtype(dtype, complexfloating):\n return lax.complex(_round_float(lax.real(a)), _round_float(lax.imag(a)))\n else:\n return _round_float(a)\naround = round\nround_ = round\n\n\n@_wraps(np.fix, skip_params=['out'])\ndef fix(x, out=None):\n _check_arraylike(\"fix\", x)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.fix is not supported.\")\n zero = lax._const(x, 0)\n return where(lax.ge(x, zero), floor(x), ceil(x))\n\n\n@_wraps(np.modf, skip_params=['out'])\ndef modf(x, out=None):\n _check_arraylike(\"modf\", x)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.modf is not supported.\")\n whole = fix(x)\n return x - whole, whole\n\n\n@_wraps(np.isfinite)\ndef isfinite(x):\n _check_arraylike(\"isfinite\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, floating):\n return lax.is_finite(x)\n elif issubdtype(dtype, complexfloating):\n return lax.bitwise_and(lax.is_finite(real(x)), lax.is_finite(imag(x)))\n else:\n return full_like(x, True, dtype=bool_)\n\n@_wraps(np.isinf)\ndef isinf(x):\n _check_arraylike(\"isinf\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, floating):\n return lax.eq(lax.abs(x), _constant_like(x, inf))\n elif issubdtype(dtype, complexfloating):\n re = lax.real(x)\n im = lax.imag(x)\n return lax.bitwise_or(lax.eq(lax.abs(re), _constant_like(re, inf)),\n lax.eq(lax.abs(im), _constant_like(im, inf)))\n else:\n return full_like(x, False, dtype=bool_)\n\ndef _isposneginf(infinity, x, out):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to isneginf/isposinf is not supported.\")\n dtype = _dtype(x)\n if issubdtype(dtype, floating):\n return lax.eq(x, _constant_like(x, infinity))\n elif issubdtype(dtype, complexfloating):\n raise ValueError(\"isposinf/isneginf are not well defined for complex types\")\n else:\n return full_like(x, False, dtype=bool_)\n\nisposinf = _wraps(np.isposinf, skip_params=['out'])(\n lambda x, out=None: _isposneginf(inf, x, out)\n)\n\nisneginf = _wraps(np.isneginf, skip_params=['out'])(\n lambda x, out=None: _isposneginf(-inf, x, out)\n)\n\n@_wraps(np.isnan)\ndef isnan(x):\n _check_arraylike(\"isnan\", x)\n return lax.ne(x, x)\n\n@_wraps(np.nan_to_num)\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):\n del copy\n _check_arraylike(\"nan_to_num\", x)\n dtype = _dtype(x)\n if issubdtype(dtype, complexfloating):\n return lax.complex(\n nan_to_num(lax.real(x), nan=nan, posinf=posinf, neginf=neginf),\n nan_to_num(lax.imag(x), nan=nan, posinf=posinf, neginf=neginf))\n info = finfo(dtypes.canonicalize_dtype(dtype))\n posinf = info.max if posinf is None else posinf\n neginf = info.min if neginf is None else neginf\n x = where(isnan(x), _constant_like(x, nan), x)\n x = where(isposinf(x), _constant_like(x, posinf), x)\n x = where(isneginf(x), _constant_like(x, neginf), x)\n return x\n\n### Reducers\n\ndef _reduction(a, name, np_fun, op, init_val, has_identity=True,\n preproc=None, bool_op=None, upcast_f16_for_computation=False,\n axis=None, dtype=None, out=None, keepdims=False, initial=None,\n where_=None, parallel_reduce=None):\n bool_op = bool_op or op\n # Note: we must accept out=None as an argument, because numpy 
reductions delegate to\n # object methods. For example `np.sum(x)` will call `x.sum()` if the `sum()` method\n # exists, passing along all its arguments.\n if out is not None:\n raise NotImplementedError(f\"The 'out' argument to jnp.{name} is not supported.\")\n _check_arraylike(name, a)\n lax._check_user_dtype_supported(dtype, name)\n axis = core.concrete_or_error(None, axis, f\"axis argument to jnp.{name}().\")\n\n if initial is None and not has_identity:\n if not _all(core.greater_equal_dim(d, 1) for d in np.shape(a)):\n raise ValueError(f\"zero-size array to reduction operation {name} which has no identity\")\n if where_ is not None:\n raise ValueError(f\"reduction operation {name} does not have an identity, so to use a \"\n f\"where mask one has to specify 'initial'\")\n\n a = a if isinstance(a, ndarray) else asarray(a)\n a = preproc(a) if preproc else a\n pos_dims, dims = _reduction_dims(a, axis)\n result_dtype = dtypes.canonicalize_dtype(dtype or _dtype(np_fun(np.ones((), dtype=_dtype(a)))))\n if upcast_f16_for_computation and issubdtype(result_dtype, inexact):\n computation_dtype = promote_types(result_dtype, float32)\n else:\n computation_dtype = result_dtype\n a = lax.convert_element_type(a, computation_dtype)\n op = op if computation_dtype != np.bool_ else bool_op\n # NB: in XLA, init_val must be an identity for the op, so the user-specified\n # initial value must be applied afterward.\n init_val = _reduction_init_val(a, init_val)\n if where_ is not None:\n a = where(where_, a, init_val)\n if pos_dims is not dims:\n if parallel_reduce is None:\n raise NotImplementedError(f\"Named reductions not implemented for jnp.{name}()\")\n result = parallel_reduce(a, dims)\n else:\n result = lax.reduce(a, init_val, op, dims)\n if initial is not None:\n result = op(_reduction_init_val(a, initial), result)\n if keepdims:\n result = expand_dims(result, pos_dims)\n return lax.convert_element_type(result, dtype or result_dtype)\n\ndef _canonicalize_axis_allow_named(x, rank):\n return maybe_named_axis(x, lambda i: _canonicalize_axis(i, rank), lambda name: name)\n\ndef _reduction_dims(a, axis):\n if axis is None:\n return (tuple(range(ndim(a))),) * 2\n elif not isinstance(axis, (np.ndarray, tuple, list)):\n axis = (axis,)\n canon_axis = tuple(_canonicalize_axis_allow_named(x, ndim(a))\n for x in axis)\n if len(canon_axis) != len(set(canon_axis)):\n raise ValueError(f\"duplicate value in 'axis': {axis}\")\n canon_pos_axis = tuple(x for x in canon_axis if isinstance(x, int))\n if len(canon_pos_axis) != len(canon_axis):\n return canon_pos_axis, canon_axis\n else:\n return canon_axis, canon_axis\n\ndef _reduction_init_val(a, init_val):\n a_dtype = dtypes.canonicalize_dtype(_dtype(a))\n if a_dtype == 'bool':\n return np.array(init_val > 0, dtype=a_dtype)\n try:\n return np.array(init_val, dtype=a_dtype)\n except OverflowError:\n assert issubdtype(a_dtype, integer)\n sign, info = np.sign(init_val), iinfo(a_dtype)\n return np.array(info.min if sign < 0 else info.max, dtype=a_dtype)\n\ndef _cast_to_bool(operand):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=np.ComplexWarning)\n return lax.convert_element_type(operand, bool_)\n\n@_wraps(np.sum, skip_params=['out'])\ndef sum(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None, initial=None, where=None):\n return _reduction(a, \"sum\", np.sum, lax.add, 0,\n bool_op=lax.bitwise_or, upcast_f16_for_computation=True,\n axis=axis, dtype=dtype, out=out, keepdims=keepdims,\n 
initial=initial, where_=where, parallel_reduce=lax.psum)\n\n@_wraps(np.prod, skip_params=['out'])\ndef prod(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None, initial=None, where=None):\n return _reduction(a, \"prod\", np.prod, lax.mul, 1,\n bool_op=lax.bitwise_and, upcast_f16_for_computation=True,\n axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where_=where)\n\n@_wraps(np.max, skip_params=['out'])\ndef max(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, initial=None, where=None):\n return _reduction(a, \"max\", np.max, lax.max, -np.inf, has_identity=False,\n axis=axis, out=out, keepdims=keepdims,\n initial=initial, where_=where, parallel_reduce=lax.pmax)\n\n@_wraps(np.min, skip_params=['out'])\ndef min(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, initial=None, where=None):\n return _reduction(a, \"min\", np.min, lax.min, np.inf, has_identity=False,\n axis=axis, out=out, keepdims=keepdims,\n initial=initial, where_=where, parallel_reduce=lax.pmin)\n\n@_wraps(np.all, skip_params=['out'])\ndef all(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, *, where=None):\n return _reduction(a, \"all\", np.all, lax.bitwise_and, True, preproc=_cast_to_bool,\n axis=axis, out=out, keepdims=keepdims, where_=where)\n\n@_wraps(np.any, skip_params=['out'])\ndef any(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None, *, where=None):\n return _reduction(a, \"any\", np.any, lax.bitwise_or, False, preproc=_cast_to_bool,\n axis=axis, out=out, keepdims=keepdims, where_=where)\n\nproduct = prod\namin = min\namax = max\nalltrue = all\nsometrue = any\n\ndef _axis_size(a, axis):\n if not isinstance(axis, (tuple, list)):\n axis = (axis,)\n size = 1\n a_shape = shape(a)\n for a in axis:\n size *= maybe_named_axis(a, lambda i: a_shape[i], lambda name: lax.psum(1, name))\n return size\n\n@_wraps(np.mean, skip_params=['out'])\ndef mean(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=False, *, where=None):\n _check_arraylike(\"mean\", a)\n lax._check_user_dtype_supported(dtype, \"mean\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.mean is not supported.\")\n\n if where is None:\n if axis is None:\n normalizer = size(a)\n else:\n normalizer = _axis_size(a, axis)\n else:\n normalizer = sum(broadcast_to(where, shape(a)), axis, dtype=dtype, keepdims=keepdims)\n\n if dtype is None:\n if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):\n dtype = float_\n else:\n dtype = _dtype(a)\n dtype = dtypes.canonicalize_dtype(dtype)\n\n return lax.div(\n sum(a, axis, dtype=dtype, keepdims=keepdims, where=where),\n lax.convert_element_type(normalizer, dtype))\n\n@_wraps(np.average)\ndef average(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, weights=None,\n returned=False):\n a = asarray(a)\n\n if weights is None: # Treat all weights as 1\n avg = mean(a, axis=axis)\n if axis is None:\n weights_sum = full((), size(a), dtype=avg.dtype)\n else:\n weights_sum = full_like(avg, a.shape[axis], dtype=avg.dtype)\n else:\n weights = asarray(weights)\n\n if issubdtype(a.dtype, inexact):\n out_dtype = result_type(a.dtype, weights.dtype)\n else:\n out_dtype = result_type(a.dtype, weights.dtype, float_)\n out_dtype = dtypes.canonicalize_dtype(out_dtype)\n\n a_shape = shape(a)\n a_ndim = len(a_shape)\n weights_shape = shape(weights)\n axis = None if axis is None else 
_canonicalize_axis(axis, a_ndim)\n\n if a_shape != weights_shape:\n # Make sure the dimensions work out\n if axis is None:\n raise ValueError(\"Axis must be specified when shapes of a and \"\n \"weights differ.\")\n if len(weights_shape) != 1:\n raise ValueError(\"1D weights expected when shapes of a and \"\n \"weights differ.\")\n if weights_shape[0] != a_shape[axis]:\n raise ValueError(\"Length of weights not \"\n \"compatible with specified axis.\")\n\n weights = broadcast_to(weights, (a_ndim - 1) * (1,) + weights_shape)\n weights = moveaxis(weights, -1, axis)\n\n weights_sum = sum(weights, axis=axis, dtype=out_dtype)\n avg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum\n\n if returned:\n if avg.shape != weights_sum.shape:\n weights_sum = broadcast_to(weights_sum, avg.shape)\n return avg, weights_sum\n return avg\n\n\n@_wraps(np.var, skip_params=['out'])\ndef var(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False, *, where=None):\n _check_arraylike(\"var\", a)\n lax._check_user_dtype_supported(dtype, \"var\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.var is not supported.\")\n\n a_dtype, dtype = _var_promote_types(_dtype(a), dtype)\n a_mean = mean(a, axis, dtype=a_dtype, keepdims=True, where=where)\n centered = a - a_mean\n if issubdtype(centered.dtype, complexfloating):\n centered = lax.real(lax.mul(centered, lax.conj(centered)))\n else:\n centered = lax.square(centered)\n\n if where is None:\n if axis is None:\n normalizer = size(a)\n else:\n normalizer = _axis_size(a, axis)\n else:\n normalizer = sum(broadcast_to(where, shape(a)), axis, dtype=dtype, keepdims=keepdims)\n normalizer = normalizer - ddof\n\n result = sum(centered, axis, keepdims=keepdims, where=where)\n out = lax.div(result, lax.convert_element_type(normalizer, result.dtype))\n return lax.convert_element_type(out, dtype)\n\n\ndef _var_promote_types(a_dtype, dtype):\n if dtype:\n if (not issubdtype(dtype, complexfloating) and\n issubdtype(a_dtype, complexfloating)):\n msg = (\"jax.numpy.var does not yet support real dtype parameters when \"\n \"computing the variance of an array of complex values. The \"\n \"semantics of numpy.var seem unclear in this case. 
Please comment \"\n \"on https://github.com/google/jax/issues/2283 if this behavior is \"\n \"important to you.\")\n raise ValueError(msg)\n a_dtype = promote_types(a_dtype, dtype)\n else:\n if not issubdtype(a_dtype, inexact):\n dtype = a_dtype = dtypes.canonicalize_dtype(float_)\n else:\n dtype = _complex_elem_type(a_dtype)\n a_dtype = promote_types(a_dtype, float32)\n return a_dtype, dtype\n\n\n@_wraps(np.std, skip_params=['out'])\ndef std(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False, *, where=None):\n _check_arraylike(\"std\", a)\n lax._check_user_dtype_supported(dtype, \"std\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.std is not supported.\")\n return sqrt(var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, where=where))\n\n\n@_wraps(np.ptp, skip_params=['out'])\ndef ptp(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=False):\n _check_arraylike(\"ptp\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.ptp is not supported.\")\n x = amax(a, axis=axis, keepdims=keepdims)\n y = amin(a, axis=axis, keepdims=keepdims)\n return lax.sub(x, y)\n\n\n@_wraps(np.allclose)\ndef allclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):\n return all(isclose(a, b, rtol, atol, equal_nan))\n\n\n@_wraps(np.count_nonzero)\ndef count_nonzero(a, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n keepdims=False):\n _check_arraylike(\"count_nonzero\", a)\n return sum(lax.ne(a, _constant_like(a, 0)), axis=axis,\n dtype=dtypes.canonicalize_dtype(np.int_), keepdims=keepdims)\n\n\n_NONZERO_DOC = \"\"\"\\\nBecause the size of the output of ``nonzero`` is data-dependent, the function is not\ntypically compatible with JIT. The JAX version adds the optional `size` argument which\nspecifies the size of the output arrays: it must be specified statically for ``jnp.nonzero``\nto be traced. 
If specified, the first `size` nonzero elements will be returned; if there\nare fewer nonzero elements than `size` indicates, the index arrays will be zero-padded.\n\"\"\"\n\n@_wraps(np.nonzero, lax_description=_NONZERO_DOC)\ndef nonzero(a, *, size=None):\n a = atleast_1d(a)\n mask = a != 0\n if size is None:\n size = mask.sum()\n size = core.concrete_or_error(int, size,\n \"The size argument of jnp.nonzero must be statically specified \"\n \"to use jnp.nonzero within JAX transformations.\")\n if a.size == 0 or size == 0:\n return tuple(zeros(size, int) for dim in a.shape)\n flat_indices = cumsum(bincount(cumsum(mask), length=size))\n strides = np.cumprod(a.shape[::-1])[::-1] // a.shape\n return tuple((flat_indices // stride) % size for stride, size in zip(strides, a.shape))\n\n@_wraps(np.flatnonzero, lax_description=_NONZERO_DOC)\ndef flatnonzero(a, *, size=None):\n return nonzero(ravel(a), size=size)[0]\n\n\ndef _nan_reduction(a, name, jnp_reduction, init_val, nan_if_all_nan,\n axis=None, keepdims=None, **kwargs):\n _check_arraylike(name, a)\n if not issubdtype(_dtype(a), inexact):\n return jnp_reduction(a, axis=axis, keepdims=keepdims, **kwargs)\n\n out = jnp_reduction(where(isnan(a), _reduction_init_val(a, init_val), a),\n axis=axis, keepdims=keepdims, **kwargs)\n if nan_if_all_nan:\n return where(all(isnan(a), axis=axis, keepdims=keepdims),\n _constant_like(a, nan), out)\n else:\n return out\n\n@_wraps(np.nanmin, skip_params=['out'])\ndef nanmin(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None):\n return _nan_reduction(a, 'nanmin', min, inf, nan_if_all_nan=True,\n axis=axis, out=out, keepdims=keepdims)\n\n@_wraps(np.nanmax, skip_params=['out'])\ndef nanmax(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n keepdims=None):\n return _nan_reduction(a, 'nanmax', max, -inf, nan_if_all_nan=True,\n axis=axis, out=out, keepdims=keepdims)\n\n@_wraps(np.nansum, skip_params=['out'])\ndef nansum(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None):\n return _nan_reduction(a, 'nansum', sum, 0, nan_if_all_nan=False,\n axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n@_wraps(np.nanprod, skip_params=['out'])\ndef nanprod(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=None):\n return _nan_reduction(a, 'nanprod', prod, 1, nan_if_all_nan=False,\n axis=axis, dtype=dtype, out=out, keepdims=keepdims)\n\n@_wraps(np.nanmean, skip_params=['out'])\ndef nanmean(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, keepdims=False):\n _check_arraylike(\"nanmean\", a)\n lax._check_user_dtype_supported(dtype, \"nanmean\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.nanmean is not supported.\")\n if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):\n return mean(a, axis, dtype, out, keepdims)\n if dtype is None:\n dtype = _dtype(a)\n nan_mask = logical_not(isnan(a))\n normalizer = sum(nan_mask, axis=axis, dtype=int32, keepdims=keepdims)\n normalizer = lax.convert_element_type(normalizer, dtype)\n td = lax.div(nansum(a, axis, dtype=dtype, keepdims=keepdims), normalizer)\n return td\n\n\n@_wraps(np.nanvar, skip_params=['out'])\ndef nanvar(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False):\n _check_arraylike(\"nanvar\", a)\n lax._check_user_dtype_supported(dtype, \"nanvar\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.nanvar is 
not supported.\")\n\n a_dtype, dtype = _var_promote_types(_dtype(a), dtype)\n a_mean = nanmean(a, axis, dtype=a_dtype, keepdims=True)\n centered = a - a_mean\n if issubdtype(centered.dtype, complexfloating):\n centered = lax.real(lax.mul(centered, lax.conj(centered)))\n else:\n centered = lax.square(centered)\n\n normalizer = sum(logical_not(isnan(a)), axis=axis, keepdims=keepdims)\n normalizer = normalizer - ddof\n normalizer_mask = lax.le(normalizer, 0)\n result = nansum(centered, axis, keepdims=keepdims)\n result = where(normalizer_mask, nan, result)\n divisor = where(normalizer_mask, 1, normalizer)\n out = lax.div(result, lax.convert_element_type(divisor, result.dtype))\n return lax.convert_element_type(out, dtype)\n\n\n@_wraps(np.nanstd, skip_params=['out'])\ndef nanstd(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, dtype=None,\n out=None, ddof=0, keepdims=False):\n _check_arraylike(\"nanstd\", a)\n lax._check_user_dtype_supported(dtype, \"nanstd\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.nanstd is not supported.\")\n return sqrt(nanvar(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))\n\n\ndef _make_cumulative_reduction(np_reduction, reduction, fill_nan=False, fill_value=0):\n # We want to allow XLA to fuse the pad and reduce-window operators to\n # avoid materializing the padded output.\n # Consider removing `jit` once again if reduce-window is generalized to\n # support arbitrary padding.\n @partial(jit, static_argnums=(1, 2))\n def _cumulative_reduction(a, axis, dtype):\n if axis is None or isscalar(a):\n a = ravel(a)\n axis = 0\n\n a_shape = list(shape(a))\n num_dims = len(a_shape)\n axis = _canonicalize_axis(axis, num_dims)\n\n if fill_nan:\n a = where(isnan(a), _constant_like(a, fill_value), a)\n\n if not dtype and _dtype(a) == bool_:\n dtype = int_\n if dtype:\n a = lax.convert_element_type(a, dtype)\n\n return reduction(a, axis)\n\n @_wraps(np_reduction, skip_params=['out'])\n def cumulative_reduction(a,\n axis: Optional[Union[int, Tuple[int, ...]]] = None,\n dtype=None, out=None):\n _check_arraylike(np_reduction.__name__, a)\n if out is not None:\n raise NotImplementedError(f\"The 'out' argument to jnp.{np_reduction.__name__} \"\n f\"is not supported.\")\n lax._check_user_dtype_supported(dtype, np_reduction.__name__)\n # jit doesn't support kwargs as static_args.\n return _cumulative_reduction(a, axis, dtype)\n return cumulative_reduction\n\n\ncumsum = _make_cumulative_reduction(np.cumsum, lax.cumsum, fill_nan=False)\ncumprod = _make_cumulative_reduction(np.cumprod, lax.cumprod, fill_nan=False)\ncumproduct = cumprod\nnancumsum = _make_cumulative_reduction(np.nancumsum, lax.cumsum,\n fill_nan=True, fill_value=0)\nnancumprod = _make_cumulative_reduction(np.nancumprod, lax.cumprod,\n fill_nan=True, fill_value=1)\n\n\n@_wraps(np.unwrap)\ndef unwrap(p, discont=pi, axis: int = -1):\n _check_arraylike(\"unwrap\", p)\n dd = diff(p, axis=axis)\n ddmod = mod(dd + pi, 2 * pi) - pi\n ddmod = where((ddmod == -pi) & (dd > 0), pi, ddmod)\n\n ph_correct = where(abs(dd) < discont, 0, ddmod - dd)\n\n up = concatenate((\n lax.slice_in_dim(p, 0, 1, axis=axis),\n lax.slice_in_dim(p, 1, None, axis=axis) + cumsum(ph_correct, axis=axis)\n ), axis=axis)\n\n return up\n\n\n### Array-creation functions\n\ndef _check_no_padding(axis_padding, mode):\n if (axis_padding[0] > 0 or axis_padding[1] > 0):\n msg = \"Cannot apply '{}' padding to empty axis\"\n raise ValueError(msg.format(mode))\n\n\ndef _pad_constant(array, pad_width, constant_values):\n nd 
= ndim(array)\n constant_values = broadcast_to(asarray(constant_values), (nd, 2))\n constant_values = lax.convert_element_type(constant_values, array.dtype)\n for i in range(nd):\n widths = [(0, 0, 0)] * nd\n widths[i] = (pad_width[i, 0], 0, 0)\n array = lax.pad(array, constant_values[i, 0], widths)\n widths[i] = (0, pad_width[i, 1], 0)\n array = lax.pad(array, constant_values[i, 1], widths)\n return array\n\n\ndef _pad_wrap(array, pad_width):\n for i in range(ndim(array)):\n if array.shape[i] == 0:\n _check_no_padding(pad_width[i], \"wrap\")\n continue\n size = array.shape[i]\n repeats, (left_remainder, right_remainder) = _divmod(pad_width[i], size)\n total_repeats = repeats.sum() + 1\n parts = []\n if left_remainder:\n parts += [lax.slice_in_dim(array, size - left_remainder, size, axis=i)]\n parts += total_repeats * [array]\n if right_remainder:\n parts += [lax.slice_in_dim(array, 0, right_remainder, axis=i)]\n array = lax.concatenate(parts, dimension=i)\n return array\n\n\ndef _pad_symmetric_or_reflect(array, pad_width, mode, reflect_type):\n assert mode in (\"symmetric\", \"reflect\")\n assert reflect_type in (\"even\", \"odd\")\n\n for i in range(ndim(array)):\n if array.shape[i] == 0:\n _check_no_padding(pad_width[i], mode)\n continue\n\n n = array.shape[i]\n offset = 1 if (mode == \"reflect\" and n > 1) else 0\n\n def build_padding(array, padding, before):\n if before:\n edge = lax.slice_in_dim(array, 0, 1, axis=i)\n else:\n edge = lax.slice_in_dim(array, -1, None, axis=i)\n\n while padding > 0:\n curr_pad = _min(padding, n - offset)\n padding -= curr_pad\n\n if before:\n start = offset\n stop = offset + curr_pad\n else:\n start = -(curr_pad + offset)\n stop = None if (mode == \"symmetric\" or n == 1) else -1\n\n x = lax.slice_in_dim(array, start, stop, axis=i)\n x = flip(x, axis=i)\n\n if reflect_type == 'odd':\n x = 2 * edge - x\n if n > 1:\n if before:\n edge = lax.slice_in_dim(x, 0, 1, axis=i)\n else:\n edge = lax.slice_in_dim(x, -1, None, axis=i)\n\n if before:\n array = lax.concatenate([x, array], dimension=i)\n else:\n array = lax.concatenate([array, x], dimension=i)\n return array\n\n array = build_padding(array, pad_width[i, 0], before=True)\n array = build_padding(array, pad_width[i, 1], before=False)\n return array\n\n\ndef _pad_edge(array, pad_width):\n nd = ndim(array)\n for i in range(nd):\n if array.shape[i] == 0:\n _check_no_padding(pad_width[i], \"edge\")\n continue\n\n n = array.shape[i]\n npad_before, npad_after = pad_width[i]\n\n edge_before = lax.slice_in_dim(array, 0, 1, axis=i)\n pad_before = repeat(edge_before, npad_before, axis=i)\n\n edge_after = lax.slice_in_dim(array, n-1, n, axis=i)\n pad_after = repeat(edge_after, npad_after, axis=i)\n\n array = lax.concatenate([pad_before, array, pad_after], dimension=i)\n return array\n\n\ndef _pad_linear_ramp(array, pad_width, end_values):\n for axis in range(ndim(array)):\n edge_before = lax.slice_in_dim(array, 0, 1, axis=axis)\n edge_after = lax.slice_in_dim(array, -1, None, axis=axis)\n ramp_before = linspace(\n start=end_values[axis][0],\n stop=edge_before.squeeze(axis), # Dimension is replaced by linspace\n num=pad_width[axis][0],\n endpoint=False,\n dtype=array.dtype,\n axis=axis\n )\n ramp_after = linspace(\n start=end_values[axis][1],\n stop=edge_after.squeeze(axis), # Dimension is replaced by linspace\n num=pad_width[axis][1],\n endpoint=False,\n dtype=array.dtype,\n axis=axis\n )\n\n # Reverse linear space in appropriate dimension\n ramp_after = flip(ramp_after, axis)\n\n array = 
lax.concatenate([ramp_before, array, ramp_after], dimension=axis)\n return array\n\n\ndef _pad_stats(array, pad_width, stat_length, stat_func):\n nd = ndim(array)\n for i in range(nd):\n if stat_length is None:\n stat_before = stat_func(array, axis=i, keepdims=True)\n stat_after = stat_before\n else:\n array_length = array.shape[i]\n length_before, length_after = stat_length[i]\n if length_before == 0 or length_after == 0:\n raise ValueError(\"stat_length of 0 yields no value for padding\")\n\n # Limit stat_length to length of array.\n length_before = _min(length_before, array_length)\n length_after = _min(length_after, array_length)\n\n slice_before = lax.slice_in_dim(array, 0, length_before, axis=i)\n slice_after = lax.slice_in_dim(array, -length_after, None, axis=i)\n stat_before = stat_func(slice_before, axis=i, keepdims=True)\n stat_after = stat_func(slice_after, axis=i, keepdims=True)\n\n if np.issubdtype(array.dtype, np.integer):\n stat_before = round(stat_before)\n stat_after = round(stat_after)\n\n stat_before = stat_before.astype(array.dtype)\n stat_after = stat_after.astype(array.dtype)\n\n npad_before, npad_after = pad_width[i]\n pad_before = repeat(stat_before, npad_before, axis=i)\n pad_after = repeat(stat_after, npad_after, axis=i)\n\n array = lax.concatenate([pad_before, array, pad_after], dimension=i)\n return array\n\n\ndef _pad_empty(array, pad_width):\n # Note: jax.numpy.empty = jax.numpy.zeros\n for i in range(ndim(array)):\n shape_before = array.shape[:i] + (pad_width[i][0],) + array.shape[i + 1:]\n pad_before = empty(shape_before, dtype=array.dtype)\n\n shape_after = array.shape[:i] + (pad_width[i][1],) + array.shape[i + 1:]\n pad_after = empty(shape_after, dtype=array.dtype)\n array = lax.concatenate([pad_before, array, pad_after], dimension=i)\n return array\n\n\ndef _pad_func(array, pad_width, func, **kwargs):\n pad_width = _broadcast_to_pairs(pad_width, ndim(array), \"pad_width\")\n padded = _pad_constant(array, np.array(pad_width), 0)\n for axis in range(ndim(padded)):\n padded = apply_along_axis(func, axis, padded, pad_width[axis], axis, kwargs)\n return padded\n\n\ndef _broadcast_to_pairs(nvals, nd, name):\n nvals = np.asarray(tree_map(\n lambda x: core.concrete_or_error(np.array, x, context=f\"{name} argument of jnp.pad\"),\n nvals))\n if nvals.dtype.kind == 'O':\n raise TypeError(f'`{name}` entries must be the same shape.')\n\n if nvals.shape == (nd, 2):\n # ((before_1, after_1), ..., (before_N, after_N))\n return tuple(tuple(nval) for nval in nvals)\n elif nvals.shape == (1, 2):\n # ((before, after),)\n return tuple(tuple(nvals[0]) for i in range(nd))\n elif nvals.shape == (2,):\n # (before, after) (not in the numpy docstring but works anyway)\n return tuple(tuple(nvals) for i in range(nd))\n elif nvals.shape == (1,):\n # (pad,)\n return tuple((nvals[0], nvals[0]) for i in range(nd))\n elif nvals.shape == ():\n # pad\n return tuple((nvals.flat[0], nvals.flat[0]) for i in range(nd))\n else:\n raise ValueError(f\"jnp.pad: {name} with nd={nd} has unsupported shape {nvals.shape}. 
\"\n f\"Valid shapes are ({nd}, 2), (1, 2), (2,), (1,), or ().\")\n\n\n@partial(jit, static_argnums=(1, 2, 4, 5, 6))\ndef _pad(array, pad_width, mode, constant_values, stat_length, end_values, reflect_type):\n array = asarray(array)\n nd = ndim(array)\n\n if nd == 0:\n return array\n\n stat_funcs = {\"maximum\": amax, \"minimum\": amin,\n \"mean\": mean, \"median\": median}\n\n pad_width = _broadcast_to_pairs(pad_width, nd, \"pad_width\")\n pad_width = np.array(pad_width)\n assert pad_width.shape == (nd, 2), pad_width\n\n if np.any(pad_width < 0):\n raise ValueError(\"index can't contain negative values\")\n\n if mode == \"constant\":\n return _pad_constant(array, pad_width, constant_values)\n\n elif mode == \"wrap\":\n return _pad_wrap(array, pad_width)\n\n elif mode in (\"symmetric\", \"reflect\"):\n return _pad_symmetric_or_reflect(array, pad_width, mode, reflect_type)\n\n elif mode == \"edge\":\n return _pad_edge(array, pad_width)\n\n elif mode == \"linear_ramp\":\n end_values = _broadcast_to_pairs(end_values, nd, \"end_values\")\n return _pad_linear_ramp(array, pad_width, end_values)\n\n elif mode in stat_funcs:\n if stat_length is not None:\n stat_length = _broadcast_to_pairs(stat_length, nd, \"stat_length\")\n return _pad_stats(array, pad_width, stat_length, stat_funcs[mode])\n\n elif mode == \"empty\":\n return _pad_empty(array, pad_width)\n\n else:\n assert False, (\"Should not be reached since pad already handled unsupported and\"\n \"not implemented modes\")\n\n\n@_wraps(np.pad, lax_description=\"\"\"\\\nUnlike numpy, JAX \"function\" mode's argument (which is another function) should return\nthe modified array. This is because Jax arrays are immutable.\n(In numpy, \"function\" mode's argument should modify a rank 1 array in-place.)\n\"\"\")\ndef pad(array, pad_width, mode=\"constant\", **kwargs):\n _check_arraylike(\"pad\", array)\n pad_width = _broadcast_to_pairs(pad_width, ndim(array), \"pad_width\")\n if pad_width and np.array(pad_width).dtype.kind != 'i':\n raise TypeError('`pad_width` must be of integral type.')\n\n if callable(mode):\n return _pad_func(array, pad_width, mode, **kwargs)\n\n allowed_kwargs = {\n 'empty': [], 'edge': [], 'wrap': [],\n 'constant': ['constant_values'],\n 'linear_ramp': ['end_values'],\n 'maximum': ['stat_length'],\n 'mean': ['stat_length'],\n 'median': ['stat_length'],\n 'minimum': ['stat_length'],\n 'reflect': ['reflect_type'],\n 'symmetric': ['reflect_type'],\n }\n try:\n unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])\n except KeyError:\n msg = \"Unimplemented padding mode '{}' for np.pad.\"\n raise NotImplementedError(msg.format(mode))\n if unsupported_kwargs:\n raise ValueError(\"unsupported keyword arguments for mode '{}': {}\"\n .format(mode, unsupported_kwargs))\n # Set default value if not given.\n constant_values = kwargs.get('constant_values', 0)\n stat_length = kwargs.get('stat_length', None)\n end_values = kwargs.get('end_values', 0)\n reflect_type = kwargs.get('reflect_type', \"even\")\n\n return _pad(array, pad_width, mode, constant_values, stat_length, end_values, reflect_type)\n\n\n@_wraps(np.stack, skip_params=['out'])\ndef stack(arrays, axis: int =0, out=None):\n if not len(arrays):\n raise ValueError(\"Need at least one array to stack.\")\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.stack is not supported.\")\n if isinstance(arrays, ndarray):\n axis = _canonicalize_axis(axis, arrays.ndim)\n return concatenate(expand_dims(arrays, axis + 1), axis=axis)\n else:\n 
_check_arraylike(\"stack\", *arrays)\n shape0 = shape(arrays[0])\n axis = _canonicalize_axis(axis, len(shape0) + 1)\n new_arrays = []\n for a in arrays:\n if shape(a) != shape0:\n raise ValueError(\"All input arrays must have the same shape.\")\n new_arrays.append(expand_dims(a, axis))\n return concatenate(new_arrays, axis=axis)\n\n@_wraps(np.tile)\ndef tile(A, reps):\n _check_arraylike(\"tile\", A)\n try:\n iter(reps)\n except TypeError:\n reps = (reps,)\n reps = tuple(operator.index(rep) if core.is_constant_dim(rep) else rep\n for rep in reps)\n A_shape = (1,) * (len(reps) - ndim(A)) + shape(A)\n reps = (1,) * (len(A_shape) - len(reps)) + reps\n result = broadcast_to(reshape(A, [j for i in A_shape for j in [1, i]]),\n [k for pair in zip(reps, A_shape) for k in pair])\n return reshape(result, tuple(np.multiply(A_shape, reps)))\n\ndef _concatenate_array(arr, axis: int):\n # Fast path for concatenation when the input is an ndarray rather than a list.\n arr = asarray(arr)\n if arr.ndim == 0 or arr.shape[0] == 0:\n raise ValueError(\"Need at least one array to concatenate.\")\n if axis is None:\n return lax.reshape(arr, (arr.size,))\n if arr.ndim == 1:\n raise ValueError(\"Zero-dimensional arrays cannot be concatenated.\")\n axis = _canonicalize_axis(axis, arr.ndim - 1)\n shape = arr.shape[1:axis + 1] + (arr.shape[0] * arr.shape[axis + 1],) + arr.shape[axis + 2:]\n dimensions = [*range(1, axis + 1), 0, *range(axis + 1, arr.ndim)]\n return lax.reshape(arr, shape, dimensions)\n\n@_wraps(np.concatenate)\ndef concatenate(arrays, axis: int = 0):\n if isinstance(arrays, ndarray):\n return _concatenate_array(arrays, axis)\n _check_arraylike(\"concatenate\", *arrays)\n if not len(arrays):\n raise ValueError(\"Need at least one array to concatenate.\")\n if ndim(arrays[0]) == 0:\n raise ValueError(\"Zero-dimensional arrays cannot be concatenated.\")\n if axis is None:\n return concatenate([ravel(a) for a in arrays], axis=0)\n axis = _canonicalize_axis(axis, ndim(arrays[0]))\n arrays = _promote_dtypes(*arrays)\n # lax.concatenate can be slow to compile for wide concatenations, so form a\n # tree of concatenations as a workaround especially for op-by-op mode.\n # (https://github.com/google/jax/issues/653).\n k = 16\n if len(arrays) == 1:\n return asarray(arrays[0])\n else:\n while len(arrays) > 1:\n arrays = [lax.concatenate(arrays[i:i+k], axis)\n for i in range(0, len(arrays), k)]\n return arrays[0]\n\n\n@_wraps(np.vstack)\ndef vstack(tup):\n if isinstance(tup, ndarray):\n arrs = jax.vmap(atleast_2d)(tup)\n else:\n arrs = [atleast_2d(m) for m in tup]\n return concatenate(arrs, axis=0)\nrow_stack = vstack\n\n\n@_wraps(np.hstack)\ndef hstack(tup):\n if isinstance(tup, ndarray):\n arrs = jax.vmap(atleast_1d)(tup)\n arr0_ndim = arrs.ndim - 1\n else:\n arrs = [atleast_1d(m) for m in tup]\n arr0_ndim = arrs[0].ndim\n return concatenate(arrs, axis=0 if arr0_ndim == 1 else 1)\n\n\n@_wraps(np.dstack)\ndef dstack(tup):\n if isinstance(tup, ndarray):\n arrs = jax.vmap(atleast_3d)(tup)\n else:\n arrs = [atleast_3d(m) for m in tup]\n return concatenate(arrs, axis=2)\n\n\n@_wraps(np.column_stack)\ndef column_stack(tup):\n if isinstance(tup, ndarray):\n arrs = jax.vmap(lambda x: atleast_2d(x).T)(tup) if tup.ndim < 3 else tup\n else:\n arrs = [atleast_2d(arr).T if arr.ndim < 2 else arr for arr in map(asarray, tup)]\n return concatenate(arrs, 1)\n\n\n@_wraps(np.choose, skip_params=['out'])\ndef choose(a, choices, out=None, mode='raise'):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to 
jnp.choose is not supported.\")\n _check_arraylike('choose', a, *choices)\n if not issubdtype(_dtype(a), integer):\n raise ValueError(\"`a` array must be integer typed\")\n N = len(choices)\n\n if mode == 'raise':\n a = core.concrete_or_error(asarray, a,\n \"The error occurred because jnp.choose was jit-compiled\"\n \" with mode='raise'. Use mode='wrap' or mode='clip' instead.\")\n if any((a < 0) | (a >= N)):\n raise ValueError(\"invalid entry in choice array\")\n elif mode == 'wrap':\n a = a % N\n elif mode == 'clip':\n a = clip(a, 0, N - 1)\n else:\n raise ValueError(f\"mode={mode!r} not understood. Must be 'raise', 'wrap', or 'clip'\")\n\n a, *choices = broadcast_arrays(a, *choices)\n return array(choices)[(a,) + indices(a.shape, sparse=True)]\n\n\ndef _atleast_nd(x, n):\n m = ndim(x)\n return lax.broadcast(x, (1,) * (n - m)) if m < n else x\n\ndef _block(xs):\n if isinstance(xs, tuple):\n raise ValueError(\"jax.numpy.block does not allow tuples, got {}\"\n .format(xs))\n elif isinstance(xs, list):\n if len(xs) == 0:\n raise ValueError(\"jax.numpy.block does not allow empty list arguments\")\n xs, depths = unzip2([_block(x) for x in xs])\n if _any(d != depths[0] for d in depths[1:]):\n raise ValueError(\"Mismatched list depths in jax.numpy.block\")\n rank = _max(depths[0], _max(ndim(x) for x in xs))\n xs = [_atleast_nd(x, rank) for x in xs]\n return concatenate(xs, axis=-depths[0]), depths[0] + 1\n else:\n return asarray(xs), 1\n\n@_wraps(np.block)\n@jit\ndef block(arrays):\n out, _ = _block(arrays)\n return out\n\n@_wraps(np.atleast_1d, update_doc=False, lax_description=_ARRAY_VIEW_DOC)\ndef atleast_1d(*arys):\n if len(arys) == 1:\n arr = asarray(arys[0])\n return arr if ndim(arr) >= 1 else reshape(arr, -1)\n else:\n return [atleast_1d(arr) for arr in arys]\n\n\n@_wraps(np.atleast_2d, update_doc=False, lax_description=_ARRAY_VIEW_DOC)\ndef atleast_2d(*arys):\n if len(arys) == 1:\n arr = asarray(arys[0])\n if ndim(arr) >= 2:\n return arr\n elif ndim(arr) == 1:\n return expand_dims(arr, axis=0)\n else:\n return expand_dims(arr, axis=(0, 1))\n else:\n return [atleast_2d(arr) for arr in arys]\n\n\n@_wraps(np.atleast_3d, update_doc=False, lax_description=_ARRAY_VIEW_DOC)\ndef atleast_3d(*arys):\n if len(arys) == 1:\n arr = asarray(arys[0])\n if ndim(arr) == 0:\n arr = expand_dims(arr, axis=(0, 1, 2))\n elif ndim(arr) == 1:\n arr = expand_dims(arr, axis=(0, 2))\n elif ndim(arr) == 2:\n arr = expand_dims(arr, axis=2)\n return arr\n else:\n return [atleast_3d(arr) for arr in arys]\n\n\n@_wraps(np.array)\ndef array(object, dtype=None, copy=True, order=\"K\", ndmin=0):\n if order is not None and order != \"K\":\n raise NotImplementedError(\"Only implemented for order='K'\")\n\n # check if the given dtype is compatible with JAX\n lax._check_user_dtype_supported(dtype, \"array\")\n\n weak_type = dtype is None and dtypes.is_weakly_typed(object)\n dtype = dtype and dtypes.canonicalize_dtype(dtype)\n\n if _can_call_numpy_array(object):\n if dtypes.is_python_scalar(object):\n object = dtypes.coerce_to_array(object, dtype)\n # TODO(jakevdp): falling back to numpy here fails to overflow for lists containing\n # large integers; see discussion in https://github.com/google/jax/pull/6047.\n object = _np_array(object, dtype=dtype, ndmin=ndmin, copy=False)\n\n assert type(object) not in dtypes.python_scalar_dtypes\n\n if type(object) is np.ndarray:\n _inferred_dtype = object.dtype and dtypes.canonicalize_dtype(object.dtype)\n lax._check_user_dtype_supported(_inferred_dtype, \"array\")\n out = 
_device_put_raw(object, weak_type=weak_type)\n if dtype: assert _dtype(out) == dtype\n elif isinstance(object, (DeviceArray, core.Tracer)):\n if isinstance(object, DeviceArray) and copy:\n # We perform a copy by bouncing back to the host\n # TODO(phawkins): add a device runtime function to copy a buffer\n out = _device_put_raw(_np_asarray(object), weak_type=weak_type)\n else:\n out = object\n elif isinstance(object, (list, tuple)):\n if object:\n out = stack([asarray(elt, dtype=dtype) for elt in object])\n else:\n out = _device_put_raw(_np_array([], dtype=dtype))\n else:\n try:\n view = memoryview(object)\n except TypeError:\n pass # `object` does not support the buffer interface.\n else:\n return array(_np_asarray(view), dtype, copy)\n\n raise TypeError(\"Unexpected input type for array: {}\".format(type(object)))\n\n out = lax._convert_element_type(out, dtype, weak_type=weak_type)\n\n if ndmin > ndim(out):\n out = lax.broadcast(out, (1,) * (ndmin - ndim(out)))\n return out\n\ndef _can_call_numpy_array(x):\n return _all(not isinstance(l, (core.Tracer, DeviceArray))\n for l in tree_leaves(x))\n\n\n@_wraps(np.asarray)\ndef asarray(a, dtype=None, order=None):\n lax._check_user_dtype_supported(dtype, \"asarray\")\n dtype = dtypes.canonicalize_dtype(dtype) if dtype is not None else dtype\n return array(a, dtype=dtype, copy=False, order=order)\n\n\n@_wraps(np.zeros_like)\ndef zeros_like(a, dtype=None, shape=None):\n _check_arraylike(\"zeros_like\", a)\n lax._check_user_dtype_supported(dtype, \"zeros_like\")\n if np.isscalar(shape):\n shape = (shape,)\n return lax.full_like(a, 0, dtype, shape)\n\n\n@_wraps(np.ones_like)\ndef ones_like(a, dtype=None, shape=None):\n _check_arraylike(\"ones_like\", a)\n lax._check_user_dtype_supported(dtype, \"ones_like\")\n if np.isscalar(shape):\n shape = (shape,)\n return lax.full_like(a, 1, dtype, shape)\n\n\n@_wraps(np.full)\ndef full(shape, fill_value, dtype=None):\n lax._check_user_dtype_supported(dtype, \"full\")\n _check_arraylike(\"full\", fill_value)\n if ndim(fill_value) == 0:\n shape = (shape,) if ndim(shape) == 0 else shape\n return lax.full(shape, fill_value, dtype)\n else:\n return broadcast_to(asarray(fill_value, dtype=dtype), shape)\n\n\n@_wraps(np.full_like)\ndef full_like(a, fill_value, dtype=None, shape=None):\n lax._check_user_dtype_supported(dtype, \"full_like\")\n _check_arraylike(\"full_like\", a, fill_value)\n if shape is not None:\n shape = (shape,) if ndim(shape) == 0 else shape\n if ndim(fill_value) == 0:\n return lax.full_like(a, fill_value, dtype, shape)\n else:\n shape = np.shape(a) if shape is None else shape\n dtype = _dtype(a) if dtype is None else dtype\n return broadcast_to(asarray(fill_value, dtype=dtype), shape)\n\n\n@_wraps(np.zeros)\ndef zeros(shape, dtype=None):\n if isinstance(shape, types.GeneratorType):\n raise TypeError(\"expected sequence object with len >= 0 or a single integer\")\n lax._check_user_dtype_supported(dtype, \"zeros\")\n dtype = float_ if dtype is None else dtype\n shape = (shape,) if ndim(shape) == 0 else shape\n return lax.full(shape, 0, dtype)\n\n@_wraps(np.ones)\ndef ones(shape, dtype=None):\n if isinstance(shape, types.GeneratorType):\n raise TypeError(\"expected sequence object with len >= 0 or a single integer\")\n lax._check_user_dtype_supported(dtype, \"ones\")\n dtype = float_ if dtype is None else dtype\n shape = (shape,) if ndim(shape) == 0 else shape\n return lax.full(shape, 1, dtype)\n\n\n@_wraps(np.array_equal)\ndef array_equal(a1, a2, equal_nan=False):\n try:\n a1, a2 = asarray(a1), 
asarray(a2)\n except Exception:\n return False\n if shape(a1) != shape(a2):\n return False\n eq = asarray(a1 == a2)\n if equal_nan:\n eq = logical_or(eq, logical_and(isnan(a1), isnan(a2)))\n return all(eq)\n\n\n@_wraps(np.array_equiv)\ndef array_equiv(a1, a2):\n try:\n a1, a2 = asarray(a1), asarray(a2)\n except Exception:\n return False\n try:\n eq = equal(a1, a2)\n except ValueError:\n # shapes are not broadcastable\n return False\n return all(eq)\n\n\n# We can't create uninitialized arrays in XLA; use zeros for empty.\nempty_like = zeros_like\nempty = zeros\n\n\n@_wraps(np.eye)\ndef eye(N, M=None, k=0, dtype=None):\n lax._check_user_dtype_supported(dtype, \"eye\")\n dtype = float_ if dtype is None else dtype\n N = core.concrete_or_error(operator.index, N, \"'N' argument of jnp.eye()\")\n M = N if M is None else core.concrete_or_error(\n operator.index, M, \"'M' argument of jnp.eye()\")\n if N < 0 or M < 0:\n raise ValueError(f\"negative dimensions are not allowed, got {N} and {M}\")\n k = operator.index(k)\n return lax._eye(dtype, (N, M), k)\n\n\n@_wraps(np.identity)\ndef identity(n, dtype=None):\n lax._check_user_dtype_supported(dtype, \"identity\")\n return eye(n, dtype=dtype)\n\n\n@_wraps(np.arange)\ndef arange(start, stop=None, step=None, dtype=None):\n lax._check_user_dtype_supported(dtype, \"arange\")\n require = partial(core.concrete_or_error, _np_asarray)\n msg = \"It arose in jax.numpy.arange argument `{}`.\".format\n if stop is None and step is None:\n start = require(start, msg(\"stop\"))\n dtype = dtype or _dtype(start)\n return lax.iota(dtype, np.ceil(start).astype(int)) # avoids materializing\n else:\n start = require(start, msg(\"start\"))\n stop = None if stop is None else require(stop, msg(\"stop\"))\n step = None if step is None else require(step, msg(\"step\"))\n if dtype is None:\n dtype = _dtype(start, *(x for x in [stop, step] if x is not None))\n return array(np.arange(start, stop=stop, step=step, dtype=dtype))\n\n\ndef _wrap_numpy_nullary_function(f):\n \"\"\"Adapts `f` to return a DeviceArray instead of an np.ndarray.\n\n `f` cannot have any non-static array arguments.\n \"\"\"\n @_wraps(f, update_doc=False)\n def wrapper(*args, **kwargs):\n return asarray(f(*args, **kwargs))\n return wrapper\n\n\n@_wraps(np.linspace)\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,\n axis: int = 0):\n \"\"\"Implementation of linspace differentiable in start and stop args.\"\"\"\n lax._check_user_dtype_supported(dtype, \"linspace\")\n num = core.concrete_or_error(operator.index, num, \"'num' argument of jnp.linspace\")\n axis = core.concrete_or_error(operator.index, axis, \"'axis' argument of jnp.linspace\")\n if num < 0:\n raise ValueError(f\"Number of samples, {num}, must be non-negative.\")\n\n dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))\n computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))\n start = asarray(start, dtype=computation_dtype)\n stop = asarray(stop, dtype=computation_dtype)\n\n bounds_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))\n broadcast_start = broadcast_to(start, bounds_shape)\n broadcast_stop = broadcast_to(stop, bounds_shape)\n axis = len(bounds_shape) + axis + 1 if axis < 0 else axis\n bounds_shape.insert(axis, 1)\n iota_shape = [1,] * len(bounds_shape)\n iota_shape[axis] = num\n div = (num - 1) if endpoint else num\n if num > 1:\n delta = lax.convert_element_type(stop - start, computation_dtype) / div\n if issubdtype(dtype, integer):\n # This is similar 
to how numpy computes linspace, but it\n # can fail to recover the endpoints in float32 arithmetic.\n out = (reshape(broadcast_start, bounds_shape) +\n reshape(lax.iota(dtype, num), iota_shape) *\n reshape(delta, bounds_shape))\n out = lax.floor(out)\n else:\n # This approach recovers the endpoints with float32 arithmetic,\n # but can lead to rounding errors for integer outputs.\n step = reshape(lax.iota(computation_dtype, num), iota_shape) / div\n out = (reshape(broadcast_start, bounds_shape) * (1 - step) +\n reshape(broadcast_stop, bounds_shape) * step)\n elif num == 1:\n delta = nan if endpoint else stop - start\n out = reshape(broadcast_start, bounds_shape)\n else: # num == 0 degenerate case, match numpy behavior\n empty_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))\n empty_shape.insert(axis, 0)\n delta = nan\n out = reshape(array([], dtype=dtype), empty_shape)\n if retstep:\n return lax.convert_element_type(out, dtype), delta\n else:\n return lax.convert_element_type(out, dtype)\n\n\n@_wraps(np.logspace)\ndef logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,\n axis: int = 0):\n \"\"\"Implementation of logspace differentiable in start and stop args.\"\"\"\n lax._check_user_dtype_supported(dtype, \"logspace\")\n dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))\n computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))\n start = asarray(start, dtype=computation_dtype)\n stop = asarray(stop, dtype=computation_dtype)\n lin = linspace(start, stop, num,\n endpoint=endpoint, retstep=False, dtype=None, axis=axis)\n return lax.convert_element_type(power(base, lin), dtype)\n\n\n@_wraps(np.geomspace)\ndef geomspace(start, stop, num=50, endpoint=True, dtype=None, axis: int = 0):\n \"\"\"Implementation of geomspace differentiable in start and stop args.\"\"\"\n lax._check_user_dtype_supported(dtype, \"geomspace\")\n dtype = dtype or result_type(start, stop, dtypes.canonicalize_dtype(float_))\n computation_dtype = promote_types(dtype, dtypes.canonicalize_dtype(float_))\n start = asarray(start, dtype=computation_dtype)\n stop = asarray(stop, dtype=computation_dtype)\n # follow the numpy geomspace convention for negative and complex endpoints\n signflip = 1 - (1 - sign(real(start))) * (1 - sign(real(stop))) // 2\n res = signflip * logspace(log10(signflip * start),\n log10(signflip * stop), num,\n endpoint=endpoint, base=10.0,\n dtype=computation_dtype, axis=0)\n if axis != 0:\n res = moveaxis(res, 0, axis)\n return lax.convert_element_type(res, dtype)\n\n\n@_wraps(np.meshgrid, lax_description=_ARRAY_VIEW_DOC)\ndef meshgrid(*args, **kwargs):\n indexing = kwargs.get(\"indexing\", \"xy\")\n sparse = kwargs.get(\"sparse\", False)\n copy = kwargs.get(\"copy\", True)\n if not copy:\n raise ValueError(\"jax.numpy.meshgrid only supports copy=True\")\n\n args = list(args)\n if indexing == \"xy\":\n if len(args) >= 2:\n args[0], args[1] = args[1], args[0]\n elif indexing != \"ij\":\n raise ValueError(\"Valid values for indexing are 'xy' and 'ij', got {}\"\n .format(indexing))\n\n shape = []\n for i, a in enumerate(args):\n args[i] = a = asarray(a)\n if len(a.shape) != 1:\n msg = \"Arguments to jax.numpy.meshgrid must be 1D, got shape {}\"\n raise ValueError(msg.format(a.shape))\n shape.append(1 if sparse else a.shape[0])\n\n output = []\n for i, a in enumerate(args):\n a = asarray(a)\n s = shape\n if sparse:\n s = list(s)\n s[i] = a.shape[0]\n output.append(lax.broadcast_in_dim(a, s, (i,)))\n\n if indexing == \"xy\" and len(args) 
>= 2:\n output[0], output[1] = output[1], output[0]\n\n return output\n\n\nclass _IndexGrid:\n def __getitem__(self, key):\n single_slice = isinstance(key, slice)\n if single_slice:\n key = (key,)\n output = []\n for k in key:\n start = core.concrete_or_error(None, k.start,\n \"slice start of jnp.mgrid\") or 0\n stop = core.concrete_or_error(None, k.stop, \"slice stop of jnp.mgrid\")\n step = core.concrete_or_error(None, k.step,\n \"slice step of jnp.mgrid\") or 1\n if np.iscomplex(step):\n output.append(linspace(start, stop, int(_abs(step))))\n else:\n output.append(arange(start, stop, step))\n if single_slice:\n return output[0]\n output = meshgrid(*output, indexing='ij', sparse=self.sparse)\n return output if self.sparse else stack(output, 0)\n\n\nclass _Mgrid(_IndexGrid):\n \"\"\"Return dense multi-dimensional \"meshgrid\".\n\n LAX-backend implementation of :obj:`numpy.mgrid`. This is a convenience wrapper for\n functionality provided by :func:`jax.numpy.meshgrid` with ``sparse=False``.\n\n See Also:\n jnp.ogrid: open/sparse version of jnp.mgrid\n\n Examples:\n Pass ``[start:stop:step]`` to generate values similar to :func:`jax.numpy.arange`:\n\n >>> jnp.mgrid[0:4:1]\n DeviceArray([0, 1, 2, 3], dtype=int32)\n\n Passing an imaginary step generates values similar to :func:`jax.numpy.linspace`:\n\n >>> jnp.mgrid[0:1:4j]\n DeviceArray([0. , 0.33333334, 0.6666667 , 1. ], dtype=float32)\n\n Multiple slices can be used to create broadcasted grids of indices:\n\n >>> jnp.mgrid[:2, :3]\n DeviceArray([[[0, 0, 0],\n [1, 1, 1]],\n [[0, 1, 2],\n [0, 1, 2]]], dtype=int32)\n \"\"\"\n sparse = False\n\nmgrid = _Mgrid()\n\n\nclass _Ogrid(_IndexGrid):\n \"\"\"Return open multi-dimensional \"meshgrid\".\n\n LAX-backend implementation of :obj:`numpy.ogrid`. This is a convenience wrapper for\n functionality provided by :func:`jax.numpy.meshgrid` with ``sparse=True``.\n\n See Also:\n jnp.mgrid: dense version of jnp.ogrid\n\n Examples:\n Pass ``[start:stop:step]`` to generate values similar to :func:`jax.numpy.arange`:\n\n >>> jnp.ogrid[0:4:1]\n DeviceArray([0, 1, 2, 3], dtype=int32)\n\n Passing an imaginary step generates values similar to :func:`jax.numpy.linspace`:\n\n >>> jnp.ogrid[0:1:4j]\n DeviceArray([0. , 0.33333334, 0.6666667 , 1. 
], dtype=float32)\n\n Multiple slices can be used to create sparse grids of indices:\n\n >>> jnp.ogrid[:2, :3]\n [DeviceArray([[0],\n [1]], dtype=int32),\n DeviceArray([[0, 1, 2]], dtype=int32)]\n \"\"\"\n sparse = True\n\n\nogrid = _Ogrid()\n\n\ndef _make_1d_grid_from_slice(s: slice):\n start = s.start or 0\n stop = s.stop\n step = s.step or 1\n if np.iscomplex(step):\n newobj = linspace(start, stop, int(_abs(step)))\n else:\n newobj = arange(start, stop, step)\n\n return newobj\n\n\nclass _AxisConcat:\n \"\"\"Concatenates slices, scalars and array-like objects along a given axis.\"\"\"\n def __getitem__(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n\n params = [self.axis, self.ndmin, self.trans1d, -1]\n\n if isinstance(key[0], str):\n # split off the directive\n directive, *key = key\n # check two special cases: matrix directives\n if directive == \"r\":\n params[-1] = 0\n elif directive == \"c\":\n params[-1] = 1\n else:\n vec = directive.split(\",\")\n k = len(vec)\n if k < 4:\n vec += params[k:]\n else:\n # ignore everything after the first three comma-separated ints\n vec = vec[:3] + params[-1]\n try:\n params = list(map(int, vec))\n except ValueError as err:\n raise ValueError(\n \"could not understand directive {!r}\".format(directive)\n ) from err\n\n axis, ndmin, trans1d, matrix = params\n\n output = []\n for item in key:\n if isinstance(item, slice):\n newobj = _make_1d_grid_from_slice(item)\n elif isinstance(item, str):\n raise ValueError(\"string directive must be placed at the beginning\")\n else:\n newobj = item\n\n newobj = array(newobj, copy=False, ndmin=ndmin)\n\n if trans1d != -1 and ndmin - ndim(item) > 0:\n shape_obj = list(range(ndmin))\n # Calculate number of left shifts, with overflow protection by mod\n num_lshifts = ndmin - _abs(ndmin + trans1d + 1) % ndmin\n shape_obj = tuple(shape_obj[num_lshifts:] + shape_obj[:num_lshifts])\n\n newobj = transpose(newobj, shape_obj)\n\n output.append(newobj)\n\n res = concatenate(tuple(output), axis=axis)\n\n if matrix != -1 and res.ndim == 1:\n # insert 2nd dim at axis 0 or 1\n res = expand_dims(res, matrix)\n\n return res\n\n def __len__(self):\n return 0\n\n\nclass RClass(_AxisConcat):\n \"\"\"Concatenate slices, scalars and array-like objects along the first axis.\n\n LAX-backend implementation of :obj:`numpy.r_`.\n\n See Also:\n ``jnp.c_``: Concatenates slices, scalars and array-like objects along the last axis.\n\n Examples:\n Passing slices in the form ``[start:stop:step]`` generates ``jnp.arange`` objects:\n\n >>> jnp.r_[-1:5:1, 0, 0, jnp.array([1,2,3])]\n DeviceArray([-1, 0, 1, 2, 3, 4, 0, 0, 1, 2, 3], dtype=int32)\n\n An imaginary value for ``step`` will create a ``jnp.linspace`` object instead,\n which includes the right endpoint:\n\n >>> jnp.r_[-1:1:6j, 0, jnp.array([1,2,3])]\n DeviceArray([-1. , -0.6 , -0.20000002, 0.20000005,\n 0.6 , 1. , 0. , 1. ,\n 2. , 3. 
], dtype=float32)\n\n Use a string directive of the form ``\"axis,dims,trans1d\"`` as the first argument to\n specify concatenation axis, minimum number of dimensions, and the position of the\n upgraded array's original dimensions in the resulting array's shape tuple:\n\n >>> jnp.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, 2D output\n DeviceArray([[1, 2, 3],\n [4, 5, 6]], dtype=int32)\n\n >>> jnp.r_['0,2,0', [1,2,3], [4,5,6]] # push last input axis to the front\n DeviceArray([[1],\n [2],\n [3],\n [4],\n [5],\n [6]], dtype=int32)\n\n Negative values for ``trans1d`` offset the last axis towards the start\n of the shape tuple:\n\n >>> jnp.r_['0,2,-2', [1,2,3], [4,5,6]]\n DeviceArray([[1],\n [2],\n [3],\n [4],\n [5],\n [6]], dtype=int32)\n\n Use the special directives ``\"r\"`` or ``\"c\"`` as the first argument on flat inputs\n to create an array with an extra row or column axis, respectively:\n\n >>> jnp.r_['r',[1,2,3], [4,5,6]]\n DeviceArray([[1, 2, 3, 4, 5, 6]], dtype=int32)\n\n >>> jnp.r_['c',[1,2,3], [4,5,6]]\n DeviceArray([[1],\n [2],\n [3],\n [4],\n [5],\n [6]], dtype=int32)\n\n For higher-dimensional inputs (``dim >= 2``), both directives ``\"r\"`` and ``\"c\"``\n give the same result.\n \"\"\"\n axis = 0\n ndmin = 1\n trans1d = -1\n\n\nr_ = RClass()\n\n\nclass CClass(_AxisConcat):\n \"\"\"Concatenate slices, scalars and array-like objects along the last axis.\n\n LAX-backend implementation of :obj:`numpy.c_`.\n\n See Also:\n ``jnp.r_``: Concatenates slices, scalars and array-like objects along the first axis.\n\n Examples:\n\n >>> a = jnp.arange(6).reshape((2,3))\n >>> jnp.c_[a,a]\n DeviceArray([[0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5]], dtype=int32)\n\n Use a string directive of the form ``\"axis:dims:trans1d\"`` as the first argument to specify\n concatenation axis, minimum number of dimensions, and the position of the upgraded array's\n original dimensions in the resulting array's shape tuple:\n\n >>> jnp.c_['0,2', [1,2,3], [4,5,6]]\n DeviceArray([[1],\n [2],\n [3],\n [4],\n [5],\n [6]], dtype=int32)\n\n >>> jnp.c_['0,2,-1', [1,2,3], [4,5,6]]\n DeviceArray([[1, 2, 3],\n [4, 5, 6]], dtype=int32)\n\n Use the special directives ``\"r\"`` or ``\"c\"`` as the first argument on flat inputs\n to create an array with inputs stacked along the last axis:\n\n >>> jnp.c_['r',[1,2,3], [4,5,6]]\n DeviceArray([[1, 4],\n [2, 5],\n [3, 6]], dtype=int32)\n \"\"\"\n axis = -1\n ndmin = 2\n trans1d = 0\n\n\nc_ = CClass()\n\n\n@_wraps(np.i0)\ndef i0(x):\n x_orig = x\n x, = _promote_args_inexact(\"i0\", x)\n if not issubdtype(x.dtype, np.floating):\n raise ValueError(f\"Unsupported input type to jax.numpy.i0: {_dtype(x_orig)}\")\n x = lax.abs(x)\n return lax.mul(lax.exp(x), lax.bessel_i0e(x))\n\n\n@_wraps(np.ix_)\ndef ix_(*args):\n n = len(args)\n output = []\n for i, a in enumerate(args):\n a = asarray(a)\n if len(a.shape) != 1:\n msg = \"Arguments to jax.numpy.ix_ must be 1-dimensional, got shape {}\"\n raise ValueError(msg.format(a.shape))\n if _dtype(a) == bool_:\n raise NotImplementedError(\n \"Boolean arguments to jax.numpy.ix_ are not implemented\")\n shape = [1] * n\n shape[i] = a.shape[0]\n if a.size == 0:\n # Numpy uses an integer index type for empty arrays.\n output.append(lax.full(shape, np.zeros((), np.intp)))\n else:\n output.append(lax.broadcast_in_dim(a, shape, (i,)))\n return tuple(output)\n\n\n@_wraps(np.indices)\ndef indices(dimensions, dtype=int32, sparse=False):\n dimensions = tuple(\n core.concrete_or_error(int, d, \"dimensions argument of jnp.indices\")\n for d 
in dimensions)\n N = len(dimensions)\n output = []\n s = dimensions\n for i, dim in enumerate(dimensions):\n idx = lax.iota(dtype, dim)\n if sparse:\n s = (1,)*i + (dim,) + (1,)*(N - i - 1)\n output.append(lax.broadcast_in_dim(idx, s, (i,)))\n if sparse:\n return tuple(output)\n return stack(output, 0) if output else array([], dtype=dtype)\n\n\n_TOTAL_REPEAT_LENGTH_DOC = \"\"\"\\\nJax adds the optional `total_repeat_length` parameter which specifies the total\nnumber of repeat, and defaults to sum(repeats). It must be specified for repeat\nto be compilable. If `sum(repeats)` is larger than the specified\n`total_repeat_length` the remaining values will be discarded. In the case of\n`sum(repeats)` being smaller than the specified target length, the final value\nwill be repeated.\n\"\"\"\n\n\n@_wraps(np.repeat, lax_description=_TOTAL_REPEAT_LENGTH_DOC)\ndef repeat(a, repeats, axis: Optional[int] = None, *, total_repeat_length=None):\n _check_arraylike(\"repeat\", a, repeats)\n\n if axis is None:\n a = ravel(a)\n axis = 0\n\n axis = core.concrete_or_error(operator.index, axis, \"'axis' argument of jnp.repeat()\")\n assert isinstance(axis, int) # to appease mypy\n\n # If total_repeat_length is not given, can't compile, use a default.\n if total_repeat_length is None:\n repeats = core.concrete_or_error(np.array, repeats,\n \"When jit-compiling jnp.repeat, the total number of repeats must be static. \"\n \"To fix this, either specify a static value for `repeats`, or pass a static \"\n \"value to `total_repeat_length`.\")\n\n # Fast path for when repeats is a scalar.\n if np.ndim(repeats) == 0 and ndim(a) != 0:\n input_shape = a.shape\n aux_axis = axis if axis < 0 else axis + 1\n a = expand_dims(a, aux_axis)\n reps = [1] * len(a.shape)\n reps[aux_axis] = repeats\n a = tile(a, reps)\n result_shape = list(input_shape)\n result_shape[axis] *= repeats\n return reshape(a, result_shape)\n\n repeats = np.ravel(repeats)\n if ndim(a) != 0:\n repeats = np.broadcast_to(repeats, [a.shape[axis]])\n total_repeat_length = np.sum(repeats)\n else:\n repeats = ravel(repeats)\n if ndim(a) != 0:\n repeats = broadcast_to(repeats, [a.shape[axis]])\n\n # Special case when a is a scalar.\n if ndim(a) == 0:\n if repeats.shape == (1,):\n return full([total_repeat_length], a)\n else:\n raise ValueError('`repeat` with a scalar parameter `a` is only '\n 'implemented for scalar values of the parameter `repeats`.')\n\n # Special case if total_repeat_length is zero.\n if total_repeat_length == 0:\n result_shape = list(a.shape)\n result_shape[axis] = 0\n return reshape(array([], dtype=a.dtype), result_shape)\n\n # If repeats is on a zero sized axis, then return the array.\n if a.shape[axis] == 0:\n return a\n\n # This implementation of repeat avoid having to instantiate a large.\n # intermediate tensor.\n\n # Modify repeats from e.g. [1,2,0,5] -> [0,1,2,0] for exclusive repeat.\n exclusive_repeats = roll(repeats, shift=1).at[0].set(0)\n # Cumsum to get indices of new number in repeated tensor, e.g. [0, 1, 3, 3]\n scatter_indices = cumsum(exclusive_repeats)\n # Scatter these onto a zero buffer, e.g. [1,1,0,2,0,0,0,0]\n block_split_indicators = ops.index_add(\n x=zeros([total_repeat_length], dtype=int32),\n idx=scatter_indices,\n y=1)\n # Cumsum again to get scatter indices for repeat, e.g. 
[0,1,1,3,3,3,3,3]\n gather_indices = cumsum(block_split_indicators) - 1\n return take(a, gather_indices, axis=axis)\n\n\n@_wraps(np.tri)\ndef tri(N, M=None, k=0, dtype=None):\n lax._check_user_dtype_supported(dtype, \"tri\")\n M = M if M is not None else N\n dtype = dtype or float32\n return lax._tri(dtype, (N, M), k)\n\n\n@_wraps(np.tril)\ndef tril(m, k=0):\n _check_arraylike(\"tril\", m)\n m_shape = shape(m)\n if len(m_shape) < 2:\n raise ValueError(\"Argument to jax.numpy.tril must be at least 2D\")\n mask = tri(*m_shape[-2:], k=k, dtype=bool)\n return lax.select(lax.broadcast(mask, m_shape[:-2]), m, zeros_like(m))\n\n\n@_wraps(np.triu, update_doc=False)\ndef triu(m, k=0):\n _check_arraylike(\"triu\", m)\n m_shape = shape(m)\n if len(m_shape) < 2:\n raise ValueError(\"Argument to jax.numpy.triu must be at least 2D\")\n mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)\n return lax.select(lax.broadcast(mask, m_shape[:-2]), zeros_like(m), m)\n\n\n@_wraps(np.trace, skip_params=['out'])\ndef trace(a, offset=0, axis1: int = 0, axis2: int = 1, dtype=None, out=None):\n _check_arraylike(\"trace\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.trace is not supported.\")\n lax._check_user_dtype_supported(dtype, \"trace\")\n\n axis1 = _canonicalize_axis(axis1, ndim(a))\n axis2 = _canonicalize_axis(axis2, ndim(a))\n\n a_shape = shape(a)\n if dtype is None:\n dtype = _dtype(a)\n if issubdtype(dtype, integer):\n default_int = dtypes.canonicalize_dtype(np.int_)\n if iinfo(dtype).bits < iinfo(default_int).bits:\n dtype = default_int\n\n # Move the axis? dimensions to the end.\n perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]\n perm = perm + [axis1, axis2]\n a = lax.transpose(a, perm)\n\n # Mask out the diagonal and reduce.\n a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),\n a, zeros_like(a))\n return sum(a, axis=(-2, -1), dtype=dtype)\n\n\ndef _wrap_indices_function(f):\n @_wraps(f, update_doc=False)\n def wrapper(*args, **kwargs):\n return tuple(asarray(x) for x in f(*args, **kwargs))\n return wrapper\n\ntril_indices = _wrap_indices_function(np.tril_indices)\ntriu_indices = _wrap_indices_function(np.triu_indices)\nmask_indices = _wrap_indices_function(np.mask_indices)\n\n\n@_wraps(np.triu_indices_from)\ndef triu_indices_from(arr, k=0):\n return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])\n\n\n@_wraps(np.tril_indices_from)\ndef tril_indices_from(arr, k=0):\n return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])\n\n\n@_wraps(np.diag_indices)\ndef diag_indices(n, ndim=2):\n n = core.concrete_or_error(operator.index, n, \"'n' argument of jnp.diag_indices()\")\n ndim = core.concrete_or_error(operator.index, ndim, \"'ndim' argument of jnp.diag_indices()\")\n if n < 0:\n raise ValueError(\"n argument to diag_indices must be nonnegative, got {}\"\n .format(n))\n if ndim < 0:\n raise ValueError(\"ndim argument to diag_indices must be nonnegative, got {}\"\n .format(ndim))\n return (lax.iota(int_, n),) * ndim\n\n@_wraps(np.diag_indices_from)\ndef diag_indices_from(arr):\n _check_arraylike(\"diag_indices_from\", arr)\n if not arr.ndim >= 2:\n raise ValueError(\"input array must be at least 2-d\")\n\n if len(set(arr.shape)) != 1:\n raise ValueError(\"All dimensions of input must be of equal length\")\n\n return diag_indices(arr.shape[0], ndim=arr.ndim)\n\n@_wraps(np.diagonal, lax_description=_ARRAY_VIEW_DOC)\ndef diagonal(a, offset=0, axis1: int = 0, axis2: int = 1):\n _check_arraylike(\"diagonal\", a)\n a_shape = shape(a)\n 
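# Illustrative sketch, not authoritative (assumes the default 32-bit integer dtype): the code below masks the input with an offset identity matrix, sums over one of the two chosen axes, and slices the result down to the true diagonal length, e.g.\n # >>> jnp.diagonal(jnp.arange(9).reshape(3, 3)) # [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n # DeviceArray([0, 4, 8], dtype=int32)\n # >>> jnp.diagonal(jnp.arange(9).reshape(3, 3), offset=1)\n # DeviceArray([1, 5], dtype=int32)\n 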
a_ndims = len(a_shape)\n offset = core.concrete_or_error(operator.index, offset, \"'offset' argument of jnp.diagonal()\")\n\n # Move the two dimensions to the end.\n axis1 = _canonicalize_axis(axis1, a_ndims)\n axis2 = _canonicalize_axis(axis2, a_ndims)\n perm = [i for i in range(a_ndims) if i != axis1 and i != axis2]\n perm = perm + [axis1, axis2]\n a = lax.transpose(a, perm)\n\n # Mask out the diagonal and reduce over one of the axes\n a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),\n a, zeros_like(a))\n reduce_axis = -2 if offset < 0 else -1\n d = sum(a, axis=reduce_axis, dtype=_dtype(a))\n\n # Slice out the correct diagonal size.\n diag_size = _max(0, _min(a_shape[axis1] + _min(offset, 0),\n a_shape[axis2] - _max(offset, 0)))\n return lax.slice_in_dim(d, 0, diag_size, axis=-1)\n\n\n@_wraps(np.diag, lax_description=_ARRAY_VIEW_DOC)\ndef diag(v, k=0):\n _check_arraylike(\"diag\", v)\n v_shape = shape(v)\n if len(v_shape) == 1:\n zero = lambda x: lax.full_like(x, shape=(), fill_value=0)\n n = v_shape[0] + _abs(k)\n v = lax.pad(v, zero(v), ((_max(0, k), _max(0, -k), 0),))\n return where(eye(n, k=k, dtype=bool), v, zeros_like(v))\n elif len(v_shape) == 2:\n return diagonal(v, offset=k)\n else:\n raise ValueError(\"diag input must be 1d or 2d\")\n\n_SCALAR_VALUE_DOC=\"\"\"\\\nThis differs from np.diagflat for some scalar values of v,\njax always returns a two-dimensional array, whereas numpy may\nreturn a scalar depending on the type of v.\n\"\"\"\n\n@_wraps(np.diagflat, lax_description=_SCALAR_VALUE_DOC)\ndef diagflat(v, k=0):\n _check_arraylike(\"diagflat\", v)\n v = ravel(v)\n v_length = len(v)\n adj_length = v_length + _abs(k)\n res = zeros(adj_length*adj_length, dtype=v.dtype)\n i = arange(0, adj_length-_abs(k))\n if (k >= 0):\n fi = i+k+i*adj_length\n else:\n fi = i+(i-k)*adj_length\n res = ops.index_update(res, ops.index[fi], v)\n res = res.reshape(adj_length,adj_length)\n return res\n\n_POLY_DOC=\"\"\"\\\nThis differs from np.poly when an integer array is given.\nnp.poly returns a result with dtype float64 in this case.\njax returns a result with an inexact type, but not necessarily\nfloat64.\n\nThis also differs from np.poly when the input array strictly\ncontains pairs of complex conjugates, e.g. [1j, -1j, 1-1j, 1+1j].\nnp.poly returns an array with a real dtype in such cases.\njax returns an array with a complex dtype in such cases.\n\"\"\"\n\n@_wraps(np.poly, lax_description=_POLY_DOC)\ndef poly(seq_of_zeros):\n _check_arraylike('poly', seq_of_zeros)\n seq_of_zeros, = _promote_dtypes_inexact(seq_of_zeros)\n seq_of_zeros = atleast_1d(seq_of_zeros)\n\n sh = seq_of_zeros.shape\n if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:\n # import at runtime to avoid circular import\n from . 
import linalg\n seq_of_zeros = linalg.eigvals(seq_of_zeros)\n\n if seq_of_zeros.ndim != 1:\n raise ValueError(\"input must be 1d or non-empty square 2d array.\")\n\n dt = seq_of_zeros.dtype\n if len(seq_of_zeros) == 0:\n return ones((), dtype=dt)\n\n a = ones((1,), dtype=dt)\n for k in range(len(seq_of_zeros)):\n a = convolve(a, array([1, -seq_of_zeros[k]], dtype=dt), mode='full')\n\n return a\n\n\n@_wraps(np.polyval)\ndef polyval(p, x):\n if isinstance(p, np.poly1d):\n p = np.asarray(p)\n if isinstance(x, np.poly1d):\n y = 0\n else:\n y = zeros_like(x)\n for i in range(len(p)):\n y = y * x + p[i]\n return y\n\n@_wraps(np.polyadd)\ndef polyadd(a1, a2):\n a1 = asarray(a1)\n a2 = asarray(a2)\n\n if a2.shape[0] <= a1.shape[0]:\n return a1.at[-a2.shape[0]:].add(a2)\n else:\n return a2.at[-a1.shape[0]:].add(a1)\n\n\n@_wraps(np.polyint)\ndef polyint(p, m=1, k=None):\n m = core.concrete_or_error(operator.index, m, \"'m' argument of jnp.polyint\")\n p = asarray(p)\n if m < 0:\n raise ValueError(\"Order of integral must be positive (see polyder)\")\n if k is None:\n k = zeros(m)\n k = atleast_1d(k)\n if len(k) == 1:\n k = full((m,), k[0])\n if len(k) != m or k.ndim > 1:\n raise ValueError(\"k must be a scalar or a rank-1 array of length 1 or m.\")\n if m == 0:\n return p\n else:\n coeff = maximum(1, arange(len(p) + m, 0, -1) - 1 - arange(m)[:, newaxis]).prod(0)\n return true_divide(concatenate((p, k)), coeff)\n\n\n@_wraps(np.polyder)\ndef polyder(p, m=1):\n m = core.concrete_or_error(operator.index, m, \"'m' argument of jnp.polyder\")\n p = asarray(p)\n if m < 0:\n raise ValueError(\"Order of derivative must be positive\")\n if m == 0:\n return p\n coeff = (arange(len(p), m, -1) - 1 - arange(m)[:, newaxis]).prod(0)\n return p[:-m] * coeff\n\n@_wraps(np.trim_zeros)\ndef trim_zeros(filt, trim='fb'):\n filt = core.concrete_or_error(asarray, filt,\n \"Error arose in the `filt` argument of trim_zeros()\")\n nz = asarray(filt) == 0\n if all(nz):\n return empty(0, _dtype(filt))\n start = argmin(nz) if 'f' in trim.lower() else 0\n end = argmin(nz[::-1]) if 'b' in trim.lower() else 0\n return filt[start:len(filt) - end]\n\n_LEADING_ZEROS_DOC=\"\"\"\\\nSetting trim_leading_zeros=True makes the output match that of numpy.\nBut prevents the function from being able to be used in compiled code.\n\"\"\"\n\n@_wraps(np.polymul, lax_description=_LEADING_ZEROS_DOC)\ndef polymul(a1, a2, *, trim_leading_zeros=False):\n if isinstance(a1, np.poly1d):\n a1 = asarray(a1)\n if isinstance(a2, np.poly1d):\n a2 = asarray(a2)\n if trim_leading_zeros and (len(a1) > 1 or len(a2) > 1):\n a1, a2 = trim_zeros(a1, trim='f'), trim_zeros(a2, trim='f')\n if len(a1) == 0:\n a1 = asarray([0.])\n if len(a2) == 0:\n a2 = asarray([0.])\n val = convolve(a1, a2, mode='full')\n return val\n\n@_wraps(np.polysub)\ndef polysub(a1, a2):\n return polyadd(asarray(a1), -asarray(a2))\n\n\n@_wraps(np.append)\ndef append(arr, values, axis: Optional[int] = None):\n if axis is None:\n return concatenate([ravel(arr), ravel(values)], 0)\n else:\n return concatenate([arr, values], axis=axis)\n\n\n@_wraps(np.delete)\ndef delete(arr, obj, axis=None):\n _check_arraylike(\"delete\", arr)\n if axis is None:\n arr = ravel(arr)\n axis = 0\n axis = _canonicalize_axis(axis, arr.ndim)\n\n # Case 1: obj is a static integer.\n try:\n obj = operator.index(obj)\n obj = _canonicalize_axis(obj, arr.shape[axis])\n except TypeError:\n pass\n else:\n idx = tuple(slice(None) for i in range(axis))\n return concatenate([arr[idx + (slice(0, obj),)], arr[idx + (slice(obj + 1, 
None),)]], axis=axis)\n\n # Case 2: obj is a static slice.\n if isinstance(obj, slice):\n # TODO(jakevdp): we should be able to do this dynamically with care.\n indices = np.delete(np.arange(arr.shape[axis]), obj)\n return take(arr, indices, axis=axis)\n\n # Case 3: obj is an array\n # NB: pass both arrays to check for appropriate error message.\n _check_arraylike(\"delete\", arr, obj)\n obj = core.concrete_or_error(np.asarray, obj, \"'obj' array argument of jnp.delete()\")\n\n if issubdtype(obj.dtype, integer):\n # TODO(jakevdp): in theory this could be done dynamically if obj has no duplicates,\n # but this would require the complement of lax.gather.\n mask = np.ones(arr.shape[axis], dtype=bool)\n mask[obj] = False\n elif obj.dtype == bool:\n if obj.shape != (arr.shape[axis],):\n raise ValueError(\"np.delete(arr, obj): for boolean indices, obj must be one-dimensional \"\n \"with length matching specified axis.\")\n mask = ~obj\n else:\n raise ValueError(f\"np.delete(arr, obj): got obj.dtype={obj.dtype}; must be integer or bool.\")\n return arr[tuple(slice(None) for i in range(axis)) + (mask,)]\n\n\n@_wraps(np.apply_along_axis)\ndef apply_along_axis(func1d, axis: int, arr, *args, **kwargs):\n num_dims = ndim(arr)\n axis = _canonicalize_axis(axis, num_dims)\n func = lambda arr: func1d(arr, *args, **kwargs)\n for i in range(1, num_dims - axis):\n func = jax.vmap(func, in_axes=i, out_axes=-1)\n for i in range(axis):\n func = jax.vmap(func, in_axes=0, out_axes=0)\n return func(arr)\n\n\n@_wraps(np.apply_over_axes)\ndef apply_over_axes(func, a, axes):\n for axis in axes:\n b = func(a, axis=axis)\n if b.ndim == a.ndim:\n a = b\n elif b.ndim == a.ndim - 1:\n a = expand_dims(b, axis)\n else:\n raise ValueError(\"function is not returning an array of the correct shape\")\n return a\n\n\n### Tensor contraction operations\n\n\n@_wraps(np.dot, lax_description=_PRECISION_DOC)\ndef dot(a, b, *, precision=None): # pylint: disable=missing-docstring\n _check_arraylike(\"dot\", a, b)\n a, b = _promote_dtypes(a, b)\n a_ndim, b_ndim = ndim(a), ndim(b)\n if a_ndim == 0 or b_ndim == 0:\n return lax.mul(a, b)\n if _max(a_ndim, b_ndim) <= 2:\n return lax.dot(a, b, precision=precision)\n\n if b_ndim == 1:\n contract_dims = ((a_ndim - 1,), (0,))\n else:\n contract_dims = ((a_ndim - 1,), (b_ndim - 2,))\n batch_dims = ((), ())\n return lax.dot_general(a, b, (contract_dims, batch_dims), precision)\n\n\n@_wraps(np.matmul, lax_description=_PRECISION_DOC)\ndef matmul(a, b, *, precision=None): # pylint: disable=missing-docstring\n _check_arraylike(\"matmul\", a, b)\n for i, x in enumerate((a, b)):\n if ndim(x) < 1:\n msg = (f\"matmul input operand {i} must have ndim at least 1, \"\n f\"but it has ndim {ndim(x)}\")\n raise ValueError(msg)\n\n a, b = _promote_dtypes(a, b)\n\n a_is_mat, b_is_mat = (ndim(a) > 1), (ndim(b) > 1)\n a_batch_dims = shape(a)[:-2] if a_is_mat else ()\n b_batch_dims = shape(b)[:-2] if b_is_mat else ()\n num_batch_dims = _max(len(a_batch_dims), len(b_batch_dims))\n a_batch_dims = (None,) * (num_batch_dims - len(a_batch_dims)) + a_batch_dims\n b_batch_dims = (None,) * (num_batch_dims - len(b_batch_dims)) + b_batch_dims\n\n # Dimensions to squeeze from the inputs.\n a_squeeze = []\n b_squeeze = []\n\n # Positions of batch dimensions in squeezed inputs.\n a_batch = []\n b_batch = []\n\n # Desired index in final output of each kind of dimension, in the order that\n # lax.dot_general will emit them.\n idx_batch = []\n idx_a_other = [] # other = non-batch, non-contracting.\n idx_b_other = []\n for i, (ba, 
bb) in enumerate(zip(a_batch_dims, b_batch_dims)):\n if ba is None:\n idx_b_other.append(i)\n elif bb is None:\n idx_a_other.append(i)\n elif core.symbolic_equal_dim(ba, 1):\n idx_b_other.append(i)\n a_squeeze.append(len(idx_batch) + len(idx_a_other) + len(a_squeeze))\n elif core.symbolic_equal_dim(bb, 1):\n idx_a_other.append(i)\n b_squeeze.append(len(idx_batch) + len(idx_b_other) + len(b_squeeze))\n elif core.symbolic_equal_dim(ba, bb):\n a_batch.append(len(idx_batch) + len(idx_a_other))\n b_batch.append(len(idx_batch) + len(idx_b_other))\n idx_batch.append(i)\n else:\n raise ValueError(\"Incompatible shapes for matmul arguments: {} and {}\"\n .format(shape(a), shape(b)))\n\n if a_is_mat: idx_a_other.append(num_batch_dims)\n if b_is_mat: idx_b_other.append(num_batch_dims + a_is_mat)\n perm = np.argsort(np.concatenate([idx_batch, idx_a_other, idx_b_other]))\n\n a = lax.squeeze(a, tuple(a_squeeze))\n b = lax.squeeze(b, tuple(b_squeeze))\n out = lax.dot_general(\n a, b, (((ndim(a) - 1,), (ndim(b) - 1 - b_is_mat,)), (a_batch, b_batch)),\n precision=precision)\n return lax.transpose(out, perm)\n\n\n@_wraps(np.vdot, lax_description=_PRECISION_DOC)\ndef vdot(a, b, *, precision=None):\n _check_arraylike(\"vdot\", a, b)\n if issubdtype(_dtype(a), complexfloating):\n a = conj(a)\n return dot(a.ravel(), b.ravel(), precision=precision)\n\n\n@_wraps(np.tensordot, lax_description=_PRECISION_DOC)\ndef tensordot(a, b, axes=2, *, precision=None):\n _check_arraylike(\"tensordot\", a, b)\n a_ndim = ndim(a)\n b_ndim = ndim(b)\n\n a, b = _promote_dtypes(a, b)\n if type(axes) is int:\n if axes > _min(a_ndim, b_ndim):\n msg = \"Number of tensordot axes (axes {}) exceeds input ranks ({} and {})\"\n raise TypeError(msg.format(axes, a.shape, b.shape))\n contracting_dims = tuple(range(a_ndim - axes, a_ndim)), tuple(range(axes))\n elif type(axes) in (list, tuple) and len(axes) == 2:\n ax1, ax2 = axes\n if type(ax1) == type(ax2) == int:\n contracting_dims = ((_canonicalize_axis(ax1, a_ndim),),\n (_canonicalize_axis(ax2, b_ndim),))\n elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):\n if len(ax1) != len(ax2):\n msg = \"tensordot requires axes lists to have equal length, got {} and {}.\"\n raise TypeError(msg.format(ax1, ax2))\n contracting_dims = (tuple(_canonicalize_axis(i, a_ndim) for i in ax1),\n tuple(_canonicalize_axis(i, b_ndim) for i in ax2))\n else:\n msg = (\"tensordot requires both axes lists to be either ints, tuples or \"\n \"lists, got {} and {}\")\n raise TypeError(msg.format(ax1, ax2))\n else:\n msg = (\"tensordot axes argument must be an int, a pair of ints, or a pair \"\n \"of lists/tuples of ints.\")\n raise TypeError(msg)\n return lax.dot_general(a, b, (contracting_dims, ((), ())),\n precision=precision)\n\n\n@_wraps(np.einsum, lax_description=_PRECISION_DOC, skip_params=['out'])\ndef einsum(*operands, out=None, optimize='greedy', precision=None,\n _use_xeinsum=False):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.einsum is not supported.\")\n\n if (_use_xeinsum or isinstance(operands[0], str) and '{' in operands[0] and\n len(operands[1:]) == 2):\n return lax.xeinsum(*operands)\n\n optimize = 'greedy' if optimize is True else optimize\n # using einsum_call=True here is an internal api for opt_einsum\n\n # Allow handling of shape polymorphism\n non_constant_dim_types = {\n type(d) for op in operands if not isinstance(op, str)\n for d in np.shape(op) if not core.is_constant_dim(d)\n }\n if not non_constant_dim_types:\n einsum_contract_path_fn = 
opt_einsum.contract_path\n else:\n einsum_contract_path_fn = _polymorphic_einsum_contract_path_handlers[next(iter(non_constant_dim_types))]\n operands, contractions = einsum_contract_path_fn(\n *operands, einsum_call=True, use_blas=True, optimize=optimize)\n\n contractions = tuple((a, frozenset(b), c) for a, b, c, *_ in contractions)\n return _einsum(operands, contractions, precision)\n\n# Enable other modules to override einsum_contact_path.\n# Indexed by the type of the non constant dimension\n_polymorphic_einsum_contract_path_handlers = {} # type: ignore\n\n@_wraps(np.einsum_path)\ndef einsum_path(subscripts, *operands, optimize='greedy'):\n # using einsum_call=True here is an internal api for opt_einsum\n return opt_einsum.contract_path(subscripts, *operands, optimize=optimize)\n\ndef _removechars(s, chars):\n return s.translate(str.maketrans(dict.fromkeys(chars)))\n\n@partial(jit, static_argnums=(1, 2))\ndef _einsum(operands: Sequence,\n contractions: Sequence[Tuple[Tuple[int, ...], FrozenSet[str], str]],\n precision):\n operands = list(_promote_dtypes(*operands))\n def sum(x, axes):\n return lax.reduce(x, np.array(0, x.dtype),\n lax.add if x.dtype != bool_ else lax.bitwise_or, axes)\n\n def sum_uniques(operand, names, uniques):\n if uniques:\n axes = [names.index(name) for name in uniques]\n operand = sum(operand, axes)\n names = _removechars(names, uniques)\n return operand, names\n\n def sum_repeats(operand, names, counts, keep_names):\n for name, count in counts.items():\n if count > 1:\n axes = [i for i, n in enumerate(names) if n == name]\n eye = lax._delta(operand.dtype, operand.shape, axes)\n if name not in keep_names:\n operand = sum(operand * eye, axes)\n names = names.replace(name, '')\n else:\n operand = sum(operand * eye, axes[:-1])\n names = names.replace(name, '', count - 1)\n return operand, names\n\n def filter_singleton_dims(operand, names, other_shape, other_names):\n s = shape(operand)\n new_shape = []\n new_names = []\n for i, d in enumerate(names):\n other_i = other_names.find(d)\n if not core.symbolic_equal_dim(s[i], 1) or other_i == -1 or core.symbolic_equal_dim(other_shape[other_i], 1):\n new_shape.append(s[i])\n new_names.append(d)\n return reshape(operand, tuple(new_shape)), \"\".join(new_names)\n\n for operand_indices, contracted_names_set, einstr in contractions:\n contracted_names = sorted(contracted_names_set)\n input_str, result_names = einstr.split('->')\n input_names = input_str.split(',')\n\n # switch on the number of operands to be processed in this loop iteration.\n # every case here sets 'operand' and 'names'.\n if len(operand_indices) == 1:\n operand = operands.pop(operand_indices[0])\n names, = input_names\n counts = collections.Counter(names)\n\n # sum out unique contracted indices with a single reduce-sum\n uniques = [name for name in contracted_names if counts[name] == 1]\n operand, names = sum_uniques(operand, names, uniques)\n\n # for every repeated index, do a contraction against an identity matrix\n operand, names = sum_repeats(operand, names, counts, result_names)\n\n elif len(operand_indices) == 2:\n lhs, rhs = map(operands.pop, operand_indices)\n lhs_names, rhs_names = input_names\n\n # handle cases where one side of a contracting or batch dimension is 1\n # but its counterpart is not.\n lhs, lhs_names = filter_singleton_dims(lhs, lhs_names, shape(rhs),\n rhs_names)\n rhs, rhs_names = filter_singleton_dims(rhs, rhs_names, shape(lhs),\n lhs_names)\n\n lhs_counts = collections.Counter(lhs_names)\n rhs_counts = 
collections.Counter(rhs_names)\n\n # sum out unique contracted indices in lhs and rhs\n lhs_uniques = [name for name in contracted_names\n if lhs_counts[name] == 1 and rhs_counts[name] == 0]\n lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques)\n\n rhs_uniques = [name for name in contracted_names\n if rhs_counts[name] == 1 and lhs_counts[name] == 0]\n rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques)\n\n # for every repeated index, contract against an identity matrix\n lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,\n result_names + rhs_names)\n rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,\n result_names + lhs_names)\n\n lhs_or_rhs_names = set(lhs_names) | set(rhs_names)\n contracted_names = [x for x in contracted_names if x in lhs_or_rhs_names]\n lhs_and_rhs_names = set(lhs_names) & set(rhs_names)\n batch_names = [x for x in result_names if x in lhs_and_rhs_names]\n\n lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))\n for n in batch_names)\n\n # NOTE(mattjj): this can fail non-deterministically in python3, maybe\n # due to opt_einsum\n assert _all(\n name in lhs_names and name in rhs_names and\n lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]\n for name in contracted_names)\n\n # contract using lax.dot_general\n batch_names_str = ''.join(batch_names)\n lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))\n for n in contracted_names)\n deleted_names = batch_names_str + ''.join(contracted_names)\n remaining_lhs_names = _removechars(lhs_names, deleted_names)\n remaining_rhs_names = _removechars(rhs_names, deleted_names)\n # Try both orders of lhs and rhs, in the hope that one of them means we\n # don't need an explicit transpose. opt_einsum likes to contract from\n # right to left, so we expect (rhs,lhs) to have the best chance of not\n # needing a transpose.\n names = batch_names_str + remaining_rhs_names + remaining_lhs_names\n if names == result_names:\n dimension_numbers = ((rhs_cont, lhs_cont), (rhs_batch, lhs_batch))\n operand = lax.dot_general(rhs, lhs, dimension_numbers, precision)\n else:\n names = batch_names_str + remaining_lhs_names + remaining_rhs_names\n dimension_numbers = ((lhs_cont, rhs_cont), (lhs_batch, rhs_batch))\n operand = lax.dot_general(lhs, rhs, dimension_numbers, precision)\n else:\n raise NotImplementedError # if this is actually reachable, open an issue!\n\n # the resulting 'operand' with axis labels 'names' should be a permutation\n # of the desired result\n assert len(names) == len(result_names) == len(set(names))\n assert set(names) == set(result_names)\n if names != result_names:\n perm = tuple([names.index(name) for name in result_names])\n operand = lax.transpose(operand, perm)\n operands.append(operand) # used in next iteration\n\n return operands[0]\n\n\ndef _movechars(s, src, dst):\n \"\"\"Helper for einsum string munging, like moveaxis on identifier strings.\"\"\"\n chars = [c for i, c in enumerate(s) if i not in src]\n for i, j in sorted(zip(dst, src)):\n chars.insert(i, s[j])\n return ''.join(chars)\n\n\n@_wraps(np.inner, lax_description=_PRECISION_DOC)\ndef inner(a, b, *, precision=None):\n if ndim(a) == 0 or ndim(b) == 0:\n return a * b\n return tensordot(a, b, (-1, -1), precision=precision)\n\n\n@_wraps(np.outer, skip_params=['out'])\ndef outer(a, b, out=None):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.outer is not supported.\")\n a, b = _promote_dtypes(a, b)\n return ravel(a)[:, None] * ravel(b)[None, :]\n\n@partial(jit, 
static_argnums=(2, 3, 4))\ndef _cross(a, b, axisa, axisb, axisc):\n a = moveaxis(a, axisa, -1)\n b = moveaxis(b, axisb, -1)\n\n if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):\n raise ValueError(\"Dimension must be either 2 or 3 for cross product\")\n\n if a.shape[-1] == 2 and b.shape[-1] == 2:\n return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]\n\n a0 = a[..., 0]\n a1 = a[..., 1]\n a2 = a[..., 2] if a.shape[-1] == 3 else zeros_like(a0)\n b0 = b[..., 0]\n b1 = b[..., 1]\n b2 = b[..., 2] if b.shape[-1] == 3 else zeros_like(b0)\n c = array([a1 * b2 - a2 * b1, a2 * b0 - a0 * b2, a0 * b1 - a1 * b0])\n return moveaxis(c, 0, axisc)\n\n@_wraps(np.cross)\ndef cross(a, b, axisa: int = -1, axisb: int = -1, axisc: int = -1,\n axis: Optional[int] = None):\n if axis is not None:\n axisa = axis\n axisb = axis\n axisc = axis\n return _cross(a, b, axisa, axisb, axisc)\n\n@_wraps(np.kron)\ndef kron(a, b):\n a, b = _promote_dtypes(a, b)\n if ndim(a) < ndim(b):\n a = reshape(a, (1,) * (ndim(b) - ndim(a)) + shape(a))\n elif ndim(b) < ndim(a):\n b = reshape(b, (1,) * (ndim(a) - ndim(b)) + shape(b))\n a_reshaped = reshape(a, [i for d in shape(a) for i in (d, 1)])\n b_reshaped = reshape(b, [i for d in shape(b) for i in (1, d)])\n out_shape = tuple(np.multiply(shape(a), shape(b)))\n return reshape(lax.mul(a_reshaped, b_reshaped), out_shape)\n\n\n@_wraps(np.vander)\ndef vander(x, N=None, increasing=False):\n x = asarray(x)\n dtype = _dtype(x)\n if ndim(x) != 1:\n raise ValueError(\"x must be a one-dimensional array\")\n x_shape = shape(x)\n N = x_shape[0] if N is None else core.concrete_or_error(\n operator.index, N, \"'N' argument of jnp.vander()\")\n if N < 0:\n raise ValueError(\"N must be nonnegative\")\n\n iota = lax.iota(dtype, N)\n if not increasing:\n iota = lax.sub(lax._const(iota, N - 1), iota)\n\n return power(x[..., None], iota)\n\n\n### Misc\n\n_ARGWHERE_DOC = \"\"\"\\\nBecause the size of the output of ``argwhere`` is data-dependent, the function is not\ntypically compatible with JIT. The JAX version adds the optional ``size`` argument, which\nspecifies the size of the leading dimension of the output - it must be specified statically\nfor ``jnp.argwhere`` to be traced. 
If ``size`` is specified, the indices of the first ``size``\nTrue elements will be returned; if there are fewer nonzero elements than `size` indicates,\nthe index arrays will be zero-padded.\n\"\"\"\n\n@_wraps(np.argwhere, lax_description=_ARGWHERE_DOC)\ndef argwhere(a, *, size=None):\n result = transpose(vstack(nonzero(a, size=size)))\n if ndim(a) == 0:\n return result[:0].reshape(result.shape[0], 0)\n return result.reshape(result.shape[0], ndim(a))\n\n\n@_wraps(np.argmax, skip_params=['out'])\ndef argmax(a, axis: Optional[int] = None, out=None):\n _check_arraylike(\"argmax\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.argmax is not supported.\")\n if axis is None:\n a = ravel(a)\n axis = 0\n if a.shape[axis] == 0:\n raise ValueError(\"attempt to get argmax of an empty sequence\")\n return lax.argmax(a, _canonicalize_axis(axis, a.ndim), int64)\n\n@_wraps(np.argmin, skip_params=['out'])\ndef argmin(a, axis: Optional[int] = None, out=None):\n _check_arraylike(\"argmin\", a)\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.argmin is not supported.\")\n if axis is None:\n a = ravel(a)\n axis = 0\n if a.shape[axis] == 0:\n raise ValueError(\"attempt to get argmin of an empty sequence\")\n return lax.argmin(a, _canonicalize_axis(axis, a.ndim), int64)\n\n\n_NANARG_DOC = \"\"\"\\\nWarning: jax.numpy.arg{} returns -1 for all-NaN slices and does not raise\nan error.\n\"\"\"\n\n@_wraps(np.nanargmax, lax_description=_NANARG_DOC.format(\"max\"))\ndef nanargmax(a, axis: Optional[int] = None):\n _check_arraylike(\"nanargmax\", a)\n if not issubdtype(_dtype(a), inexact):\n return argmax(a, axis=axis)\n nan_mask = isnan(a)\n a = where(nan_mask, -inf, a)\n res = argmax(a, axis=axis)\n return where(all(nan_mask, axis=axis), -1, res)\n\n@_wraps(np.nanargmin, lax_description=_NANARG_DOC.format(\"min\"))\ndef nanargmin(a, axis: Optional[int] = None):\n _check_arraylike(\"nanargmin\", a)\n if not issubdtype(_dtype(a), inexact):\n return argmin(a, axis=axis)\n nan_mask = isnan(a)\n a = where(nan_mask, inf, a)\n res = argmin(a, axis=axis)\n return where(all(nan_mask, axis=axis), -1, res)\n\n\n@_wraps(np.sort)\ndef sort(a, axis: Optional[int] = -1, kind='quicksort', order=None):\n _check_arraylike(\"sort\", a)\n if kind != 'quicksort':\n warnings.warn(\"'kind' argument to sort is ignored.\")\n if order is not None:\n raise ValueError(\"'order' argument to sort is not supported.\")\n\n if axis is None:\n return lax.sort(a.ravel(), dimension=0)\n else:\n return lax.sort(a, dimension=_canonicalize_axis(axis, ndim(a)))\n\n@_wraps(np.sort_complex)\ndef sort_complex(a):\n _check_arraylike(\"sort_complex\", a)\n a = lax.sort(a, dimension=0)\n return lax.convert_element_type(a, result_type(a, dtypes.canonicalize_dtype(complex_)))\n\n@_wraps(np.lexsort)\ndef lexsort(keys, axis=-1):\n keys = tuple(keys)\n if len(keys) == 0:\n raise TypeError(\"need sequence of keys with len > 0 in lexsort\")\n if len({shape(key) for key in keys}) > 1:\n raise ValueError(\"all keys need to be the same shape\")\n if ndim(keys[0]) == 0:\n return np.int64(0)\n axis = _canonicalize_axis(axis, ndim(keys[0]))\n iota = lax.broadcasted_iota(np.int64, shape(keys[0]), axis)\n return lax.sort((*keys[::-1], iota), dimension=axis, num_keys=len(keys))[-1]\n\n\n@_wraps(np.argsort)\ndef argsort(a, axis: Optional[int] = -1, kind='quicksort', order=None):\n _check_arraylike(\"argsort\", a)\n if kind != 'quicksort':\n warnings.warn(\"'kind' argument to argsort is ignored.\")\n if order is not 
None:\n raise ValueError(\"'order' argument to argsort is not supported.\")\n\n if axis is None:\n return argsort(a.ravel(), 0)\n else:\n axis_num = _canonicalize_axis(axis, ndim(a))\n iota = lax.broadcasted_iota(np.int64, shape(a), axis_num)\n _, perm = lax.sort_key_val(a, iota, dimension=axis_num)\n return perm\n\n\n@_wraps(np.msort)\ndef msort(a):\n return sort(a, axis=0)\n\n\n@partial(jit, static_argnums=(2,))\ndef _roll(a, shift, axis):\n a = asarray(a)\n a_shape = shape(a)\n if axis is None:\n return lax.reshape(roll(ravel(a), shift, axis=0), a_shape)\n\n a_ndim = len(a_shape)\n shift = asarray(shift)\n axis = np.asarray(axis)\n b_shape = lax.broadcast_shapes(shift.shape, axis.shape, (1,))\n if len(b_shape) != 1:\n msg = \"'shift' and 'axis' arguments to roll must be scalars or 1D arrays\"\n raise ValueError(msg)\n\n for x, i in zip(broadcast_to(shift, b_shape),\n np.broadcast_to(axis, b_shape)):\n i = _canonicalize_axis(i, a_ndim)\n x = remainder(x, (a_shape[i] or 1))\n a = lax.concatenate((a, a), i)\n a = lax.dynamic_slice_in_dim(a, a_shape[i] - x, a_shape[i], axis=i)\n return a\n\n\n@_wraps(np.roll)\ndef roll(a, shift, axis: Optional[Union[int, Sequence[int]]] = None):\n if isinstance(axis, list):\n axis = tuple(axis)\n return _roll(a, shift, axis)\n\n\n@_wraps(np.rollaxis, lax_description=_ARRAY_VIEW_DOC)\ndef rollaxis(a, axis: int, start=0):\n _check_arraylike(\"rollaxis\", a)\n start = core.concrete_or_error(operator.index, start, \"'start' argument of jnp.rollaxis()\")\n a_ndim = ndim(a)\n axis = _canonicalize_axis(axis, a_ndim)\n if not (-a_ndim <= start <= a_ndim):\n raise ValueError(f\"start={start} must satisfy {-a_ndim}<=start<={a_ndim}\")\n if start < 0:\n start += a_ndim\n if start > axis:\n start -= 1\n return moveaxis(a, axis, start)\n\n\n@_wraps(np.packbits)\ndef packbits(a, axis: Optional[int] = None, bitorder='big'):\n a = asarray(a)\n if not (issubdtype(dtype(a), integer) or issubdtype(dtype(a), bool_)):\n raise TypeError('Expected an input array of integer or boolean data type')\n if bitorder not in ['little', 'big']:\n raise ValueError(\"'order' must be either 'little' or 'big'\")\n a = (a > 0).astype('uint8')\n bits = arange(8, dtype='uint8')\n if bitorder == 'big':\n bits = bits[::-1]\n if axis is None:\n a = ravel(a)\n axis = 0\n a = swapaxes(a, axis, -1)\n\n remainder = a.shape[-1] % 8\n if remainder:\n a = lax.pad(a, np.uint8(0),\n (a.ndim - 1) * [(0, 0, 0)] + [(0, 8 - remainder, 0)])\n\n a = a.reshape(a.shape[:-1] + (a.shape[-1] // 8, 8))\n packed = (a << bits).sum(-1).astype('uint8')\n return swapaxes(packed, axis, -1)\n\n\n@_wraps(np.unpackbits)\ndef unpackbits(a, axis: Optional[int] = None, count=None, bitorder='big'):\n a = asarray(a)\n if dtype(a) != uint8:\n raise TypeError(\"Expected an input array of unsigned byte data type\")\n if bitorder not in ['little', 'big']:\n raise ValueError(\"'order' must be either 'little' or 'big'\")\n bits = asarray(1) << arange(8, dtype='uint8')\n if bitorder == 'big':\n bits = bits[::-1]\n if axis is None:\n a = a.ravel()\n axis = 0\n a = swapaxes(a, axis, -1)\n unpacked = ((a[..., None] & bits) > 0).astype('uint8')\n unpacked = unpacked.reshape(unpacked.shape[:-2] + (-1,))[..., :count]\n return swapaxes(unpacked, axis, -1)\n\n\n@_wraps(np.take, skip_params=['out'])\ndef take(a, indices, axis: Optional[int] = None, out=None, mode=None):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.take is not supported.\")\n\n a = asarray(a)\n indices = asarray(indices)\n\n if axis is None:\n a = 
ravel(a)\n axis_idx = 0\n else:\n axis_idx = _canonicalize_axis(axis, ndim(a))\n\n if mode is None:\n # lax.gather() does not support negative indices, so we wrap them here\n indices = where(indices < 0, indices + a.shape[axis_idx], indices)\n elif mode == \"raise\":\n # TODO(phawkins): we have no way to report out of bounds errors yet.\n raise NotImplementedError(\"The 'raise' mode to jnp.take is not supported.\")\n elif mode == \"wrap\":\n indices = mod(indices, _constant_like(indices, a.shape[axis_idx]))\n elif mode != \"clip\":\n raise ValueError(\"Invalid mode '{}' for np.take\".format(mode))\n\n index_dims = len(shape(indices))\n slice_sizes = list(shape(a))\n if slice_sizes[axis_idx] == 0:\n if indices.size != 0:\n raise IndexError(\"Cannot do a non-empty jnp.take() from an empty axis.\")\n return a\n\n slice_sizes[axis_idx] = _min(indices.size, 1)\n dnums = lax.GatherDimensionNumbers(\n offset_dims=tuple(\n list(range(axis_idx)) +\n list(range(axis_idx + index_dims, len(a.shape) + index_dims - 1))),\n collapsed_slice_dims=(axis_idx,),\n start_index_map=(axis_idx,))\n return lax.gather(a, indices[..., None], dimension_numbers=dnums,\n slice_sizes=tuple(slice_sizes))\n\n\ndef _normalize_index(index, axis_size):\n \"\"\"Normalizes an index value in the range [-N, N) to the range [0, N).\"\"\"\n return lax.select(\n lax.lt(index, _constant_like(index, 0)),\n lax.add(index, _constant_like(index, axis_size)),\n index)\n\n@partial(jit, static_argnums=(2,))\ndef _take_along_axis(arr, indices, axis):\n if axis is None:\n if ndim(indices) != 1:\n msg = \"take_along_axis indices must be 1D if axis=None, got shape {}\"\n raise ValueError(msg.format(indices.shape))\n return take_along_axis(arr.ravel(), indices, 0)\n rank = ndim(arr)\n if rank != ndim(indices):\n msg = \"indices and arr must have the same number of dimensions; {} vs. 
{}\"\n raise ValueError(msg.format(ndim(indices), ndim(arr)))\n axis = _canonicalize_axis(axis, rank)\n\n def replace(tup, val):\n lst = list(tup)\n lst[axis] = val\n return tuple(lst)\n\n use_64bit_index = _any([not core.is_constant_dim(d) or d >= (1 << 31) for d in arr.shape])\n index_dtype = int64 if use_64bit_index else int32\n indices = lax.convert_element_type(indices, index_dtype)\n\n bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1))\n indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis]))\n arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis]))\n\n axis_size = arr.shape[axis]\n arr_shape = replace(arr.shape, 1)\n idx_shape = indices.shape\n out_shape = lax.broadcast_shapes(idx_shape, arr_shape)\n\n index_dims = [i for i, idx in enumerate(idx_shape) if i == axis or idx != 1]\n\n gather_index_shape = tuple(np.array(out_shape)[index_dims]) + (1,)\n gather_indices = []\n slice_sizes = []\n offset_dims = []\n start_index_map = []\n collapsed_slice_dims = []\n j = 0\n for i in range(rank):\n if i == axis:\n indices = _normalize_index(indices, axis_size)\n gather_indices.append(lax.reshape(indices, gather_index_shape))\n slice_sizes.append(1)\n start_index_map.append(i)\n collapsed_slice_dims.append(i)\n j += 1\n elif idx_shape[i] != 1:\n iota = lax.iota(_dtype(indices), out_shape[i])\n iota = lax.broadcast_in_dim(iota, gather_index_shape, (j,))\n gather_indices.append(iota)\n slice_sizes.append(1)\n start_index_map.append(i)\n collapsed_slice_dims.append(i)\n j += 1\n else:\n # If idx_shape[i] == 1, we can just take the entirety of the arr's axis\n # and avoid forming an iota index.\n offset_dims.append(i)\n slice_sizes.append(arr_shape[i])\n\n gather_indices = lax.concatenate(gather_indices, dimension=j)\n dnums = lax.GatherDimensionNumbers(\n offset_dims=tuple(offset_dims),\n collapsed_slice_dims=tuple(collapsed_slice_dims),\n start_index_map=tuple(start_index_map))\n return lax.gather(arr, gather_indices, dnums, tuple(slice_sizes))\n\n\n@_wraps(np.take_along_axis, update_doc=False)\ndef take_along_axis(arr, indices, axis: Optional[int]):\n _check_arraylike(\"take_along_axis\", arr)\n return _take_along_axis(arr, indices, axis)\n\n\n### SetOps\n\n@partial(jit, static_argnums=1)\ndef _unique1d_sorted_mask(ar, optional_indices=False):\n \"\"\"\n Helper function for unique which is jit-able\n \"\"\"\n\n ar = asarray(ar).flatten()\n\n if optional_indices:\n aux, perm = lax.sort_key_val(ar, lax.iota(int, len(ar)))\n else:\n perm = np.empty(0, dtype=int)\n aux = ar.sort()\n\n mask = ones(aux.shape, dtype=bool_).at[1:].set(aux[1:] != aux[:-1])\n\n return aux, mask, perm\n\ndef _unique1d(ar, return_index=False, return_inverse=False,\n return_counts=False, size=None):\n \"\"\"\n Find the unique elements of an array, ignoring shape.\n \"\"\"\n if ar.size == 0 and size is not None and size > 0:\n raise ValueError(\"jnp.unique(): Cannot pass nonzero size for zero-sized array.\")\n\n aux, mask, perm = _unique1d_sorted_mask(ar, return_index or return_inverse)\n ind = mask if size is None else nonzero(mask, size=size)\n\n ret = (aux[ind],)\n if return_index:\n ret += (perm[ind],)\n if return_inverse:\n imask = cumsum(mask) - 1\n inv_idx = zeros(mask.shape, dtype=dtypes.canonicalize_dtype(int_))\n inv_idx = inv_idx.at[perm].set(imask)\n ret += (inv_idx,)\n if return_counts:\n if size is None:\n idx = append(nonzero(mask)[0], mask.size)\n else:\n idx = nonzero(mask, size=size + 1)[0]\n idx = idx.at[1:].set(where(idx[1:], idx[1:], 
mask.size))\n ret += (diff(idx),)\n\n return ret\n\n@partial(jit, static_argnums=1)\ndef _unique_axis_sorted_mask(ar, axis):\n aux = moveaxis(ar, axis, 0)\n size, *out_shape = aux.shape\n aux = aux.reshape(size, _prod(out_shape)).T\n if aux.shape[0] == 0:\n perm = zeros(1, dtype=int)\n else:\n perm = lexsort(aux[::-1])\n aux = aux[:, perm]\n if aux.size:\n mask = ones(size, dtype=bool).at[1:].set(any(aux[:, 1:] != aux[:, :-1], 0))\n else:\n mask = zeros(size, dtype=bool)\n return aux, mask, perm, out_shape\n\ndef _unique_axis(ar, axis, return_index=False, return_inverse=False,\n return_counts=False):\n \"\"\"\n Find the unique elements of an array along a particular axis.\n \"\"\"\n aux, mask, perm, out_shape = _unique_axis_sorted_mask(ar, axis)\n result = moveaxis(aux[:, mask].T.reshape(mask.sum() or aux.shape[1], *out_shape), 0, axis)\n\n ret = (result,)\n if return_index:\n if aux.size:\n ret += (perm[mask],)\n else:\n ret += (perm,)\n if return_inverse:\n if aux.size:\n imask = cumsum(mask) - 1\n inv_idx = zeros(mask.shape, dtype=dtypes.canonicalize_dtype(int_))\n inv_idx = inv_idx.at[perm].set(imask)\n else:\n inv_idx = zeros(ar.shape[axis], dtype=int)\n ret += (inv_idx,)\n if return_counts:\n if aux.size:\n idx = concatenate(nonzero(mask) + (array([mask.size]),))\n ret += (diff(idx),)\n elif ar.shape[axis]:\n ret += (array([ar.shape[axis]]),)\n else:\n ret += (empty(0, dtype=int),)\n\n return ret\n\n_UNIQUE_DOC = \"\"\"\\\nBecause the size of the output of ``unique`` is data-dependent, the function is not\ntypically compatible with JIT. The JAX version adds the optional `size` argument which\nspecifies the size of the data-dependent output arrays: it must be specified statically for\n``jnp.unique`` to be traced. If specified, the first `size` unique elements will be returned;\nif there are fewer unique elements than `size` indicates, the return value will be padded with\nthe minimum value in the input array.\n\nThe `size` cannot currently be used with the `axis` argument.\"\"\"\n\n\n@_wraps(np.unique, skip_params=['axis'], lax_description=_UNIQUE_DOC)\ndef unique(ar, return_index=False, return_inverse=False,\n return_counts=False, axis: Optional[int] = None, *, size=None):\n # TODO(jakevdp): call _check_arraylike on input.\n if axis is not None and size is not None:\n # TODO(jakevdp): implement size & axis together.\n raise NotImplementedError(\"jnp.unique `size` and `axis` arguments cannot be used together.\")\n\n ar = asarray(ar)\n\n if size is None:\n ar = core.concrete_or_error(None, ar, \"The error arose for the first argument of jnp.unique()\")\n else:\n size = core.concrete_or_error(operator.index, size, \"The error arose for the size argument of jnp.unique()\")\n\n if axis is None:\n ret = _unique1d(ar, return_index, return_inverse, return_counts, size=size)\n else:\n ret = _unique_axis(ar, axis, return_index, return_inverse, return_counts)\n\n return ret[0] if len(ret) == 1 else ret\n\n### Indexing\n\ndef _rewriting_take(arr, idx):\n # Computes arr[idx].\n # All supported cases of indexing can be implemented as an XLA gather,\n # followed by an optional reverse and broadcast_in_dim.\n arr = asarray(arr)\n treedef, static_idx, dynamic_idx = _split_index_for_jit(idx)\n return _gather(arr, treedef, static_idx, dynamic_idx)\n\n# TODO(phawkins): re-enable jit after fixing excessive recompilation for\n# slice indexes (e.g., slice(0, 5, None), slice(10, 15, None), etc.).\n# @partial(jit, static_argnums=(1, 2))\ndef _gather(arr, treedef, static_idx, dynamic_idx):\n idx = 
_merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx)\n indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update\n y = arr\n\n # Avoid calling gather if the slice shape is empty, both as a fast path and to\n # handle cases like zeros(0)[array([], int32)].\n if core.is_empty_shape(indexer.slice_shape):\n return zeros_like(y, shape=indexer.slice_shape)\n\n # We avoid generating a gather when indexer.gather_indices.size is empty.\n if not core.is_empty_shape(indexer.gather_indices.shape):\n y = lax.gather(y, indexer.gather_indices, indexer.dnums,\n indexer.gather_slice_shape,\n unique_indices=indexer.unique_indices,\n indices_are_sorted=indexer.indices_are_sorted)\n\n # Reverses axes with negative strides.\n if indexer.reversed_y_dims:\n y = lax.rev(y, indexer.reversed_y_dims)\n\n # This adds np.newaxis/None dimensions.\n return expand_dims(y, indexer.newaxis_dims)\n\n_Indexer = collections.namedtuple(\"_Indexer\", [\n # The expected shape of the slice output.\n \"slice_shape\",\n\n # The slice shape to pass to lax.gather().\n \"gather_slice_shape\",\n\n # The gather indices to use.\n \"gather_indices\",\n\n # A GatherDimensionNumbers object describing the gather to perform.\n \"dnums\",\n\n # Are the gather_indices known to be non-overlapping and/or sorted?\n # (In practice, these translate to \"there no advanced indices\", because\n # only advanced indices could lead to index repetition.)\n \"unique_indices\",\n \"indices_are_sorted\",\n\n # Slice dimensions that have negative strides, and so must be reversed after\n # the gather.\n \"reversed_y_dims\",\n\n # Keep track of any axes created by `newaxis`. These must be inserted for\n # gathers and eliminated for scatters.\n \"newaxis_dims\",\n])\n\ndef _split_index_for_jit(idx):\n \"\"\"Splits indices into necessarily-static and dynamic parts.\n\n Used to pass indices into `jit`-ted function.\n \"\"\"\n # Convert list indices to tuples in cases (deprecated by NumPy.)\n idx = _eliminate_deprecated_list_indexing(idx)\n\n # Expand any (concrete) boolean indices. We can then use advanced integer\n # indexing logic to handle them.\n idx = _expand_bool_indices(idx)\n\n leaves, treedef = tree_flatten(idx)\n dynamic = [None] * len(leaves)\n static = [None] * len(leaves)\n for i, x in enumerate(leaves):\n if x is Ellipsis:\n static[i] = x\n elif isinstance(x, slice):\n # slice objects aren't hashable.\n static[i] = (x.start, x.stop, x.step)\n else:\n dynamic[i] = x\n return treedef, tuple(static), dynamic\n\ndef _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx):\n \"\"\"Recombines indices that were split by _split_index_for_jit.\"\"\"\n idx = []\n for s, d in zip(static_idx, dynamic_idx):\n if d is not None:\n idx.append(d)\n elif isinstance(s, tuple):\n idx.append(slice(s[0], s[1], s[2]))\n else:\n idx.append(s)\n return treedef.unflatten(idx)\n\ndef _int(aval):\n return not aval.shape and issubdtype(aval.dtype, integer)\n\ndef _index_to_gather(x_shape, idx, normalize_indices=True):\n # Remove ellipses and add trailing slice(None)s.\n idx = _canonicalize_tuple_index(len(x_shape), idx)\n\n # Check for advanced indexing:\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n\n # Do the advanced indexing axes appear contiguously? 
If not, NumPy semantics\n # move the advanced axes to the front.\n advanced_axes_are_contiguous = False\n\n advanced_indexes = None\n\n # The positions of the advanced indexing axes in `idx`.\n idx_advanced_axes = []\n\n # The positions of the advanced indexes in x's shape.\n # collapsed, after None axes have been removed. See below.\n x_advanced_axes = None\n\n if _is_advanced_int_indexer(idx):\n idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]\n advanced_pairs = (\n (asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones)\n if isscalar(e) or isinstance(e, (Sequence, ndarray)))\n if normalize_indices:\n advanced_pairs = ((_normalize_index(e, x_shape[j]), i, j)\n for e, i, j in advanced_pairs)\n advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)\n advanced_axes_are_contiguous = np.all(np.diff(idx_advanced_axes) == 1)\n\n x_axis = 0 # Current axis in x.\n y_axis = 0 # Current axis in y, before collapsing. See below.\n collapsed_y_axis = 0 # Current axis in y, after collapsing.\n\n # Scatter dimension numbers.\n offset_dims = []\n collapsed_slice_dims = []\n start_index_map = []\n\n use_64bit_index = _any([not core.is_constant_dim(d) or d >= (1 << 31) for d in x_shape])\n index_dtype = int64 if use_64bit_index else int32\n\n # Gather indices.\n # Pairs of (array, start_dim) values. These will be broadcast into\n # gather_indices_shape, with the array dimensions aligned to start_dim, and\n # then concatenated.\n gather_indices = []\n gather_indices_shape = []\n\n # We perform three transformations to y before the scatter op, in order:\n # First, y is broadcast to slice_shape. In general `y` only need broadcast to\n # the right shape.\n slice_shape = []\n\n # Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`\n # indices, which the scatter cannot remove itself.\n newaxis_dims = []\n\n # Finally, we reverse reversed_y_dims to handle slices with negative strides.\n reversed_y_dims = []\n\n gather_slice_shape = []\n\n for idx_pos, i in enumerate(idx):\n # Handle the advanced indices here if:\n # * the advanced indices were not contiguous and we are the start.\n # * we are at the position of the first advanced index.\n if (advanced_indexes is not None and\n (advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or\n not advanced_axes_are_contiguous and idx_pos == 0)):\n advanced_indexes = broadcast_arrays(*advanced_indexes)\n shape = advanced_indexes[0].shape\n ndim = len(shape)\n\n start_dim = len(gather_indices_shape)\n gather_indices += ((lax.convert_element_type(a, index_dtype), start_dim)\n for a in advanced_indexes)\n gather_indices_shape += shape\n\n start_index_map.extend(x_advanced_axes)\n collapsed_slice_dims.extend(x_advanced_axes)\n slice_shape.extend(shape)\n y_axis += ndim\n collapsed_y_axis += ndim\n\n # Per-index bookkeeping for advanced indexes.\n if idx_pos in idx_advanced_axes:\n x_axis += 1\n gather_slice_shape.append(1)\n continue\n\n try:\n abstract_i = core.get_aval(i)\n except TypeError:\n abstract_i = None\n # Handle basic int indexes.\n if isinstance(abstract_i, (ConcreteArray,ShapedArray)) and _int(abstract_i):\n if x_shape[x_axis] == 0:\n # XLA gives error when indexing into an axis of size 0\n raise IndexError(f\"index is out of bounds for axis {x_axis} with size 0\")\n i = _normalize_index(i, x_shape[x_axis]) if normalize_indices else i\n i = lax.convert_element_type(i, index_dtype)\n gather_indices.append((i, len(gather_indices_shape)))\n collapsed_slice_dims.append(x_axis)\n 
gather_slice_shape.append(1)\n start_index_map.append(x_axis)\n x_axis += 1\n # Handle np.newaxis (None)\n elif i is None:\n slice_shape.append(1)\n newaxis_dims.append(y_axis)\n y_axis += 1\n # Handle slice(None)\n elif _is_slice_none(i):\n slice_shape.append(x_shape[x_axis])\n gather_slice_shape.append(x_shape[x_axis])\n offset_dims.append(collapsed_y_axis)\n collapsed_y_axis += 1\n y_axis += 1\n x_axis += 1\n # Handle slice index (only static, otherwise an error is raised)\n elif isinstance(i, slice):\n if not _all(elt is None\n or type(core.get_aval(elt)) is ConcreteArray\n for elt in (i.start, i.stop, i.step)):\n msg = (\"Array slice indices must have static start/stop/step to be used \"\n \"with NumPy indexing syntax. To index a statically sized \"\n \"array at a dynamic position, try lax.dynamic_slice/\"\n \"dynamic_update_slice (JAX does not support dynamically sized \"\n \"arrays within JIT compiled functions).\")\n raise IndexError(msg)\n start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis])\n if needs_rev:\n reversed_y_dims.append(collapsed_y_axis)\n if stride == 1:\n i = lax.convert_element_type(start, index_dtype)\n gather_indices.append((i, len(gather_indices_shape)))\n slice_shape.append(limit - start)\n gather_slice_shape.append(limit - start)\n offset_dims.append(collapsed_y_axis)\n start_index_map.append(x_axis)\n else:\n i = arange(start, limit, stride, dtype=index_dtype)\n size = i.shape[0]\n slice_shape.append(size)\n gather_slice_shape.append(1)\n gather_indices.append((i, len(gather_indices_shape)))\n gather_indices_shape.append(size)\n\n start_index_map.append(x_axis)\n collapsed_slice_dims.append(x_axis)\n\n collapsed_y_axis += 1\n y_axis += 1\n x_axis += 1\n else:\n if (abstract_i is not None and\n not (issubdtype(abstract_i.dtype, integer) or issubdtype(abstract_i.dtype, bool_))):\n msg = (\"Indexer must have integer or boolean type, got indexer \"\n \"with type {} at position {}, indexer value {}\")\n raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))\n\n msg = \"Indexing mode not yet supported. Open a feature request!\\n{}\"\n raise IndexError(msg.format(idx))\n\n if len(gather_indices) == 0:\n gather_indices_array = np.zeros((0,), dtype=index_dtype)\n elif len(gather_indices) == 1:\n g, _ = gather_indices[0]\n gather_indices_array = lax.expand_dims(g, (g.ndim,))\n else:\n last_dim = len(gather_indices_shape)\n gather_indices_shape.append(1)\n gather_indices_array = lax.concatenate([\n lax.broadcast_in_dim(g, gather_indices_shape, tuple(range(i, i + g.ndim)))\n for g, i in gather_indices],\n last_dim)\n\n dnums = lax.GatherDimensionNumbers(\n offset_dims = tuple(offset_dims),\n collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)),\n start_index_map = tuple(start_index_map)\n )\n return _Indexer(\n slice_shape=slice_shape,\n newaxis_dims=tuple(newaxis_dims),\n gather_slice_shape=gather_slice_shape,\n reversed_y_dims=reversed_y_dims,\n dnums=dnums,\n gather_indices=gather_indices_array,\n unique_indices=advanced_indexes is None,\n indices_are_sorted=advanced_indexes is None)\n\ndef _should_unpack_list_index(x):\n \"\"\"Helper for _eliminate_deprecated_list_indexing.\"\"\"\n return (isinstance(x, ndarray) and np.ndim(x) != 0\n or isinstance(x, (Sequence, slice))\n or x is Ellipsis or x is None)\n\ndef _eliminate_deprecated_list_indexing(idx):\n # \"Basic slicing is initiated if the selection object is a non-array,\n # non-tuple sequence containing slice objects, [Ellipses, or newaxis\n # objects]\". 
Detects this and raises a TypeError.\n if not isinstance(idx, tuple):\n if isinstance(idx, Sequence) and not isinstance(idx, ndarray):\n # As of numpy 1.16, some non-tuple sequences of indices result in a warning, while\n # others are converted to arrays, based on a set of somewhat convoluted heuristics\n # (See https://github.com/numpy/numpy/blob/v1.19.2/numpy/core/src/multiarray/mapping.c#L179-L343)\n # In JAX, we raise an informative TypeError for *all* non-tuple sequences.\n if _any(_should_unpack_list_index(i) for i in idx):\n msg = (\"Using a non-tuple sequence for multidimensional indexing is not allowed; \"\n \"use `arr[tuple(seq)]` instead of `arr[seq]`. \"\n \"See https://github.com/google/jax/issues/4564 for more information.\")\n else:\n msg = (\"Using a non-tuple sequence for multidimensional indexing is not allowed; \"\n \"use `arr[array(seq)]` instead of `arr[seq]`. \"\n \"See https://github.com/google/jax/issues/4564 for more information.\")\n raise TypeError(msg)\n else:\n idx = (idx,)\n return idx\n\ndef _expand_bool_indices(idx):\n \"\"\"Converts concrete bool indexes into advanced integer indexes.\"\"\"\n out = []\n for i in idx:\n try:\n abstract_i = core.get_aval(i)\n except TypeError:\n abstract_i = None\n if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)\n or isinstance(i, list) and _all(_is_scalar(e) and issubdtype(_dtype(e), np.bool_) for e in i)):\n if isinstance(i, list):\n i = array(i)\n abstract_i = core.get_aval(i)\n\n if not type(abstract_i) is ConcreteArray:\n # TODO(mattjj): improve this error by tracking _why_ the indices are not concrete\n raise errors.NonConcreteBooleanIndexError(abstract_i)\n else:\n out.extend(np.where(i))\n else:\n out.append(i)\n return tuple(out)\n\ndef _is_slice_none(idx):\n \"\"\"Return True if idx is equal to slice(None), False otherwise.\"\"\"\n if isinstance(idx, slice):\n return idx.start is None and idx.stop is None and idx.step is None\n\n# TODO(mattjj): clean up this logic\ndef _is_advanced_int_indexer(idx):\n \"\"\"Returns True if idx should trigger int array indexing, False otherwise.\"\"\"\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n assert isinstance(idx, tuple)\n if _all(e is None or e is Ellipsis or isinstance(e, slice)\n or _is_scalar(e) and issubdtype(_dtype(e), np.integer) for e in idx):\n return False\n return _all(e is None or e is Ellipsis or isinstance(e, slice)\n or _is_int_arraylike(e) for e in idx)\n\ndef _is_int_arraylike(x):\n \"\"\"Returns True if x is array-like with integer dtype, False otherwise.\"\"\"\n return (isinstance(x, int) and not isinstance(x, bool)\n or issubdtype(getattr(x, \"dtype\", None), np.integer)\n or isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x))\n\ndef _is_scalar(x):\n \"\"\"Checks if a Python or NumPy scalar.\"\"\"\n return np.isscalar(x) or (isinstance(x, ndarray) and np.ndim(x) == 0)\n\ndef _canonicalize_tuple_index(arr_ndim, idx):\n \"\"\"Helper to remove Ellipsis and add in the implicit trailing slice(None).\"\"\"\n len_without_none = _sum(1 for e in idx if e is not None and e is not Ellipsis)\n if len_without_none > arr_ndim:\n msg = \"Too many indices for array: {} non-None/Ellipsis indices for dim {}.\"\n raise IndexError(msg.format(len_without_none, arr_ndim))\n ellipses = (i for i, elt in enumerate(idx) if elt is Ellipsis)\n ellipsis_index = next(ellipses, None)\n if ellipsis_index is not None:\n if next(ellipses, None) is not None:\n msg = \"Multiple ellipses (...) 
not supported: {}.\"\n raise IndexError(msg.format(list(map(type, idx))))\n colons = (slice(None),) * (arr_ndim - len_without_none)\n idx = idx[:ellipsis_index] + colons + idx[ellipsis_index + 1:]\n elif len_without_none < arr_ndim:\n colons = (slice(None),) * (arr_ndim - len_without_none)\n idx = tuple(idx) + colons\n return idx\n\ndef _static_idx(idx: slice, size: core.DimSize):\n \"\"\"Helper function to compute the static slice start/limit/stride values.\"\"\"\n if isinstance(size, int):\n start, stop, step = idx.indices(size)\n else:\n raise TypeError(size)\n\n if (step < 0 and stop >= start) or (step > 0 and start >= stop):\n return 0, 0, 1, False # sliced to size zero\n\n if step > 0:\n return start, stop, step, False\n else:\n k = (start - stop - 1) % (-step)\n return stop + k + 1, start + 1, -step, True\n\n\nblackman = _wrap_numpy_nullary_function(np.blackman)\nbartlett = _wrap_numpy_nullary_function(np.bartlett)\nhamming = _wrap_numpy_nullary_function(np.hamming)\nhanning = _wrap_numpy_nullary_function(np.hanning)\n# TODO: lower `kaiser` via lax to allow non-constant beta values.\nkaiser = _wrap_numpy_nullary_function(np.kaiser)\n\ndef _gcd_cond_fn(xs):\n x1, x2 = xs\n return any(x2 != 0)\n\ndef _gcd_body_fn(xs):\n x1, x2 = xs\n x1, x2 = (where(x2 != 0, x2, x1),\n where(x2 != 0, lax.rem(x1, x2), lax._const(x2, 0)))\n return (where(x1 < x2, x2, x1), where(x1 < x2, x1, x2))\n\n@_wraps(np.gcd)\ndef gcd(x1, x2):\n _check_arraylike(\"gcd\", x1, x2)\n if (not issubdtype(_dtype(x1), integer) or\n not issubdtype(_dtype(x2), integer)):\n raise ValueError(\"Arguments to jax.numpy.gcd must be integers.\")\n x1, x2 = _promote_dtypes(x1, x2)\n x1, x2 = broadcast_arrays(x1, x2)\n gcd, _ = lax.while_loop(_gcd_cond_fn, _gcd_body_fn, (abs(x1), abs(x2)))\n return gcd\n\n\n@_wraps(np.lcm)\ndef lcm(x1, x2):\n _check_arraylike(\"lcm\", x1, x2)\n x1, x2 = _promote_dtypes(x1, x2)\n d = gcd(x1, x2)\n return where(d == 0, lax._const(d, 0),\n abs(multiply(x1, floor_divide(x2, d))))\n\n\n@_wraps(np.extract)\ndef extract(condition, arr):\n return compress(ravel(condition), ravel(arr))\n\n\n@_wraps(np.compress, skip_params=['out'])\ndef compress(condition, a, axis: Optional[int] = None, out=None):\n if out is not None:\n raise NotImplementedError(\"The 'out' argument to jnp.compress is not supported.\")\n if ndim(condition) != 1:\n raise ValueError(\"condition must be a 1D array\")\n condition = asarray(condition).astype(bool)\n a = asarray(a)\n if axis is None:\n axis = 0\n a = ravel(a)\n else:\n a = moveaxis(a, axis, 0)\n condition, extra = condition[:a.shape[0]], condition[a.shape[0]:]\n if any(extra):\n raise ValueError(\"condition contains entries that are out of bounds\")\n a = a[:condition.shape[0]]\n return moveaxis(a[condition], 0, axis)\n\n\n@_wraps(np.cov)\ndef cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,\n aweights=None):\n if y is not None:\n m, y = _promote_args_inexact(\"cov\", m, y)\n if y.ndim > 2:\n raise ValueError(\"y has more than 2 dimensions\")\n else:\n m, = _promote_args_inexact(\"cov\", m)\n\n if m.ndim > 2:\n raise ValueError(\"m has more than 2 dimensions\") # same as numpy error\n\n X = atleast_2d(m)\n if not rowvar and X.shape[0] != 1:\n X = X.T\n if X.shape[0] == 0:\n return array([]).reshape(0, 0)\n\n if y is not None:\n y = atleast_2d(y)\n if not rowvar and y.shape[0] != 1:\n y = y.T\n X = concatenate((X, y), axis=0)\n if ddof is None:\n ddof = 1 if bias == 0 else 0\n\n w = None\n if fweights is not None:\n _check_arraylike(\"cov\", fweights)\n if 
ndim(fweights) > 1:\n raise RuntimeError(\"cannot handle multidimensional fweights\")\n if shape(fweights)[0] != X.shape[1]:\n raise RuntimeError(\"incompatible numbers of samples and fweights\")\n if not issubdtype(_dtype(fweights), integer):\n raise TypeError(\"fweights must be integer.\")\n # Ensure positive fweights; note that numpy raises an error on negative fweights.\n w = asarray(abs(fweights))\n if aweights is not None:\n _check_arraylike(\"cov\", aweights)\n if ndim(aweights) > 1:\n raise RuntimeError(\"cannot handle multidimensional aweights\")\n if shape(aweights)[0] != X.shape[1]:\n raise RuntimeError(\"incompatible numbers of samples and aweights\")\n # Ensure positive aweights: note that numpy raises an error for negative aweights.\n aweights = abs(aweights)\n w = aweights if w is None else w * aweights\n\n avg, w_sum = average(X, axis=1, weights=w, returned=True)\n w_sum = w_sum[0]\n\n if w is None:\n f = X.shape[1] - ddof\n elif ddof == 0:\n f = w_sum\n elif aweights is None:\n f = w_sum - ddof\n else:\n f = w_sum - ddof * sum(w * aweights) / w_sum\n\n X = X - avg[:, None]\n X_T = X.T if w is None else (X * w).T\n return true_divide(dot(X, X_T.conj()), f).squeeze()\n\n\n@_wraps(np.corrcoef)\ndef corrcoef(x, y=None, rowvar=True):\n _check_arraylike(\"corrcoef\", x)\n c = cov(x, y, rowvar)\n if len(shape(c)) == 0:\n # scalar - this should yield nan for values (nan/nan, inf/inf, 0/0), 1 otherwise\n return divide(c, c)\n d = diag(c)\n stddev = sqrt(real(d))\n c = divide(c, stddev[:,None])\n c = divide(c, stddev[None,:])\n\n real_part = clip(real(c), -1, 1)\n if iscomplexobj(c):\n complex_part = clip(imag(c), -1, 1)\n c = lax.complex(real_part, complex_part)\n else:\n c = real_part\n return c\n\n\n@_wraps(np.quantile, skip_params=['out', 'overwrite_input'])\ndef quantile(a, q, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n overwrite_input=False, interpolation=\"linear\", keepdims=False):\n _check_arraylike(\"quantile\", a, q)\n if overwrite_input or out is not None:\n msg = (\"jax.numpy.quantile does not support overwrite_input=True or \"\n \"out != None\")\n raise ValueError(msg)\n return _quantile(a, q, axis, interpolation, keepdims, False)\n\n@_wraps(np.nanquantile, skip_params=['out', 'overwrite_input'])\ndef nanquantile(a, q, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n out=None, overwrite_input=False, interpolation=\"linear\",\n keepdims=False):\n _check_arraylike(\"nanquantile\", a, q)\n if overwrite_input or out is not None:\n msg = (\"jax.numpy.nanquantile does not support overwrite_input=True or \"\n \"out != None\")\n raise ValueError(msg)\n return _quantile(a, q, axis, interpolation, keepdims, True)\n\n\n@partial(jit, static_argnums=(2, 3, 4, 5))\ndef _quantile(a, q, axis, interpolation, keepdims, squash_nans):\n if interpolation not in [\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"]:\n raise ValueError(\"interpolation can only be 'linear', 'lower', 'higher', \"\n \"'midpoint', or 'nearest'\")\n a = asarray(a, dtype=promote_types(_dtype(a), float32))\n q = asarray(q, dtype=promote_types(_dtype(q), float32))\n if axis is None:\n a = ravel(a)\n axis = 0\n elif isinstance(axis, tuple):\n raise NotImplementedError(\"Tuple values for axis are not implemented\")\n else:\n axis = _canonicalize_axis(axis, ndim(a))\n\n q_shape = shape(q)\n q_ndim = ndim(q)\n if q_ndim > 1:\n raise ValueError(\"q must be have rank <= 1, got shape {}\".format(shape(q)))\n\n a_shape = shape(a)\n\n if squash_nans:\n a = where(isnan(a), nan, a) # Ensure 
nans are positive so they sort to the end.\n a = lax.sort(a, dimension=axis)\n counts = sum(logical_not(isnan(a)), axis=axis, dtype=q.dtype,\n keepdims=keepdims)\n shape_after_reduction = counts.shape\n q = lax.expand_dims(\n q, tuple(range(q_ndim, len(shape_after_reduction) + q_ndim)))\n counts = lax.expand_dims(counts, tuple(range(q_ndim)))\n q = lax.mul(q, lax.sub(counts, _constant_like(q, 1)))\n low = lax.floor(q)\n high = lax.ceil(q)\n high_weight = lax.sub(q, low)\n low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)\n\n low = lax.max(_constant_like(low, 0), lax.min(low, counts - 1))\n high = lax.max(_constant_like(high, 0), lax.min(high, counts - 1))\n low = lax.convert_element_type(low, int64)\n high = lax.convert_element_type(high, int64)\n out_shape = q_shape + shape_after_reduction\n index = [lax.broadcasted_iota(int64, out_shape, dim + q_ndim)\n for dim in range(len(shape_after_reduction))]\n if keepdims:\n index[axis] = low\n else:\n index.insert(axis, low)\n low_value = a[tuple(index)]\n index[axis] = high\n high_value = a[tuple(index)]\n else:\n a = where(any(isnan(a), axis=axis, keepdims=True), nan, a)\n a = lax.sort(a, dimension=axis)\n n = a_shape[axis]\n q = lax.mul(q, _constant_like(q, n - 1))\n low = lax.floor(q)\n high = lax.ceil(q)\n high_weight = lax.sub(q, low)\n low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)\n\n low = lax.clamp(_constant_like(low, 0), low, _constant_like(low, n - 1))\n high = lax.clamp(_constant_like(high, 0), high, _constant_like(high, n - 1))\n low = lax.convert_element_type(low, int64)\n high = lax.convert_element_type(high, int64)\n\n slice_sizes = list(a_shape)\n slice_sizes[axis] = 1\n dnums = lax.GatherDimensionNumbers(\n offset_dims=tuple(range(\n q_ndim,\n len(a_shape) + q_ndim if keepdims else len(a_shape) + q_ndim - 1)),\n collapsed_slice_dims=() if keepdims else (axis,),\n start_index_map=(axis,))\n low_value = lax.gather(a, low[..., None], dimension_numbers=dnums,\n slice_sizes=slice_sizes)\n high_value = lax.gather(a, high[..., None], dimension_numbers=dnums,\n slice_sizes=slice_sizes)\n if q_ndim == 1:\n low_weight = lax.broadcast_in_dim(low_weight, low_value.shape,\n broadcast_dimensions=(0,))\n high_weight = lax.broadcast_in_dim(high_weight, high_value.shape,\n broadcast_dimensions=(0,))\n\n if interpolation == \"linear\":\n result = lax.add(lax.mul(low_value.astype(q.dtype), low_weight),\n lax.mul(high_value.astype(q.dtype), high_weight))\n elif interpolation == \"lower\":\n result = low_value\n elif interpolation == \"higher\":\n result = high_value\n elif interpolation == \"nearest\":\n pred = lax.le(high_weight, _constant_like(high_weight, 0.5))\n result = lax.select(pred, low_value, high_value)\n elif interpolation == \"midpoint\":\n result = lax.mul(lax.add(low_value, high_value), _constant_like(low_value, 0.5))\n else:\n raise ValueError(f\"interpolation={interpolation!r} not recognized\")\n\n return lax.convert_element_type(result, a.dtype)\n\n\n@partial(jit, static_argnums=2)\n@partial(vectorize, excluded={0, 2})\ndef _searchsorted(a, v, side):\n if len(a) == 0:\n return 0\n op = operator.le if side == 'left' else operator.lt\n\n def body_fun(i, state):\n low, high = state\n mid = (low + high) // 2\n go_left = op(v, a[mid])\n return (where(go_left, low, mid), where(go_left, mid, high))\n\n n_levels = int(np.ceil(np.log2(len(a) + 1)))\n return lax.fori_loop(0, n_levels, body_fun, (0, len(a)))[1]\n\n\n@_wraps(np.searchsorted, skip_params=['sorter'])\ndef searchsorted(a, v, side='left', 
sorter=None):\n if side not in ['left', 'right']:\n raise ValueError(f\"{side!r} is an invalid value for keyword 'side'\")\n if sorter is not None:\n raise NotImplementedError(\"sorter is not implemented\")\n a = asarray(a)\n v = asarray(v)\n if ndim(a) != 1:\n raise ValueError(\"a should be 1-dimensional\")\n return _searchsorted(a, v, side)\n\n\n@_wraps(np.digitize)\ndef digitize(x, bins, right=False):\n if len(bins) == 0:\n return zeros(x, dtype=dtypes.canonicalize_dtype(int_))\n side = 'right' if not right else 'left'\n return where(\n bins[-1] >= bins[0],\n searchsorted(bins, x, side=side),\n len(bins) - searchsorted(bins[::-1], x, side=side)\n )\n\n_PIECEWISE_DOC = \"\"\"\\\nUnlike `np.piecewise`, :py:func:`jax.numpy.piecewise` requires functions in\n`funclist` to be traceable by JAX, as it is implemeted via :func:`jax.lax.switch`.\nSee the :func:`jax.lax.switch` documentation for more information.\n\"\"\"\n\n@_wraps(np.piecewise, lax_description=_PIECEWISE_DOC)\ndef piecewise(x, condlist, funclist, *args, **kw):\n _check_arraylike(\"piecewise\", x)\n condlist = array(condlist, dtype=bool_)\n nc, nf = len(condlist), len(funclist)\n if nf == nc + 1:\n funclist = funclist[-1:] + funclist[:-1]\n elif nf == nc:\n funclist = [0] + list(funclist)\n else:\n raise ValueError(f\"with {nc} condition(s), either {nc} or {nc+1} functions are expected; got {nf}\")\n indices = argmax(cumsum(concatenate([zeros_like(condlist[:1]), condlist], 0), 0), 0)\n dtype = _dtype(x)\n def _call(f):\n return lambda x: f(x, *args, **kw).astype(dtype)\n def _const(v):\n return lambda x: array(v, dtype=dtype)\n funclist = [_call(f) if callable(f) else _const(f) for f in funclist]\n return vectorize(lax.switch, excluded=(1,))(indices, funclist, x)\n\n\n@_wraps(np.percentile, skip_params=['out', 'overwrite_input'])\ndef percentile(a, q, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n out=None, overwrite_input=False, interpolation=\"linear\",\n keepdims=False):\n _check_arraylike(\"percentile\", a)\n q = true_divide(asarray(q), float32(100.0))\n return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,\n interpolation=interpolation, keepdims=keepdims)\n\n@_wraps(np.nanpercentile, skip_params=['out', 'overwrite_input'])\ndef nanpercentile(a, q, axis: Optional[Union[int, Tuple[int, ...]]] = None,\n out=None, overwrite_input=False, interpolation=\"linear\",\n keepdims=False):\n _check_arraylike(\"nanpercentile\", a)\n q = true_divide(asarray(q), float32(100.0))\n return nanquantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,\n interpolation=interpolation, keepdims=keepdims)\n\n@_wraps(np.median, skip_params=['out', 'overwrite_input'])\ndef median(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n overwrite_input=False, keepdims=False):\n _check_arraylike(\"median\", a)\n return quantile(a, 0.5, axis=axis, out=out, overwrite_input=overwrite_input,\n keepdims=keepdims, interpolation='midpoint')\n\n@_wraps(np.nanmedian, skip_params=['out', 'overwrite_input'])\ndef nanmedian(a, axis: Optional[Union[int, Tuple[int, ...]]] = None, out=None,\n overwrite_input=False, keepdims=False):\n _check_arraylike(\"nanmedian\", a)\n return nanquantile(a, 0.5, axis=axis, out=out,\n overwrite_input=overwrite_input, keepdims=keepdims,\n interpolation='midpoint')\n\n\ndef _astype(arr, dtype):\n lax._check_user_dtype_supported(dtype, \"astype\")\n return lax.convert_element_type(arr, dtype)\n\n\ndef _nbytes(arr):\n return size(arr) * _dtype(arr).itemsize\n\n\ndef _view(arr, dtype=None, 
type=None):\n lax._check_user_dtype_supported(dtype, \"view\")\n if type is not None:\n raise NotImplementedError(\"`type` argument of array.view()\")\n if dtype is None:\n return arr\n arr_dtype = _dtype(arr)\n if arr_dtype == dtype:\n return arr\n # bool is implemented as lax:PRED, which is not compatible with lax.bitcast_convert_type.\n # We work around this by casting bool to uint8.\n if arr_dtype == bool_:\n arr = arr.astype(uint8)\n nbits_in = 8 * arr_dtype.itemsize\n nbits_out = 8 * _dtype(dtype).itemsize\n if nbits_in == nbits_out:\n if dtype == bool_:\n return lax.bitcast_convert_type(arr, uint8).astype(dtype)\n return lax.bitcast_convert_type(arr, dtype)\n if nbits_out > nbits_in and (shape(arr)[-1] * nbits_in) % nbits_out != 0:\n raise ValueError(\"When changing to a larger dtype, its size must be a divisor \"\n \"of the total size in bytes of the last axis of the array.\")\n byte_dtypes = {8: uint8, 16: uint16, 32: uint32, 64: uint64}\n if nbits_in not in byte_dtypes:\n raise NotImplementedError(f\"arr.view() for arr.dtype={arr_dtype}\")\n if nbits_out not in byte_dtypes:\n raise NotImplementedError(f\"arr.view(dtype) for dtype={dtype}\")\n dt_in = byte_dtypes[nbits_in]\n dt_out = byte_dtypes[nbits_out]\n arr_bytes = lax.bitcast_convert_type(arr, dt_in)\n if nbits_in < nbits_out:\n shifts = arange(0, nbits_out, nbits_in, dtype=dt_out)\n arr_bytes = arr_bytes.reshape(arr.shape[:-1] + (-1, nbits_out // nbits_in)).astype(dt_out)\n arr_bytes = (arr_bytes << shifts).sum(-1).astype(dt_out)\n else:\n shifts = arange(0, nbits_in, nbits_out, dtype=dt_in)\n arr_bytes = ((arr_bytes[..., newaxis] >> shifts) & iinfo(dt_out).max).astype(dt_out)\n arr_bytes = arr_bytes.reshape(arr_bytes.shape[:-2] + (-1,))\n if dtype == bool_:\n return lax.bitcast_convert_type(arr_bytes, uint8).astype(dtype)\n return lax.bitcast_convert_type(arr_bytes, dtype)\n\n### track unimplemented functions\n\n_NOT_IMPLEMENTED_DESC = \"\"\"\n*** This function is not yet implemented by jax.numpy, and will raise NotImplementedError ***\n\"\"\"\n\ndef _not_implemented(fun):\n @_wraps(fun, update_doc=False, lax_description=_NOT_IMPLEMENTED_DESC)\n def wrapped(*args, **kwargs):\n msg = \"Numpy function {} not yet implemented\"\n raise NotImplementedError(msg.format(fun))\n return wrapped\n\n\n### add method and operator overloads to arraylike classes\n\n# We add operator overloads to DeviceArray and ShapedArray. These method and\n# operator overloads mainly just forward calls to the corresponding lax_numpy\n# functions, which can themselves handle instances from any of these classes.\n\n_scalar_types = (int, float, complex, np.generic)\n\ndef _defer_to_unrecognized_arg(binary_op):\n # Ensure that other array types have the chance to override arithmetic.\n def deferring_binary_op(self, other):\n if not isinstance(other, _scalar_types + _arraylike_types + (core.Tracer,)):\n return NotImplemented\n return binary_op(self, other)\n return deferring_binary_op\n\ndef _swap_args(f):\n return lambda x, y: f(y, x)\n\ndef _unimplemented_setitem(self, i, x):\n msg = (\"'{}' object does not support item assignment. 
JAX arrays are \"\n \"immutable; perhaps you want jax.ops.index_update or \"\n \"jax.ops.index_add instead?\")\n raise TypeError(msg.format(type(self)))\n\ndef _operator_round(number, ndigits=None):\n out = round(number, decimals=ndigits or 0)\n # If `ndigits` is None, for a builtin float round(7.5) returns an integer.\n return out.astype(int) if ndigits is None else out\n\n_operators = {\n \"getitem\": _rewriting_take,\n \"setitem\": _unimplemented_setitem,\n \"neg\": negative,\n \"pos\": positive,\n \"eq\": _defer_to_unrecognized_arg(equal),\n \"ne\": _defer_to_unrecognized_arg(not_equal),\n \"lt\": _defer_to_unrecognized_arg(less),\n \"le\": _defer_to_unrecognized_arg(less_equal),\n \"gt\": _defer_to_unrecognized_arg(greater),\n \"ge\": _defer_to_unrecognized_arg(greater_equal),\n \"abs\": abs,\n \"add\": _defer_to_unrecognized_arg(add),\n \"radd\": _defer_to_unrecognized_arg(add),\n \"sub\": _defer_to_unrecognized_arg(subtract),\n \"rsub\": _defer_to_unrecognized_arg(_swap_args(subtract)),\n \"mul\": _defer_to_unrecognized_arg(multiply),\n \"rmul\": _defer_to_unrecognized_arg(multiply),\n \"div\": _defer_to_unrecognized_arg(divide),\n \"rdiv\": _defer_to_unrecognized_arg(_swap_args(divide)),\n \"truediv\": _defer_to_unrecognized_arg(true_divide),\n \"rtruediv\": _defer_to_unrecognized_arg(_swap_args(true_divide)),\n \"floordiv\": _defer_to_unrecognized_arg(floor_divide),\n \"rfloordiv\": _defer_to_unrecognized_arg(_swap_args(floor_divide)),\n \"divmod\": _defer_to_unrecognized_arg(divmod),\n \"rdivmod\": _defer_to_unrecognized_arg(_swap_args(divmod)),\n \"mod\": _defer_to_unrecognized_arg(mod),\n \"rmod\": _defer_to_unrecognized_arg(_swap_args(mod)),\n \"pow\": _defer_to_unrecognized_arg(power),\n \"rpow\": _defer_to_unrecognized_arg(_swap_args(power)),\n \"matmul\": _defer_to_unrecognized_arg(matmul),\n \"rmatmul\": _defer_to_unrecognized_arg(_swap_args(matmul)),\n \"and\": _defer_to_unrecognized_arg(bitwise_and),\n \"rand\": _defer_to_unrecognized_arg(bitwise_and),\n \"or\": _defer_to_unrecognized_arg(bitwise_or),\n \"ror\": _defer_to_unrecognized_arg(bitwise_or),\n \"xor\": _defer_to_unrecognized_arg(bitwise_xor),\n \"rxor\": _defer_to_unrecognized_arg(bitwise_xor),\n \"invert\": bitwise_not,\n \"lshift\": _defer_to_unrecognized_arg(left_shift),\n \"rshift\": _defer_to_unrecognized_arg(right_shift),\n \"rlshift\": _defer_to_unrecognized_arg(_swap_args(left_shift)),\n \"rrshift\": _defer_to_unrecognized_arg(_swap_args(right_shift)),\n \"round\": _operator_round,\n}\n\n# These numpy.ndarray methods are just refs to an equivalent numpy function\n_nondiff_methods = [\"all\", \"any\", \"argmax\", \"argmin\", \"argpartition\", \"argsort\",\n \"nonzero\", \"searchsorted\", \"round\"]\n_diff_methods = [\"clip\", \"conj\", \"conjugate\", \"cumprod\", \"cumsum\",\n \"diagonal\", \"dot\", \"max\", \"mean\", \"min\", \"prod\", \"ptp\",\n \"ravel\", \"repeat\", \"sort\", \"squeeze\", \"std\", \"sum\",\n \"swapaxes\", \"take\", \"tile\", \"trace\", \"var\"]\n\n# These methods are mentioned explicitly by nondiff_methods, so we create\n# _not_implemented implementations of them here rather than in __init__.py.\n# TODO(phawkins): implement these.\nargpartition = _not_implemented(np.argpartition)\n_NOT_IMPLEMENTED = ['argpartition']\n\n# Set up operator, method, and property forwarding on Tracer instances containing\n# ShapedArray avals by following the forwarding conventions for Tracer.\n# Forward operators using a single-underscore-prefix naming convention:\nfor operator_name, function in 
_operators.items():\n setattr(ShapedArray, \"_{}\".format(operator_name), staticmethod(function))\n# Forward methods and properties using core.aval_method and core.aval_property:\nfor method_name in _nondiff_methods + _diff_methods:\n setattr(ShapedArray, method_name, core.aval_method(globals()[method_name]))\nsetattr(ShapedArray, \"reshape\", core.aval_method(_reshape))\nsetattr(ShapedArray, \"transpose\", core.aval_method(_transpose))\nsetattr(ShapedArray, \"flatten\", core.aval_method(ravel))\nsetattr(ShapedArray, \"T\", core.aval_property(transpose))\nsetattr(ShapedArray, \"real\", core.aval_property(real))\nsetattr(ShapedArray, \"imag\", core.aval_property(imag))\nsetattr(ShapedArray, \"astype\", core.aval_method(_astype))\nsetattr(ShapedArray, \"view\", core.aval_method(_view))\nsetattr(ShapedArray, \"nbytes\", core.aval_property(_nbytes))\n\n\n# Forward operators, methods, and properties on DeviceArray to lax_numpy\n# functions (with no Tracers involved; this forwarding is direct)\nfor device_array in [DeviceArray]:\n for operator_name, function in _operators.items():\n setattr(device_array, \"__{}__\".format(operator_name), function)\n for method_name in _nondiff_methods + _diff_methods:\n setattr(device_array, method_name, globals()[method_name])\n setattr(device_array, \"reshape\", _reshape)\n setattr(device_array, \"transpose\", _transpose)\n setattr(device_array, \"flatten\", ravel)\n setattr(device_array, \"T\", property(transpose))\n setattr(device_array, \"real\", property(real))\n setattr(device_array, \"imag\", property(imag))\n setattr(device_array, \"astype\", _astype)\n setattr(device_array, \"view\", _view)\n setattr(device_array, \"nbytes\", property(_nbytes))\n\n\n# Experimental support for NumPy's module dispatch with NEP-37.\n# Currently requires https://github.com/seberg/numpy-dispatch\n_JAX_ARRAY_TYPES = (DeviceArray, core.Tracer)\n_HANDLED_ARRAY_TYPES = _JAX_ARRAY_TYPES + (np.ndarray,)\n\ndef __array_module__(self, types):\n if builtins.all(issubclass(t, _HANDLED_ARRAY_TYPES) for t in types):\n return jax.numpy\n else:\n return NotImplemented\n\nsetattr(ShapedArray, \"_array_module\", staticmethod(__array_module__))\nsetattr(_DeviceArray, \"__array_module__\", __array_module__)\nsetattr(_CppDeviceArray, \"__array_module__\", __array_module__)\n\n\n# Extra methods that are handy\nsetattr(ShapedArray, \"broadcast\", core.aval_method(lax.broadcast))\nsetattr(ShapedArray, \"broadcast_in_dim\", core.aval_method(lax.broadcast_in_dim))\nsetattr(ShapedArray, \"split\", core.aval_method(split))\nfor device_array in [_DeviceArray, _CppDeviceArray]:\n setattr(device_array, \"broadcast\", lax.broadcast)\n setattr(device_array, \"broadcast_in_dim\", lax.broadcast_in_dim)\n setattr(device_array, \"split\", split)\n\ndef _compress_method(a, condition, axis=None, out=None):\n return compress(condition, a, axis, out)\n\nsetattr(ShapedArray, \"compress\", _compress_method)\nsetattr(_DeviceArray, \"compress\", _compress_method)\nsetattr(_CppDeviceArray, \"compress\", _compress_method)\n\n@partial(jit, static_argnums=(1,2,3))\ndef _multi_slice(arr,\n start_indices: Tuple[Tuple[int, ...]],\n limit_indices: Tuple[Tuple[int, ...]],\n removed_dims: Tuple[Tuple[int, ...]]):\n \"\"\"Extracts multiple slices from `arr`.\n\n This is used to shard DeviceArray arguments to pmap. 
It's implemented as a\n DeviceArray method here to avoid circular imports.\n \"\"\"\n results = []\n for starts, limits, removed in safe_zip(start_indices, limit_indices, removed_dims):\n sliced = lax.slice(arr, starts, limits)\n if removed:\n sliced = sliced.reshape(np.delete(sliced.shape, removed_dims))\n results.append(sliced)\n return results\nsetattr(_DeviceArray, \"_multi_slice\", _multi_slice)\nsetattr(_CppDeviceArray, \"_multi_slice\", _multi_slice)\n\n\n# Syntactic sugar for scatter operations.\nclass _IndexUpdateHelper:\n # Note: this docstring will appear as the docstring for the `at` property.\n \"\"\"Indexable helper object to call indexed update functions.\n\n The ``at`` property is syntactic sugar for calling the indexed update functions\n defined in :mod:`jax.ops`, and acts as a pure equivalent of in-place\n modificatons. For further information, see `Indexed Update Operators\n <https://jax.readthedocs.io/en/latest/jax.ops.html#indexed-update-operators>`_.\n\n In particular:\n\n - ``x = x.at[idx].set(y)`` is a pure equivalent of ``x[idx] = y``.\n - ``x = x.at[idx].add(y)`` is a pure equivalent of ``x[idx] += y``.\n - ``x = x.at[idx].multiply(y)`` (aka ``mul``) is a pure equivalent of\n ``x[idx] *= y``.\n - ``x = x.at[idx].divide(y)`` is a pure equivalent of ``x[idx] /= y``.\n - ``x = x.at[idx].power(y)`` is a pure equivalent of ``x[idx] **= y``.\n - ``x = x.at[idx].min(y)`` is a pure equivalent of\n ``x[idx] = minimum(x[idx], y)``.\n - ``x = x.at[idx].max(y)`` is a pure equivalent of\n ``x[idx] = maximum(x[idx], y)``.\n \"\"\"\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = array\n\n def __getitem__(self, index):\n return _IndexUpdateRef(self.array, index)\n\n def __repr__(self):\n return f\"_IndexUpdateHelper({repr(self.array)})\"\n\n_power = power\n_divide = divide\n\nclass _IndexUpdateRef:\n \"\"\"Helper object to call indexed update functions for an (advanced) index.\n\n This object references a source array and a specific indexer into that array.\n Methods on this object return copies of the source array that have been\n modified at the positions specified by the indexer.\n \"\"\"\n __slots__ = (\"array\", \"index\")\n\n def __init__(self, array, index):\n self.array = array\n self.index = index\n\n def __repr__(self):\n return f\"_IndexUpdateRef({repr(self.array)}, {repr(self.index)})\"\n\n def set(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] = y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] = y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values, lax.scatter,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices)\n\n def add(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] += y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] += y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values,\n lax.scatter_add,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices)\n\n def multiply(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] *= y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] *= y``.\n\n See 
:mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values,\n lax.scatter_mul,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices)\n mul = multiply\n\n def divide(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] /= y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] /= y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return _divide(\n self.array,\n scatter._scatter_update(ones_like(self.array), self.index, values,\n lax.scatter_mul,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices))\n\n def power(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] **= y``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>` ``x[idx] **= y``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return _power(\n self.array,\n scatter._scatter_update(ones_like(self.array), self.index, values,\n lax.scatter_mul,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices))\n\n def min(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] = minimum(x[idx], y)``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>`\n ``x[idx] = minimum(x[idx], y)``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values,\n lax.scatter_min,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices)\n\n def max(self, values, indices_are_sorted=False, unique_indices=False):\n \"\"\"Pure equivalent of ``x[idx] = maximum(x[idx], y)``.\n\n Returns the value of ``x`` that would result from the NumPy-style\n :mod:indexed assignment <numpy.doc.indexing>`\n ``x[idx] = maximum(x[idx], y)``.\n\n See :mod:`jax.ops` for details.\n \"\"\"\n return scatter._scatter_update(self.array, self.index, values,\n lax.scatter_max,\n indices_are_sorted=indices_are_sorted,\n unique_indices=unique_indices)\n\n\nsetattr(_DeviceArray, \"at\", property(_IndexUpdateHelper))\nsetattr(_CppDeviceArray, \"at\", property(_IndexUpdateHelper))\nsetattr(ShapedArray, \"at\", core.aval_property(_IndexUpdateHelper))\n"
] |
[
[
"numpy.multiply",
"numpy.sign",
"numpy.where",
"numpy.issubdtype",
"numpy.broadcast_to",
"numpy.dtype",
"numpy.concatenate",
"numpy.iscomplex",
"numpy.uint8",
"numpy.empty",
"numpy.log",
"numpy.arange",
"numpy.ndim",
"numpy.array",
"numpy.delete",
"numpy.zeros",
"numpy.shape",
"numpy.diff",
"numpy.isscalar",
"numpy.ceil",
"numpy.cumprod",
"numpy.asarray",
"numpy.sum",
"numpy.ones",
"numpy.any",
"numpy.ravel",
"numpy.int64"
]
] |
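The docstrings in the row above describe JAX's pure indexed-update API (`x.at[idx].set(y)` and friends). A minimal usage sketch, assuming only that `jax` is installed; the array values are illustrative and not part of the dataset row:

```python
# Sketch of the .at[] pure-update semantics documented above (assumes jax is installed).
import jax.numpy as jnp

x = jnp.zeros(5)
y = x.at[2].set(7.0)   # pure equivalent of x[2] = 7.0
z = y.at[2].add(1.0)   # pure equivalent of y[2] += 1.0

print(x)  # [0. 0. 0. 0. 0.]  -- the original array is unchanged
print(z)  # [0. 0. 8. 0. 0.]
```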
AlamiMejjati/controllable_image_synthesis
|
[
"06f81359d5f10854af275cd313023d9f1e0afd4c"
] |
[
"controllable_gan/datasets.py"
] |
[
"import os\nimport numpy as np\nimport torchvision\nfrom torchvision.datasets.vision import VisionDataset\nfrom PIL import Image\nimport glob\nfrom collections import Counter\n\n\nclass ObjectDataset(VisionDataset):\n \"\"\"\n Multiple data directories for varying number of objects per scene.\n Folder structure: root/split/scene/scene_idx.png and\n root/split/scene/scene_depthidx.npy and\n root/split/scene/bboxes.npz\n \"\"\"\n\n def __init__(self, data_dirs, split, transforms=None, nlabels=1):\n # Use multiple root folders\n if not isinstance(data_dirs, list):\n data_dirs = [data_dirs]\n \n # assign label for each root folder\n self.nlabels = nlabels\n if self.nlabels not in [1, 2]:\n raise NotImplementedError\n labels = [self._get_target(ddir) for ddir in data_dirs]\n data_dirs = [os.path.join(ddir, split) for ddir in data_dirs]\n \n if transforms is None:\n transforms = torchvision.transforms.ToTensor() # HxWxC -> CxHxW\n \n # initialize base class\n super(ObjectDataset, self).__init__(root=data_dirs, transform=transforms)\n \n self.filenames = []\n self.labels = []\n \n for ddir, label in zip(self.root, labels):\n if label == 1 and self.nlabels == 1: # do not add pure bg images\n continue\n \n filenames = self._get_filenames(ddir)\n self.filenames.extend(filenames)\n self.labels.extend([label] * len(filenames))\n \n labels = np.array(self.labels)\n if self.nlabels > 1 and split == 'train' and not np.any(labels == 1):\n raise ValueError('No background folder provided!')\n \n if nlabels > 1 and split == 'train': # get equally many pure bg and bg+fg images\n make_equal_label(self.filenames, self.labels)\n\n def __len__(self):\n return len(self.filenames)\n\n @staticmethod\n def _get_filenames(root_dir):\n return glob.glob(f'{root_dir}/*/*.png')\n\n @staticmethod\n def _get_num_obj(path):\n return int(os.path.basename(path).split('_')[0][-1])\n\n def _get_target(self, path):\n \"\"\"\n Args:\n path (string): path to directory\n\n Returns:\n target (int): class label\n\n \"\"\"\n num_obj = self._get_num_obj(path)\n if num_obj == 0:\n return 1 # pure bg has label 1\n return 0 # remaining images have label 0\n\n def __getitem__(self, idx):\n filename = self.filenames[idx]\n label = self.labels[idx]\n img = Image.open(filename)\n img = self.transform(img)\n return img, label\n\n\nclass StampsDataset(VisionDataset):\n \"\"\"\n Multiple data directories for varying number of objects per scene.\n Folder structure: root/split/scene/scene_idx.png and\n root/split/scene/scene_depthidx.npy and\n root/split/scene/bboxes.npz\n \"\"\"\n\n def __init__(self, data_dirs, split, impath, transforms=None, nlabels=1):\n # Use multiple root folders\n if not isinstance(data_dirs, list):\n data_dirs = [data_dirs]\n\n # self.impath = '/home/youssef/Documents/phdYoop/datasets/manuel/beauty/PNG'\n self.impath = impath\n # assign label for each root folder\n self.nlabels = nlabels\n if self.nlabels not in [1, 2]:\n raise NotImplementedError\n labels = [self._get_target(ddir) for ddir in data_dirs]\n data_dirs = [os.path.join(ddir, split) for ddir in data_dirs]\n\n if transforms is None:\n transforms = torchvision.transforms.ToTensor() # HxWxC -> CxHxW\n\n # initialize base class\n super(StampsDataset, self).__init__(root=data_dirs, transform=transforms)\n\n self.filenames = []\n self.labels = []\n\n for ddir, label in zip(self.root, labels):\n if label == 1 and self.nlabels == 1: # do not add pure bg images\n continue\n\n filenames = self._get_filenames(ddir)\n self.filenames.extend(filenames)\n 
self.labels.extend([label] * len(filenames))\n\n labels = np.array(self.labels)\n if self.nlabels > 1 and split == 'train' and not np.any(labels == 1):\n raise ValueError('No background folder provided!')\n\n if nlabels > 1 and split == 'train': # get equally many pure bg and bg+fg images\n make_equal_label(self.filenames, self.labels)\n\n def __len__(self):\n return len(self.filenames)\n\n @staticmethod\n def _get_filenames(root_dir):\n return glob.glob(f'{root_dir}/*.png')\n\n @staticmethod\n def _get_num_obj(path):\n return int(os.path.basename(path).split('_')[0][-1])\n\n def _get_target(self, path):\n \"\"\"\n Args:\n path (string): path to directory\n\n Returns:\n target (int): class label\n\n \"\"\"\n num_obj = self._get_num_obj(path)\n if num_obj == 0:\n return 1 # pure bg has label 1\n return 0 # remaining images have label 0\n\n def find_bbx(self, maskj):\n\n maskj = np.expand_dims(maskj, axis=-1)\n box = np.array([0, 0, 0, 0])\n\n # Compute Bbx coordinates\n\n margin = 3\n xs = np.nonzero(np.sum(maskj, axis=0))[0]\n ys = np.nonzero(np.sum(maskj, axis=1))[0]\n box[1] = xs.min() - margin\n box[3] = xs.max() + margin\n box[0] = 0\n box[2] = maskj.shape[0]\n\n if box[0] < 0: box[0] = 0\n if box[1] < 0: box[1] = 0\n\n h = box[2] - box[0]\n w = box[3] - box[1]\n if h < w:\n diff = w - h\n half = int(diff / 2)\n box[0] -= half\n if box[0] < 0:\n box[2] -= box[0]\n box[0] = 0\n else:\n box[2] += diff - half\n\n if box[2] > maskj.shape[0]:\n box[2] = maskj.shape[0]\n else:\n diff = h - w\n half = int(diff / 2)\n box[1] -= half\n if box[1] < 0:\n box[3] -= box[1]\n box[1] = 0\n else:\n box[3] += diff - half\n if box[3] > maskj.shape[1]:\n box[3] = maskj.shape[1]\n\n if box[3] == box[1]:\n box[3] += 1\n if box[0] == box[2]:\n box[2] += 1\n\n return box\n\n def __getitem__(self, idx):\n filename = self.filenames[idx]\n label = self.labels[idx]\n mask = Image.open(filename)\n mask = np.array(mask)\n if len(mask.shape)>2:\n mask = mask[:,:,-1]\n box = self.find_bbx(mask)\n \n img = Image.open(os.path.join(self.impath,\n os.path.basename(filename)[:-4]+'_1.png'))\n\n img = np.array(img)\n img = img[box[0]:box[2], box[1]:box[3],:]\n mask = mask[box[0]:box[2], box[1]:box[3]]/255.\n mask = mask[:,:,None]\n # img = img * mask + 255. * (1 - mask) # THIS IS THE ORIGINAL ONE\n img = img*mask + 0.*(1-mask)\n # img = np.stack([img]*3, axis=-1)\n img = Image.fromarray(img.astype(np.uint8))\n img = self.transform(img)\n return img, label\n# utility functions\ndef make_equal_label(filenames, labels):\n \"\"\"\n Duplicate filenames and labels s.t. they have equal numbers of labels.\n Args:\n filenames (list): filenames to duplicate\n labels (list): corresponding label to each filename\n\n \"\"\"\n filenames_array = np.array(filenames)\n labels_array = np.array(labels)\n \n counter = Counter(labels)\n max_cnt = max(counter.values())\n \n for lbl, cnt in counter.items():\n if cnt == max_cnt: continue\n diff = max_cnt - cnt\n idcs = np.where(labels_array == lbl)[0]\n \n replace = diff > len(idcs) # only draw with replacement if necessary\n idcs = np.random.choice(idcs, diff, replace=replace)\n \n filenames.extend(filenames_array[idcs])\n labels.extend(labels_array[idcs])\n"
] |
[
[
"numpy.array",
"numpy.random.choice",
"numpy.sum",
"numpy.any",
"numpy.where",
"numpy.expand_dims"
]
] |
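The `make_equal_label` helper in the row above oversamples the minority class so both labels appear equally often. A hedged, standalone sketch of that balancing step; the toy filenames and labels below are invented for illustration:

```python
# Oversample the minority label with numpy.random.choice until label counts match,
# mirroring make_equal_label above. Filenames/labels here are made up.
from collections import Counter
import numpy as np

filenames = ["a.png", "b.png", "c.png", "d.png"]
labels = [0, 0, 0, 1]

names_arr, labels_arr = np.array(filenames), np.array(labels)
counts = Counter(labels)
max_cnt = max(counts.values())

for lbl, cnt in counts.items():
    if cnt == max_cnt:
        continue
    diff = max_cnt - cnt
    idcs = np.where(labels_arr == lbl)[0]
    extra = np.random.choice(idcs, diff, replace=diff > len(idcs))
    filenames.extend(names_arr[extra].tolist())
    labels.extend(labels_arr[extra].tolist())

print(Counter(labels))  # Counter({0: 3, 1: 3})
```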
rgalhama/wordrep_cmcl2020
|
[
"074c268314f94f9eaa02b8a4352608a520447f42"
] |
[
"src/evaluation/stats/corr_freq_aoa.py"
] |
[
"import statsmodels.api as sm\nimport statsmodels.formula.api as smf\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.stats import pearsonr\n\ndef plot_with_category(df, title):\n\n fig, ax = plt.subplots()\n sns.scatterplot(x=\"logfreq\", y=\"aoa\", hue=\"lexical_class\", data=df, s=200)\n ax.set_title(title)\n plt.savefig(\"%s_freq_aoa_category.png\"%(title))\n\ndef plot_category_aoa(df, title):\n\n fig, ax = plt.subplots()\n sns.violinplot(x=\"lexical_class\", y=\"aoa\", data=df)\n ax.set_title(title)\n plt.savefig(\"%s_aoa_category.png\"%(title))\n\ndef main(aoa_path, freq_file, plot):\n\n dffreq = pd.read_csv(freq_file, sep=\" \", names=[\"freq\", \"word\"])\n\n print(\"Correlation frequency - AoA (person r):\\n\")\n for measure in [\"understands\", \"produces\"]:\n df=None\n print(measure.upper())\n for category in [\"nouns\", \"verbs\"]:#, \"adjectives\"]:\n df_cat=pd.read_csv(aoa_path%(measure, category), sep=\";\")\n df_cat = pd.merge(df_cat, dffreq, left_on=\"uni_lemma\", right_on=\"word\", how=\"left\")\n df_cat[\"logfreq\"] = np.log(df_cat.freq)\n print(\"\\n\"+category)\n print(df_cat.corr())\n print(\" \")\n print(\"p-values\")\n print(df_cat.corr(method=lambda x, y: pearsonr(x, y)[1])- np.eye(3))\n print(\" \")\n if df is None:\n df = df_cat\n else:\n df=df.append(df_cat)\n print(\"all\")\n print(df.corr())\n print(\"\\n\\n\")\n\n if plot:\n plot_with_category(df, measure)\n plot_category_aoa(df, measure)\n\nif __name__ == \"__main__\":\n\n #Frequency file\n freq_file = \"../../../../03-data/CHILDES_CDS/childes_db_extraction/eng_0_60_cds_wordcounts_stem.txt\"\n #AoA file\n aoa_path = \"../../../../03-data/AoA/wordbank/data_for_study_2/it1/aoa_wordbank_eng_%s_prop0.5_%s_clean_means.csv\"\n\n plot=True\n main(aoa_path, freq_file, plot)"
] |
[
[
"numpy.log",
"pandas.merge",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots",
"numpy.eye",
"scipy.stats.pearsonr",
"pandas.read_csv"
]
] |
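The script above obtains Pearson p-values with a callable `method` passed to `DataFrame.corr`, then subtracts an identity matrix to clear the diagonal. A sketch of that pattern on synthetic data, since the CHILDES/Wordbank CSVs it reads are not reproduced here:

```python
# Pearson r and p-value matrix via pandas + scipy, on synthetic frequency/AoA data.
import numpy as np
import pandas as pd
from scipy.stats import pearsonr

rng = np.random.default_rng(0)
freq = rng.lognormal(size=50)
aoa = 30 - 2 * np.log(freq) + rng.normal(scale=1.0, size=50)
df = pd.DataFrame({"logfreq": np.log(freq), "aoa": aoa})

print(df.corr())  # Pearson r between logfreq and aoa

# pandas applies the callable to each column pair and puts 1.0 on the diagonal;
# subtracting the identity clears that diagonal, as in the script above.
pvals = df.corr(method=lambda x, y: pearsonr(x, y)[1]) - np.eye(len(df.columns))
print(pvals)
```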
ikervazquezlopez/Pylearn2
|
[
"2971e8f64374ffde572d4cf967aad5342beaf5e0",
"2971e8f64374ffde572d4cf967aad5342beaf5e0",
"2971e8f64374ffde572d4cf967aad5342beaf5e0",
"2971e8f64374ffde572d4cf967aad5342beaf5e0"
] |
[
"pylearn2/datasets/tests/test_cifar10.py",
"pylearn2/train_extensions/window_flip.py",
"pylearn2/costs/tests/test_term_1_0_l1_penalty.py",
"pylearn2/expr/tests/test_probabilistic_max_pooling.py"
] |
[
"import unittest\nimport numpy as np\nfrom pylearn2.datasets.cifar10 import CIFAR10\nfrom pylearn2.space import Conv2DSpace\nfrom pylearn2.testing.skip import skip_if_no_data\n\n\nclass TestCIFAR10(unittest.TestCase):\n\n def setUp(self):\n skip_if_no_data()\n self.test = CIFAR10(which_set='test')\n\n def test_topo(self):\n \"\"\"Tests that a topological batch has 4 dimensions\"\"\"\n train = CIFAR10(which_set='train')\n topo = train.get_batch_topo(1)\n assert topo.ndim == 4\n\n def test_topo_c01b(self):\n \"\"\"\n Tests that a topological batch with axes ('c',0,1,'b')\n can be dimshuffled back to match the standard ('b',0,1,'c')\n format.\n \"\"\"\n batch_size = 100\n c01b_test = CIFAR10(which_set='test', axes=('c', 0, 1, 'b'))\n c01b_X = c01b_test.X[0:batch_size, :]\n c01b = c01b_test.get_topological_view(c01b_X)\n assert c01b.shape == (3, 32, 32, batch_size)\n b01c = c01b.transpose(3, 1, 2, 0)\n b01c_X = self.test.X[0:batch_size, :]\n assert c01b_X.shape == b01c_X.shape\n assert np.all(c01b_X == b01c_X)\n b01c_direct = self.test.get_topological_view(b01c_X)\n assert b01c_direct.shape == b01c.shape\n assert np.all(b01c_direct == b01c)\n\n def test_iterator(self):\n # Tests that batches returned by an iterator with topological\n # data_specs are the same as the ones returned by calling\n # get_topological_view on the dataset with the corresponding order\n batch_size = 100\n b01c_X = self.test.X[0:batch_size, :]\n b01c_topo = self.test.get_topological_view(b01c_X)\n b01c_b01c_it = self.test.iterator(\n mode='sequential',\n batch_size=batch_size,\n data_specs=(Conv2DSpace(shape=(32, 32),\n num_channels=3,\n axes=('b', 0, 1, 'c')),\n 'features'))\n b01c_b01c = b01c_b01c_it.next()\n assert np.all(b01c_topo == b01c_b01c)\n\n c01b_test = CIFAR10(which_set='test', axes=('c', 0, 1, 'b'))\n c01b_X = c01b_test.X[0:batch_size, :]\n c01b_topo = c01b_test.get_topological_view(c01b_X)\n c01b_c01b_it = c01b_test.iterator(\n mode='sequential',\n batch_size=batch_size,\n data_specs=(Conv2DSpace(shape=(32, 32),\n num_channels=3,\n axes=('c', 0, 1, 'b')),\n 'features'))\n c01b_c01b = c01b_c01b_it.next()\n assert np.all(c01b_topo == c01b_c01b)\n\n # Also check that samples from iterators with the same data_specs\n # with Conv2DSpace do not depend on the axes of the dataset\n b01c_c01b_it = self.test.iterator(\n mode='sequential',\n batch_size=batch_size,\n data_specs=(Conv2DSpace(shape=(32, 32),\n num_channels=3,\n axes=('c', 0, 1, 'b')),\n 'features'))\n b01c_c01b = b01c_c01b_it.next()\n assert np.all(b01c_c01b == c01b_c01b)\n\n c01b_b01c_it = c01b_test.iterator(\n mode='sequential',\n batch_size=batch_size,\n data_specs=(Conv2DSpace(shape=(32, 32),\n num_channels=3,\n axes=('b', 0, 1, 'c')),\n 'features'))\n c01b_b01c = c01b_b01c_it.next()\n assert np.all(c01b_b01c == b01c_b01c)\n",
"\"\"\" TrainExtensions for doing random spatial windowing and flipping of an\n image dataset on every epoch. TODO: fill out properly.\"\"\"\n\nimport warnings\nimport numpy\nfrom . import TrainExtension\nfrom pylearn2.datasets.preprocessing import CentralWindow\nfrom pylearn2.utils.exc import reraise_as\nfrom pylearn2.utils.rng import make_np_rng\nfrom pylearn2.utils import py_integer_types\n\ntry:\n from ..utils._window_flip import random_window_and_flip_c01b\n from ..utils._window_flip import random_window_and_flip_b01c\nexcept ImportError:\n reraise_as(ImportError(\"Import of Cython module failed. Please make sure \"\n \"you have run 'python setup.py develop' in the \"\n \"pylearn2 directory\"))\n\n__authors__ = \"David Warde-Farley\"\n__copyright__ = \"Copyright 2010-2012, Universite de Montreal\"\n__credits__ = [\"David Warde-Farley\"]\n__license__ = \"3-clause BSD\"\n__maintainer__ = \"David Warde-Farley\"\n__email__ = \"wardefar@iro\"\n\n\ndef _zero_pad(array, amount, axes=(1, 2)):\n \"\"\"\n Returns a copy of <array> with zero-filled padding around the margins.\n\n The new array has the same dimensions as the input array, except for\n the dimensions given by <axes>, which are increased by 2*<amount>.\n\n Parameters\n ----------\n array: numpy.ndarray\n The array to zero-pad.\n\n amount: int\n The number of zeros to append to the beginning and end of each dimension\n in <axes>. (That axis will grow by 2*<amount>).\n\n axes: tuple\n The dimensions to pad. These are indices, not axis names like the 0, 1\n in ('b', 0, 1, 'c').\n \"\"\"\n if amount == 0:\n return array\n new_shape = []\n slices = []\n for i, s in enumerate(array.shape):\n if i in axes:\n new_shape.append(s + 2 * amount)\n slices.append(slice(amount, -amount))\n else:\n new_shape.append(s)\n slices.append(slice(None))\n new_shape = tuple(new_shape)\n slices = tuple(slices)\n new_array = numpy.zeros(new_shape, dtype=array.dtype)\n new_array[slices] = array\n return new_array\n\n\nclass WindowAndFlip(TrainExtension):\n \"\"\"\n An extension that allows an image dataset to be flipped and\n windowed after each epoch of training.\n\n Parameters\n ----------\n window_shape : WRITEME\n randomize : list, optional\n If specified, a list of Datasets to randomly window and\n flip at each epoch.\n randomize_once : list, optional\n If specified, a list of Datasets to randomly window and\n flip once at the start of training.\n center : list, optional\n If specified, a list of Datasets to centrally window\n once at the start of training.\n rng : numpy.random.RandomState object or seed, optional\n A random number generator or seed used to create one.\n Seeded deterministically by default.\n pad_randomized : int, optional\n Amount of padding to add to each side of the images\n in `randomize` and `randomize_once`. Useful if you\n want to do zero-padded windowing with `window_shape`\n the actual size of the dataset, and validate/test on\n full-size images instead of central patches. Default\n is 0.\n flip : bool, optional\n Reflect images on the horizontal axis with probability\n 0.5. `True` by default.\n \"\"\"\n def __init__(self,\n window_shape,\n randomize=None,\n randomize_once=None,\n center=None,\n rng=(2013, 2, 20),\n pad_randomized=0,\n flip=True):\n self._window_shape = tuple(window_shape)\n\n # Defined in setup(). 
A dict that maps Datasets in self._randomize and\n # self._randomize_once to zero-padded versions of their topological\n # views.\n self._original = None\n\n self._randomize = randomize if randomize else []\n self._randomize_once = randomize_once if randomize_once else []\n self._center = center if center else []\n self._pad_randomized = pad_randomized\n self._flip = flip\n\n assert isinstance(self._randomize, list), (\n \"The 'randomize' parameter of WindowAndFlip should be a list\")\n assert isinstance(self._randomize_once, list), (\n \"The 'randomize_once' parameter of WindowAndFlip should be a list\")\n assert isinstance(self._center, list), (\n \"The 'center' parameter of WindowAndFlip should be a list\")\n assert isinstance(self._pad_randomized, py_integer_types), (\n \"The 'pad_randomized' parameter of WindowAndFlip should be an int\")\n\n if randomize is None and randomize_once is None and center is None:\n warnings.warn(self.__class__.__name__ + \" instantiated without \"\n \"any dataset arguments, and therefore does nothing\",\n stacklevel=2)\n\n self._rng = make_np_rng(rng, which_method=\"random_integers\")\n\n def setup(self, model, dataset, algorithm):\n \"\"\"\n .. todo::\n\n WRITEME\n\n Notes\n -----\n `dataset` argument is ignored\n \"\"\"\n dataset = None\n\n # Central windowing of auxiliary datasets (e.g. validation sets)\n preprocessor = CentralWindow(self._window_shape)\n for data in self._center:\n preprocessor.apply(data)\n\n #\n # Do the initial random windowing\n #\n\n randomize_now = self._randomize + self._randomize_once\n\n # maps each dataset in randomize_now to a zero-padded topological view\n # of its data.\n self._original = dict((data, _zero_pad(\n data.get_topological_view().astype('float32'),\n self._pad_randomized))\n for data in randomize_now)\n\n # For each dataset, for each image, extract a randomly positioned and\n # potentially horizontal-flipped window\n self.randomize_datasets(randomize_now)\n\n def randomize_datasets(self, datasets):\n \"\"\"\n Applies random translations and flips to the selected datasets.\n\n Parameters\n ----------\n datasets : WRITEME\n \"\"\"\n for dataset in datasets:\n if tuple(dataset.view_converter.axes) == ('c', 0, 1, 'b'):\n wf_func = random_window_and_flip_c01b\n elif tuple(dataset.view_converter.axes) == ('b', 0, 1, 'c'):\n wf_func = random_window_and_flip_b01c\n else:\n raise ValueError(\"Axes of dataset is not supported: %s\" %\n (str(dataset.view_converter.axes)))\n arr = wf_func(self._original[dataset],\n self._window_shape,\n rng=self._rng, flip=self._flip)\n dataset.set_topological_view(arr, axes=dataset.view_converter.axes)\n\n def on_monitor(self, model, dataset, algorithm):\n \"\"\"\n .. todo::\n\n WRITEME\n\n Notes\n -----\n All arguments are ignored.\n \"\"\"\n model = None\n dataset = None\n algorithm = None\n\n self.randomize_datasets(self._randomize)\n",
"\"\"\"\nTest term_1_0_l1_penalty\n\"\"\"\nimport numpy as np\nfrom pylearn2.datasets.dense_design_matrix import DenseDesignMatrix\nfrom pylearn2.models.mlp import MLP, Sigmoid\nfrom pylearn2.train import Train\nfrom pylearn2.training_algorithms.sgd import SGD, ExponentialDecay\nfrom pylearn2.termination_criteria import And, EpochCounter, MonitorBased\nfrom pylearn2.costs.cost import SumOfCosts\nfrom pylearn2.costs.mlp import Default, L1WeightDecay\n\n\ndef create_dataset():\n \"\"\"\n Create a fake dataset to initiate the training\n \"\"\"\n x = np.array([[0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],\n [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 11, 12, 13, 14, 15,\n 16, 17, 18, 19, 20],\n [0.3, 0.1, 0.8, 0.1, 0.2, 0.6, 0.83, 0.45, 0.0, 0.67, 0.3,\n 0.74, 0.8, 0.1, 0.2, 0.46, 0.83, 0.45, 0.0, 0.67]])\n\n y = np.array([0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0,\n 1, 0, 1, 0, 0, 0, 0, 0]).reshape(20, 1)\n\n x_train = x[:7, :]\n x_valid = x[7:, :]\n y_train = y[:7]\n y_valid = y[7:]\n\n return x_train, y_train, x_valid, y_valid\n\n\ndef test_correctness():\n \"\"\"\n Test that the cost function works with float64\n \"\"\"\n x_train, y_train, x_valid, y_valid = create_dataset()\n\n trainset = DenseDesignMatrix(X=np.array(x_train), y=y_train)\n validset = DenseDesignMatrix(X=np.array(x_valid), y=y_valid)\n\n n_inputs = trainset.X.shape[1]\n n_outputs = 1\n n_hidden = 10\n\n hidden_istdev = 4 * (6 / float(n_inputs + n_hidden)) ** 0.5\n output_istdev = 4 * (6 / float(n_hidden + n_outputs)) ** 0.5\n\n model = MLP(layers=[Sigmoid(dim=n_hidden, layer_name='hidden',\n istdev=hidden_istdev),\n Sigmoid(dim=n_outputs, layer_name='output',\n istdev=output_istdev)],\n nvis=n_inputs, seed=[2013, 9, 16])\n\n termination_criterion = And([EpochCounter(max_epochs=1),\n MonitorBased(prop_decrease=1e-7,\n N=2)])\n\n cost = SumOfCosts([(0.99, Default()),\n (0.01, L1WeightDecay({}))])\n\n algo = SGD(1e-1,\n update_callbacks=[ExponentialDecay(decay_factor=1.00001,\n min_lr=1e-10)],\n cost=cost,\n monitoring_dataset=validset,\n termination_criterion=termination_criterion,\n monitor_iteration_mode='even_shuffled_sequential',\n batch_size=2)\n\n train = Train(model=model, dataset=trainset, algorithm=algo)\n train.main_loop()\n\n\nif __name__ == '__main__':\n test_correctness()\n",
"from __future__ import print_function\n\nimport numpy as np\nimport warnings\n\nfrom theano.compat.six.moves import xrange\nfrom theano import config\nfrom theano import function\nimport theano.tensor as T\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams\n\nfrom pylearn2.expr.probabilistic_max_pooling import max_pool_python\nfrom pylearn2.expr.probabilistic_max_pooling import max_pool_channels_python\nfrom pylearn2.expr.probabilistic_max_pooling import max_pool\nfrom pylearn2.expr.probabilistic_max_pooling import max_pool_channels\nfrom pylearn2.expr.probabilistic_max_pooling import max_pool_b01c\nfrom pylearn2.expr.probabilistic_max_pooling import max_pool_c01b\nfrom pylearn2.expr.probabilistic_max_pooling import max_pool_unstable\nfrom pylearn2.expr.probabilistic_max_pooling import max_pool_softmax_op\nfrom pylearn2.expr.probabilistic_max_pooling import \\\n max_pool_softmax_with_bias_op\nfrom pylearn2.testing import no_debug_mode\n\n\ndef check_correctness_channelwise(f):\n \"\"\"\n Tests that the theano expression emitted by f computes the same values\n as the ground truth python function\n Note: to keep the python version as dead simple as possible (i.e., to make\n sure there are not bugs in the ground truth) it uses the numerically\n unstable verison of softmax. So this test does not work with too big of\n numbers.\n \"\"\"\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n pool_size = 4\n n = 3 * pool_size\n zv = rng.randn(batch_size, n).astype(config.floatX) * 1. - 1.5\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n p_np, h_np = max_pool_channels_python(zv, pool_size, top_down_v)\n\n z_th = T.matrix()\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.name = 'top_down_th'\n\n p_th, h_th = f(z_th, pool_size, top_down_th)\n\n func = function([z_th, top_down_th], [p_th, h_th])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False\n\n\ndef check_correctness_sigmoid_channelwise(f):\n \"\"\"\n Tests that f is equivalent to the sigmoid function when the pool size is 1\n \"\"\"\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n pool_size = 1\n n = 3 * pool_size\n zv = rng.randn(batch_size, n).astype(config.floatX) * 1. - 1.5\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n z_th = T.matrix()\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.name = 'top_down_th'\n\n p_th, h_th = f(z_th, pool_size, top_down_th)\n h_s = T.nnet.sigmoid(z_th + top_down_th)\n\n func = function([z_th, top_down_th], [p_th, h_th, h_s])\n\n pv, hv, h_s = func(zv, top_down_v)\n p_s = h_s\n\n assert p_s.shape == pv.shape\n assert h_s.shape == hv.shape\n if not np.allclose(h_s, hv):\n print((h_s.min(), h_s.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_s, pv):\n diff = abs(p_s - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False\n\n\ndef check_correctness(f):\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 2. 
- 3.\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols))\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype=config.floatX)()\n z_th.name = 'z_th'\n\n p_th, h_th = f(z_th, (pool_rows, pool_cols))\n\n func = function([z_th], [p_th, h_th])\n\n pv, hv = func(zv)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n assert np.allclose(p_np, pv)\n\n\ndef check_correctness_bc01(f):\n \"\"\"\n Tests that the theano expression emitted by f computes the same values\n as the ground truth python function\n Note: to keep the python version as dead simple as possible (i.e., to make\n sure there are not bugs in the ground truth) it uses the numerically\n unstable verison of softmax. So this test does not work with too big of\n numbers.\n \"\"\"\n\n rng = np.random.RandomState([2012, 7, 19])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 1. - 1.5\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.name = 'z_th'\n zr = z_th.dimshuffle(0, 3, 1, 2)\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_r = top_down_th.dimshuffle(0, 3, 1, 2)\n\n p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)\n\n func = function([z_th, top_down_th], [p_th.dimshuffle(0, 2, 3, 1),\n h_th.dimshuffle(0, 2, 3, 1)])\n\n pv, hv = func(zv, top_down_v)\n\n assert p_np.shape == pv.shape\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False\n\n\ndef check_correctness_c01b(f):\n \"\"\"\n Tests that the theano expression emitted by f computes the same values\n as the ground truth python function\n Note: to keep the python version as dead simple as possible (i.e., to make\n sure there are not bugs in the ground truth) it uses the numerically\n unstable version of softmax. So this test does not work with too big of\n numbers.\n \"\"\"\n\n rng = np.random.RandomState([2013, 5, 6])\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n\n # Do the python ground truth in b01c format\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 1. 
- 1.5\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n p_np, h_np = max_pool_python(zv, (pool_rows, pool_cols), top_down_v)\n\n # Dimshuffle the inputs into c01b for the theano implementation\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n zr = z_th.dimshuffle(3, 1, 2, 0)\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_th.tag.test_value = top_down_v\n top_down_r = top_down_th.dimshuffle(3, 1, 2, 0)\n\n p_th, h_th = f(zr, (pool_rows, pool_cols), top_down_r)\n\n func = function([z_th, top_down_th], [p_th.dimshuffle(3, 1, 2, 0),\n h_th.dimshuffle(3, 1, 2, 0)])\n\n pv, hv = func(zv, top_down_v)\n\n if not p_np.shape == pv.shape:\n raise AssertionError(str((p_np.shape, pv.shape)))\n assert h_np.shape == hv.shape\n if not np.allclose(h_np, hv):\n print((h_np.min(), h_np.max()))\n print((hv.min(), hv.max()))\n assert False\n if not np.allclose(p_np, pv):\n diff = abs(p_np - pv)\n print('max diff ', diff.max())\n print('min diff ', diff.min())\n print('ave diff ', diff.mean())\n assert False\n warnings.warn(\"TODO: make sampling tests run on c01b format of pooling.\")\n\n\n@no_debug_mode\ndef check_sample_correctishness_b01c(f):\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, rows, cols,\n channels).astype(config.floatX) * 2. - 3.\n top_down_v = rng.randn(batch_size, rows / pool_rows, cols / pool_cols,\n channels).astype(config.floatX)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.name = 'z_th'\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,\n theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes many\n # different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. 
/ float(buckets)\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has\n # some spatial pattern this is useful for detecting bugs like\n # not handling the border correctly, etc.\n from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch((pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]),(hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[1]):\n for j in xrange(ps.shape[2]):\n for l in xrange(channels):\n p = ps[k, i, j, l]\n h = hs[k, i*pool_rows:(i+1)*pool_rows,\n j*pool_cols:(j+1)*pool_cols, l]\n assert h.shape == (pool_rows, pool_cols)\n assert p == h.max()\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"\n\n\n@no_debug_mode\ndef check_sample_correctishness_c01b(f):\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(channels, rows, cols,\n batch_size).astype(config.floatX) * 2. - 3.\n top_down_v = rng.randn(channels, rows / pool_rows, cols / pool_cols,\n batch_size).astype(config.floatX)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.name = 'z_th'\n z_th.tag.test_value = zv\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.name = 'top_down_th'\n top_down_th.tag.test_value = top_down_v\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,\n theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. 
* hv\n\n # make sure the test gets good coverage, ie, that it includes\n # many different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. / float(buckets)\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[1]):\n for j in xrange(ps.shape[2]):\n for l in xrange(channels):\n p = ps[l, i, j, k]\n h = hs[l, i*pool_rows:(i+1)*pool_rows,\n j*pool_cols:(j+1)*pool_cols, k]\n assert h.shape == (pool_rows, pool_cols)\n assert p == h.max()\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"\n\n\n@no_debug_mode\ndef check_sample_correctishness_bc01(f):\n \"\"\"\n Tests that the sample mean converges to the conditional\n expectation given by the function\n Tests that p really is the max of the samples\n Tests that at most one h in a group is on\n \"\"\"\n\n batch_size = 5\n rows = 32\n cols = 30\n channels = 3\n pool_rows = 2\n pool_cols = 3\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, channels, rows,\n cols).astype(config.floatX) * 2. - 3.\n top_down_v = rng.randn(batch_size, channels, rows / pool_rows,\n cols / pool_cols).astype(config.floatX)\n\n z_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.TensorType(broadcastable=(False, False, False, False),\n dtype = config.floatX)()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, (pool_rows, pool_cols), top_down_th,\n theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. * hv\n\n # make sure the test gets good coverage, ie, that it includes many\n # different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. 
/ float(buckets)\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]), (hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[2]):\n for j in xrange(ps.shape[3]):\n for l in xrange(channels):\n p = ps[k, l, i, j]\n h = hs[k, l, i*pool_rows:(i+1)*pool_rows,\n j*pool_cols:(j+1)*pool_cols]\n assert h.shape == (pool_rows, pool_cols)\n assert p == h.max()\n assert h.sum() <= 1\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"\n\n\n@no_debug_mode\ndef check_sample_correctishness_channelwise(f):\n \"\"\"\n Tests that the sample mean converges to the conditional expectation given\n by the function Tests that p really is the max of the samples tests that\n at most one h in a group is on\n \"\"\"\n\n batch_size = 27\n pool_size = 4\n n = pool_size * 21\n\n rng = np.random.RandomState([2012, 9, 26])\n zv = rng.randn(batch_size, n).astype(config.floatX) * 3.5 - 5.\n top_down_v = rng.randn(batch_size, n / pool_size).astype(config.floatX)\n\n z_th = T.matrix()\n z_th.tag.test_value = zv\n z_th.name = 'z_th'\n\n top_down_th = T.matrix()\n top_down_th.tag.test_value = top_down_v\n top_down_th.name = 'top_down_th'\n\n theano_rng = MRG_RandomStreams(rng.randint(2147462579))\n p_th, h_th, p_sth, h_sth = f(z_th, pool_size, top_down_th, theano_rng)\n\n prob_func = function([z_th, top_down_th], [p_th, h_th])\n pv, hv = prob_func(zv, top_down_v)\n\n sample_func = function([z_th, top_down_th], [p_sth, h_sth])\n\n acc_p = 0. * pv\n acc_h = 0. 
* hv\n\n # make sure the test gets good coverage, ie, that it includes\n # many different activation probs for both detector and pooling layer\n buckets = 10\n bucket_width = 1. / float(buckets)\n print(pv.min(), pv.max())\n print(hv.min(), hv.max())\n for i in xrange(buckets):\n lower_lim = i * bucket_width\n upper_lim = (i+1) * bucket_width\n print(lower_lim, upper_lim)\n\n assert np.any((pv >= lower_lim) * (pv < upper_lim))\n assert np.any((hv >= lower_lim) * (hv < upper_lim))\n\n assert upper_lim == 1.\n\n for i in xrange(10000):\n ps, hs = sample_func(zv, top_down_v)\n\n assert ps.shape == pv.shape\n assert hs.shape == hv.shape\n\n acc_p += ps\n acc_h += hs\n\n est_p = acc_p / float(i+1)\n est_h = acc_h / float(i+1)\n\n pd = np.abs(est_p-pv)\n hd = np.abs(est_h-hv)\n\n \"\"\"\n # plot maps of the estimation error, this is to see if it has some\n # spatial pattern this is useful for detecting bugs like not handling\n # the border correctly, etc.\n # from pylearn2.gui.patch_viewer import PatchViewer\n\n pv = PatchViewer((pd.shape[0],pd.shape[3]),(pd.shape[1],pd.shape[2]),\n is_color = False)\n for i in xrange(pd.shape[0]):\n for j in xrange(pd.shape[3]):\n pv.add_patch( (pd[i,:,:,j] / pd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n\n pv = PatchViewer((hd.shape[0],hd.shape[3]),(hd.shape[1],hd.shape[2]),\n is_color = False)\n for i in xrange(hd.shape[0]):\n for j in xrange(hd.shape[3]):\n pv.add_patch( (hd[i,:,:,j] / hd.max() )* 2.0 - 1.0, rescale = False)\n pv.show()\n \"\"\"\n\n \"\"\"\n plot expectation to estimate versus error in estimation\n expect bigger errors for values closer to 0.5\n\n from matplotlib import pyplot as plt\n\n #nelem = reduce( lambda x, y : x*y, pd.shape)\n #plt.scatter( pv.reshape(nelem), pd.reshape(nelem))\n #plt.show()\n\n nelem = reduce( lambda x, y : x*y, hd.shape)\n plt.scatter( hv.reshape(nelem), hd.reshape(nelem))\n plt.show()\n \"\"\"\n\n # don't really know how tight this should be\n # but you can try to pose an equivalent problem\n # and implement it in another way\n # using a numpy implementation in softmax_acc.py\n # I got a max error of .17\n assert max(pd.max(), hd.max()) < .17\n\n # Do exhaustive checks on just the last sample\n assert np.all((ps == 0) + (ps == 1))\n assert np.all((hs == 0) + (hs == 1))\n\n for k in xrange(batch_size):\n for i in xrange(ps.shape[1]):\n p = ps[k, i]\n h = hs[k, i*pool_size:(i+1)*pool_size]\n assert h.shape == (pool_size,)\n assert p == h.max()\n assert h.sum() <= 1\n\n \"\"\" If you made it to here, it's correctish\n (cant tell if samples are perfectly \"correct\") \"\"\"\n\n\ndef test_max_pool_channels():\n check_correctness_channelwise(max_pool_channels)\n\n\ndef test_max_pool_channels_sigmoid():\n check_correctness_sigmoid_channelwise(max_pool_channels)\n\n\ndef test_max_pool_channels_samples():\n check_sample_correctishness_channelwise(max_pool_channels)\n\n\ndef test_max_pool():\n check_correctness_bc01(max_pool)\n\n\ndef test_max_pool_c01b():\n check_correctness_c01b(max_pool_c01b)\n\n\ndef test_max_pool_samples():\n check_sample_correctishness_bc01(max_pool)\n\n\ndef test_max_pool_b01c_samples():\n check_sample_correctishness_b01c(max_pool_b01c)\n\n\ndef test_max_pool_c01b_samples():\n check_sample_correctishness_c01b(max_pool_c01b)\n\n\ndef test_max_pool_b01c():\n check_correctness(max_pool_b01c)\n\n\ndef test_max_pool_unstable():\n check_correctness(max_pool_unstable)\n\n\ndef test_max_pool_softmax_op():\n check_correctness(max_pool_softmax_op)\n\n\ndef test_max_pool_softmax_with_bias_op():\n 
check_correctness(max_pool_softmax_with_bias_op)\n"
] |
[
[
"numpy.all"
],
[
"numpy.zeros"
],
[
"numpy.array"
],
[
"numpy.random.RandomState",
"numpy.allclose",
"numpy.any",
"numpy.abs",
"numpy.all"
]
] |
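The `_zero_pad` helper in `window_flip.py` above grows the chosen axes by `2*amount` with zero-filled margins. A sketch of the same effect using `numpy.pad` (an alternative formulation, not the Pylearn2 implementation); shapes are illustrative:

```python
# Zero-pad axes 1 and 2 of a ('b', 0, 1, 'c') batch, matching _zero_pad's effect.
import numpy as np

batch = np.ones((5, 32, 32, 3), dtype="float32")
amount = 4
pad_width = [(amount, amount) if axis in (1, 2) else (0, 0)
             for axis in range(batch.ndim)]
padded = np.pad(batch, pad_width, mode="constant")

print(padded.shape)               # (5, 40, 40, 3)
print(padded[:, :amount].sum())   # 0.0 -- the new border is zero-filled
```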
Chandy002/PySyncObj
|
[
"aeda3d264aef80e1310aacf770c696e53545b4bd"
] |
[
"cs598_benchmarks/plot_cdf_latencies.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nimport json\n\ndef plot_cdf(directory):\n colors = ['#ffc406', 'green', 'blue', 'red']\n i = 0\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n filepath = os.path.join(directory, filename)\n with open(filepath, 'r') as f:\n lines = f.readlines()\n latencies = []\n for line in lines:\n toks = line.strip().split(':')\n if toks[0] == 'DELAYS':\n latencies += json.loads(toks[1].strip())\n\n data = np.sort(np.array(latencies))\n bins=np.append(data, data[-1]+1)\n counts, bin_edges = np.histogram(latencies, bins=bins, density=False)\n counts=counts.astype(float)/len(latencies)\n cdf = np.cumsum(counts)\n plt.plot(bin_edges[0:-1], cdf, linestyle='--', color=colors[i], label=filename.split('.')[0])\n plt.ylim((0,1))\n i += 1\n\n plt.legend()\n plt.ylabel('CDF')\n plt.xlabel('Latency (s)')\n plt.title('Follower Latency Vs. CDF') \n\n plot_margin = 0.1\n x0, x1, y0, y1 = plt.axis()\n plt.axis((x0 ,\n x1,\n y0 - plot_margin,\n y1 + plot_margin))\n\n plt.savefig(sys.argv[2] + '.png')\n\nif __name__ == '__main__':\n dir = sys.argv[1]\n plot_cdf(dir)"
] |
[
[
"numpy.histogram",
"numpy.array",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.ylabel",
"numpy.append",
"numpy.cumsum",
"matplotlib.pyplot.axis"
]
] |
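`plot_cdf_latencies.py` above builds an empirical CDF from per-sample histogram bins. The same computation, stripped of the log parsing and matplotlib rendering; the latencies are synthetic:

```python
# Empirical CDF via numpy.histogram + numpy.cumsum, as in plot_cdf above.
import numpy as np

latencies = np.random.exponential(scale=0.05, size=1000)  # synthetic delays (s)

data = np.sort(latencies)
bins = np.append(data, data[-1] + 1)          # one edge per sample, plus a closing edge
counts, bin_edges = np.histogram(latencies, bins=bins, density=False)
cdf = np.cumsum(counts.astype(float) / len(latencies))

print(bin_edges[0], cdf[0])     # smallest latency, ~1/N
print(bin_edges[-2], cdf[-1])   # largest latency, 1.0
```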
Linzee/datacatalog-connectors-rdbms
|
[
"f66072732d3eaec8b982a4e389ba29947c5b6c3d"
] |
[
"google-datacatalog-rdbms-connector/src/google/datacatalog_connectors/rdbms/scrape/metadata_scraper.py"
] |
[
"#!/usr/bin/python\n#\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom .metadata_normalizer import MetadataNormalizer\nimport pandas as pd\n\n\nclass MetadataScraper:\n\n def __init__(self):\n pass\n\n def get_metadata(self,\n metadata_definition,\n connection_args=None,\n query=None,\n csv_path=None):\n dataframe = self._get_metadata_as_dataframe(connection_args, query,\n csv_path)\n\n return MetadataNormalizer.to_metadata_dict(dataframe,\n metadata_definition)\n\n def _get_metadata_as_dataframe(self,\n connection_args=None,\n query=None,\n csv_path=None):\n if csv_path:\n logging.info('Scrapping metadata from csv path: \"%s\"', csv_path)\n dataframe = self._get_metadata_from_csv(csv_path)\n elif connection_args and len(connection_args.keys()) > 0:\n logging.info('Scrapping metadata from connection_args')\n dataframe = self._get_metadata_from_rdbms_connection(\n connection_args, query)\n else:\n raise Exception('Must supply either connection_args or csv_path')\n\n return dataframe\n\n def _get_metadata_from_rdbms_connection(self, connection_args, query):\n con = None\n try:\n con = self._create_rdbms_connection(connection_args)\n cur = con.cursor()\n cur.execute(query)\n rows = cur.fetchall()\n dataframe = self._create_dataframe(rows)\n\n if len(rows) == 0:\n raise Exception('RDBMS is empty, no metadata to extract.')\n\n dataframe.columns = [item[0].lower() for item in cur.description]\n return dataframe\n except: # noqa:E722\n logging.error(\n 'Error connecting to the database to extract metadata.')\n raise\n finally:\n if con:\n con.close()\n\n def _create_dataframe(self, rows):\n return pd.DataFrame(rows)\n\n # To connect to the RDBMS, it's required to override this method.\n # If you are ingesting from a CSV file, this method is not used.\n def _create_rdbms_connection(self, connection_args):\n raise NotImplementedError(\n 'Implementing this method is required to connect to a RDBMS!')\n\n @classmethod\n def _get_metadata_from_csv(cls, csv_path):\n return pd.read_csv(csv_path)\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv"
]
] |
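`_get_metadata_from_rdbms_connection` above turns DB-API cursor rows into a DataFrame and lowercases the column names taken from `cursor.description`. A sketch of that step using the standard-library `sqlite3` module as a stand-in RDBMS; the table and column names are invented, and the real connector supplies its own `_create_rdbms_connection`:

```python
# Cursor rows -> pandas DataFrame with lowercased column names, per the scraper above.
import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE Tables_Metadata (Schema_Name TEXT, Table_Name TEXT)")
con.execute("INSERT INTO Tables_Metadata VALUES ('public', 'orders')")

cur = con.cursor()
cur.execute("SELECT * FROM Tables_Metadata")
rows = cur.fetchall()

dataframe = pd.DataFrame(rows)
dataframe.columns = [item[0].lower() for item in cur.description]
print(dataframe)   # columns: schema_name, table_name
con.close()
```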
Mathiasn21/household_object_detection
|
[
"4c8a22070fae5e516bce4b704102ae42ae40c3b1"
] |
[
"code/resize_images.py"
] |
[
"import os\n\nimport cv2\n\nfrom tools.tools import load_json_data, load_image, save_json_data, load_config_file\nimport numpy as np\n\n\ndef calc_new_dimensions(max_size: int, width, height):\n \"\"\"\n Calculate new minimum dimensions and corresponding scalar\n :param max_size: int\n :param width: int\n :param height: int\n :return: tuple - new dimensions and minimum scalar\n \"\"\"\n width_scalar = max_size / width\n height_scalar = max_size / height\n\n best_fit_scalar = min(width_scalar, height_scalar)\n dimensions = (int(width * best_fit_scalar), int(height * best_fit_scalar))\n return dimensions, best_fit_scalar\n\n\ndef resize_segmentations(image_id, annotations_by_img_id, scalar):\n \"\"\"\n Resize the segmentations such that they match the new image dimensions\n :param image_id: int\n :param annotations_by_img_id: dict - annotations corersponding to image ids\n :param scalar: float - scalar that will be used to alter the segmentations\n \"\"\"\n for segmentations in annotations_by_img_id[image_id]:\n for index, segmentation in enumerate(segmentations):\n segmentations[index] = (np.array(segmentation) * scalar).tolist()\n\n\ndef resize_annotated_imgs(config):\n \"\"\"\n resize the annotated images and teh corresponding annotations.\n :param config: dict - script config\n \"\"\"\n\n # Extract information from config file\n annotations_out = config['annotations_out']\n ann_path = config['ann_path']\n images_path = config['images_path']\n max_size = config['original_max_size']\n\n # Load annotations\n coco_annotations = load_json_data(ann_path)\n annotations = coco_annotations['annotations']\n images_information = coco_annotations['images']\n\n # Sort image information\n images_by_name = dict((image_dict['file_name'], image_dict) for image_dict in images_information)\n\n # Sort annotations by image id\n annotations_by_img_id = {}\n for annotation_dict in annotations:\n key = annotation_dict['image_id']\n\n if key not in annotations_by_img_id:\n annotations_by_img_id[key] = []\n annotations_by_img_id[key].append(annotation_dict['segmentation'])\n\n # Iterate over all images and resize on demand. 
Also resizes corresponding annotations\n for file_name in os.listdir(images_path):\n full_path = images_path + file_name\n image = load_image(full_path)\n height, width = image.shape[:2]\n\n if width > max_size or height > max_size:\n dimensions, best_fit_scalar = calc_new_dimensions(max_size, width, height)\n\n if file_name in images_by_name:\n # Correct annotations as well\n image_information = images_by_name.get(file_name)\n image_id = image_information['id']\n image_information['width'] = dimensions[1]\n image_information['height'] = dimensions[0]\n\n if image_id in annotations_by_img_id:\n resize_segmentations(image_id, annotations_by_img_id, best_fit_scalar)\n\n save_json_data(coco_annotations, annotations_out)\n cv2.imwrite(full_path, cv2.resize(image, dimensions))\n\n\ndef resize_bg_imgs(images_path, max_size):\n \"\"\"\n Resize the background images\n :param images_path: str: directory path for the background images\n :param max_size: int - max dimension size\n \"\"\"\n\n # Iterate over the images and resize on demand\n for file_name in os.listdir(images_path):\n full_path = images_path + file_name\n image = load_image(full_path)\n height, width = image.shape[:2]\n\n if width > max_size or height > max_size:\n dimensions, best_fit_scalar = calc_new_dimensions(max_size, width, height)\n cv2.imwrite(full_path, cv2.resize(image, dimensions))\n\n\nif __name__ == '__main__':\n config_path = '../configs/resize_config.yaml'\n config = load_config_file(config_path)\n\n resize_bg_imgs(config['background_images_path'], config['background_max_size'])\n # resize_annotated_imgs(config)\n"
] |
[
[
"numpy.array"
]
] |
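`calc_new_dimensions` above picks the smaller of the two scale factors so the resized image fits inside `max_size` while keeping its aspect ratio, and `resize_segmentations` applies the same factor to the polygon coordinates. A sketch of that arithmetic without `cv2` or the YAML config; the numbers are illustrative:

```python
# Aspect-ratio-preserving scale factor, mirroring calc_new_dimensions above.
def calc_new_dimensions(max_size, width, height):
    best_fit_scalar = min(max_size / width, max_size / height)
    return (int(width * best_fit_scalar), int(height * best_fit_scalar)), best_fit_scalar

dims, scalar = calc_new_dimensions(1024, 4000, 3000)
print(dims)   # (1024, 768)

# Segmentation polygons are scaled by the same factor so they stay aligned
# with the resized image, as resize_segmentations does above.
polygon = [0.0, 0.0, 4000.0, 3000.0]
print([round(p * scalar, 1) for p in polygon])   # [0.0, 0.0, 1024.0, 768.0]
```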
rackerlabs/mlflow
|
[
"8607e630b77e139756923c7bf513ea6a300acd10"
] |
[
"mlflow/pyfunc/scoring_server/__init__.py"
] |
[
"\"\"\"\nScoring server for python model format.\nThe passed int model is expected to have function:\n predict(pandas.Dataframe) -> pandas.DataFrame\n\nInput, expected intext/csv or application/json format,\nis parsed into pandas.DataFrame and passed to the model.\n\nDefines two endpoints:\n /ping used for health check\n /invocations used for scoring\n\"\"\"\nfrom __future__ import print_function\n\nfrom collections import OrderedDict\nimport flask\nimport json\nfrom json import JSONEncoder\nimport logging\nimport numpy as np\nimport pandas as pd\nfrom six import reraise\nimport sys\nimport traceback\n\n# NB: We need to be careful what we import form mlflow here. Scoring server is used from within\n# model's conda environment. The version of mlflow doing the serving (outside) and the version of\n# mlflow in the model's conda environment (inside) can differ. We should therefore keep mlflow\n# dependencies to the minimum here.\n# ALl of the mlfow dependencies below need to be backwards compatible.\nfrom mlflow.exceptions import MlflowException\n\ntry:\n from mlflow.pyfunc import load_model\nexcept ImportError:\n from mlflow.pyfunc import load_pyfunc as load_model\nfrom mlflow.protos.databricks_pb2 import MALFORMED_REQUEST, BAD_REQUEST\nfrom mlflow.server.handlers import catch_mlflow_exception\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\n_SERVER_MODEL_PATH = \"__pyfunc_model_path__\"\n\nCONTENT_TYPE_CSV = \"text/csv\"\nCONTENT_TYPE_JSON = \"application/json\"\nCONTENT_TYPE_JSON_RECORDS_ORIENTED = \"application/json; format=pandas-records\"\nCONTENT_TYPE_JSON_SPLIT_ORIENTED = \"application/json; format=pandas-split\"\nCONTENT_TYPE_JSON_SPLIT_NUMPY = \"application/json-numpy-split\"\n\nCONTENT_TYPES = [\n CONTENT_TYPE_CSV,\n CONTENT_TYPE_JSON,\n CONTENT_TYPE_JSON_RECORDS_ORIENTED,\n CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n CONTENT_TYPE_JSON_SPLIT_NUMPY\n]\n\n_logger = logging.getLogger(__name__)\n\n\ndef parse_json_input(json_input, orient=\"split\"):\n \"\"\"\n :param json_input: A JSON-formatted string representation of a Pandas DataFrame, or a stream\n containing such a string representation.\n :param orient: The Pandas DataFrame orientation of the JSON input. This is either 'split'\n or 'records'.\n \"\"\"\n # pylint: disable=broad-except\n try:\n return pd.read_json(json_input, orient=orient, dtype=False)\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Failed to parse input as a Pandas DataFrame. Ensure that the input is\"\n \" a valid JSON-formatted Pandas DataFrame with the `{orient}` orient\"\n \" produced using the `pandas.DataFrame.to_json(..., orient='{orient}')`\"\n \" method.\".format(orient=orient)),\n error_code=MALFORMED_REQUEST)\n\n\ndef parse_csv_input(csv_input):\n \"\"\"\n :param csv_input: A CSV-formatted string representation of a Pandas DataFrame, or a stream\n containing such a string representation.\n \"\"\"\n # pylint: disable=broad-except\n try:\n return pd.read_csv(csv_input)\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Failed to parse input as a Pandas DataFrame. 
Ensure that the input is\"\n \" a valid CSV-formatted Pandas DataFrame produced using the\"\n \" `pandas.DataFrame.to_csv()` method.\"),\n error_code=MALFORMED_REQUEST)\n\n\ndef parse_split_oriented_json_input_to_numpy(json_input):\n \"\"\"\n :param json_input: A JSON-formatted string representation of a Pandas DataFrame with split\n orient, or a stream containing such a string representation.\n \"\"\"\n # pylint: disable=broad-except\n try:\n json_input_list = json.loads(json_input, object_pairs_hook=OrderedDict)\n return pd.DataFrame(index=json_input_list['index'],\n data=np.array(json_input_list['data'], dtype=object),\n columns=json_input_list['columns']).infer_objects()\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Failed to parse input as a Numpy. Ensure that the input is\"\n \" a valid JSON-formatted Pandas DataFrame with the split orient\"\n \" produced using the `pandas.DataFrame.to_json(..., orient='split')`\"\n \" method.\"\n ),\n error_code=MALFORMED_REQUEST)\n\n\ndef predictions_to_json(raw_predictions, output):\n predictions = _get_jsonable_obj(raw_predictions, pandas_orient=\"records\")\n json.dump(predictions, output, cls=NumpyEncoder)\n\n\ndef _handle_serving_error(error_message, error_code):\n \"\"\"\n Logs information about an exception thrown by model inference code that is currently being\n handled and reraises it with the specified error message. The exception stack trace\n is also included in the reraised error message.\n\n :param error_message: A message for the reraised exception.\n :param error_code: An appropriate error code for the reraised exception. This should be one of\n the codes listed in the `mlflow.protos.databricks_pb2` proto.\n \"\"\"\n traceback_buf = StringIO()\n traceback.print_exc(file=traceback_buf)\n reraise(MlflowException,\n MlflowException(\n message=error_message,\n error_code=error_code,\n stack_trace=traceback_buf.getvalue()))\n\n\ndef init(model):\n \"\"\"\n Initialize the server. Loads pyfunc model from the path.\n \"\"\"\n app = flask.Flask(__name__)\n\n @app.route('/ping', methods=['GET'])\n def ping(): # pylint: disable=unused-variable\n \"\"\"\n Determine if the container is working and healthy.\n We declare it healthy if we can load the model successfully.\n \"\"\"\n health = model is not None\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')\n\n @app.route('/invocations', methods=['POST'])\n @catch_mlflow_exception\n def transformation(): # pylint: disable=unused-variable\n \"\"\"\n Do an inference on a single batch of data. 
In this sample server,\n we take data as CSV or json, convert it to a Pandas DataFrame or Numpy,\n generate predictions and convert them back to json.\n \"\"\"\n # Convert from CSV to pandas\n if flask.request.content_type == CONTENT_TYPE_CSV:\n data = flask.request.data.decode('utf-8')\n csv_input = StringIO(data)\n data = parse_csv_input(csv_input=csv_input)\n elif flask.request.content_type in [CONTENT_TYPE_JSON, CONTENT_TYPE_JSON_SPLIT_ORIENTED]:\n data = parse_json_input(json_input=flask.request.data.decode('utf-8'),\n orient=\"split\")\n elif flask.request.content_type == CONTENT_TYPE_JSON_RECORDS_ORIENTED:\n data = parse_json_input(json_input=flask.request.data.decode('utf-8'),\n orient=\"records\")\n elif flask.request.content_type == CONTENT_TYPE_JSON_SPLIT_NUMPY:\n data = parse_split_oriented_json_input_to_numpy(flask.request.data.decode('utf-8'))\n else:\n return flask.Response(\n response=(\"This predictor only supports the following content types,\"\n \" {supported_content_types}. Got '{received_content_type}'.\".format(\n supported_content_types=CONTENT_TYPES,\n received_content_type=flask.request.content_type)),\n status=415,\n mimetype='text/plain')\n\n # Do the prediction\n # pylint: disable=broad-except\n try:\n raw_predictions = model.predict(data)\n except Exception:\n _handle_serving_error(\n error_message=(\n \"Encountered an unexpected error while evaluating the model. Verify\"\n \" that the serialized input Dataframe is compatible with the model for\"\n \" inference.\"),\n error_code=BAD_REQUEST)\n result = StringIO()\n predictions_to_json(raw_predictions, result)\n return flask.Response(response=result.getvalue(), status=200, mimetype='application/json')\n\n return app\n\n\ndef _predict(model_uri, input_path, output_path, content_type, json_format):\n pyfunc_model = load_model(model_uri)\n if input_path is None:\n input_path = sys.stdin\n\n if content_type == \"json\":\n df = parse_json_input(input_path, orient=json_format)\n elif content_type == \"csv\":\n df = parse_csv_input(input_path)\n else:\n raise Exception(\"Unknown content type '{}'\".format(content_type))\n\n if output_path is None:\n predictions_to_json(pyfunc_model.predict(df), sys.stdout)\n else:\n with open(output_path, \"w\") as fout:\n predictions_to_json(pyfunc_model.predict(df), fout)\n\n\ndef _serve(model_uri, port, host):\n pyfunc_model = load_model(model_uri)\n init(pyfunc_model).run(port=port, host=host)\n\n\nclass NumpyEncoder(JSONEncoder):\n \"\"\" Special json encoder for numpy types.\n Note that some numpy types doesn't have native python equivalence,\n hence json.dumps will raise TypeError.\n In this case, you'll need to convert your numpy types into its closest python equivalence.\n \"\"\"\n\n def default(self, o): # pylint: disable=E0202\n if isinstance(o, np.generic):\n return np.asscalar(o)\n return JSONEncoder.default(self, o)\n\n\ndef _get_jsonable_obj(data, pandas_orient=\"records\"):\n \"\"\"Attempt to make the data json-able via standard library.\n Look for some commonly used types that are not jsonable and convert them into json-able ones.\n Unknown data types are returned as is.\n\n :param data: data to be converted, works with pandas and numpy, rest will be returned as is.\n :param pandas_orient: If `data` is a Pandas DataFrame, it will be converted to a JSON\n dictionary using this Pandas serialization orientation.\n \"\"\"\n if isinstance(data, np.ndarray):\n return data.tolist()\n if isinstance(data, pd.DataFrame):\n return data.to_dict(orient=pandas_orient)\n if 
isinstance(data, pd.Series):\n return pd.DataFrame(data).to_dict(orient=pandas_orient)\n else: # by default just return whatever this is and hope for the best\n return data\n"
] |
[
[
"numpy.array",
"pandas.DataFrame",
"pandas.read_json",
"numpy.asscalar",
"pandas.read_csv"
]
] |
asfadmin/hyp3-geocode
|
[
"aaee8866b8ee0a27203d20e605ada6608f26a392"
] |
[
"hyp3_geocode/sentinel.py"
] |
[
"\"\"\"Geocode a sentinel-1 granule using Gamma software\"\"\"\n\nimport argparse\nimport datetime\nimport glob\nimport logging\nimport math\nimport os\nimport shutil\nimport zipfile\n\nimport numpy as np\nfrom hyp3lib.asf_geometry import geometry_geo2proj\nfrom hyp3lib.byteSigmaScale import byteSigmaScale\nfrom hyp3lib.createAmp import createAmp\nfrom hyp3lib.execute import execute\nfrom hyp3lib.getParameter import getParameter\nfrom hyp3lib.getSubSwath import get_bounding_box_file\nfrom hyp3lib.ingest_S1_granule import ingest_S1_granule\nfrom hyp3lib.makeAsfBrowse import makeAsfBrowse\nfrom hyp3lib.make_arc_thumb import pngtothumb\n\nimport hyp3_geocode\n\n\ndef create_dem_par(basename, data_type, pixel_size, lat_max, lat_min, lon_max, lon_min, post):\n dem_par_in = \"{}_dem_par.in\".format(basename)\n zone, false_north, y_min, y_max, x_min, x_max = geometry_geo2proj(lat_max, lat_min, lon_max, lon_min)\n\n logging.debug(\"Original Output Coordinates: {} {} {} {}\".format(y_min, y_max, x_min, x_max))\n\n if post is not None:\n shift = 0\n x_max = math.ceil(x_max / post) * post + shift\n x_min = math.floor(x_min / post) * post - shift\n y_max = math.ceil(y_max / post) * post + shift\n y_min = math.floor(y_min / post) * post - shift\n logging.debug(\"Snapped Output Coordinates: {} {} {} {}\".format(y_min, y_max, x_min, x_max))\n\n with open(dem_par_in, \"w\") as f:\n f.write(\"UTM\\n\")\n f.write(\"WGS84\\n\")\n f.write(\"1\\n\")\n f.write(\"{}\\n\".format(zone))\n f.write(\"{}\\n\".format(false_north))\n f.write(\"{}\\n\".format(basename))\n if \"float\" in data_type:\n f.write(\"REAL*4\\n\")\n elif \"int16\" in data_type:\n f.write(\"INTEGER*2\\n\")\n f.write(\"0.0\\n\")\n f.write(\"1.0\\n\")\n\n xsize = np.floor(abs((x_max - x_min) / pixel_size))\n ysize = np.floor(abs((y_max - y_min) / pixel_size))\n\n f.write(\"{}\\n\".format(int(xsize)))\n f.write(\"{}\\n\".format(int(ysize)))\n f.write(\"{} {}\\n\".format(-1.0 * pixel_size, pixel_size))\n f.write(\"{} {}\\n\".format(y_max, x_min))\n\n return dem_par_in\n\n\ndef blank_bad_data(raw_file, x, y, left=15, right=15):\n # Read in the data\n data = np.fromfile(raw_file, dtype=np.float32)\n data = np.reshape(data, (y, x))\n data = data.byteswap()\n\n # For each line in the file\n for i in range(y):\n # Black out the start of the line\n for j in range(x):\n if data[i, j] != 0:\n data[i, :j + left] = 0\n break\n # Black out the end of the line\n for j in range(x - 1, 0, -1):\n if data[i, j] != 0:\n data[i, j - right:] = 0\n break\n\n # Write out the data\n data = data.byteswap()\n data.tofile(raw_file)\n\n\ndef process_pol(pol, type_, infile, outfile, pixel_size, height, make_tab_flag=True, gamma0_flag=False,\n offset=None):\n logging.info(\"Processing the {pol} polarization\".format(pol=pol))\n # FIXME: make_tab_flag isn't used... 
should it be doing something?\n logging.debug('Unused option make_tab_flag was {make_tab_flag}'.format(make_tab_flag=make_tab_flag))\n\n mgrd = \"{outfile}.{pol}.mgrd\".format(outfile=outfile, pol=pol)\n utm = \"{outfile}.{pol}.utm\".format(outfile=outfile, pol=pol)\n area_map = \"{outfile}_area_map.par\".format(outfile=outfile)\n small_map = \"{outfile}_small_map\".format(outfile=outfile)\n\n look_fact = np.floor((pixel_size / 10.0) + 0.5)\n if look_fact < 1:\n look_fact = 1\n\n # Ingest the granule into gamma format\n ingest_S1_granule(infile, pol, look_fact, mgrd)\n\n if gamma0_flag:\n # Convert sigma-0 to gamma-0\n execute(f\"radcal_MLI {mgrd} {mgrd}.par - {mgrd}.sigma - 0 0 -1\", uselogging=True)\n execute(f\"radcal_MLI {mgrd}.sigma {mgrd}.par - {mgrd}.gamma - 0 0 2\", uselogging=True)\n shutil.move(\"{mgrd}.gamma\".format(mgrd=mgrd), mgrd)\n\n # Blank out the bad data at the left and right edges\n dsx = int(getParameter(\"{mgrd}.par\".format(mgrd=mgrd), \"range_samples\", uselogging=True))\n dsy = int(getParameter(\"{mgrd}.par\".format(mgrd=mgrd), \"azimuth_lines\", uselogging=True))\n\n if \"GRD\" in type_:\n blank_bad_data(mgrd, dsx, dsy, left=20, right=20)\n\n # Create geocoding look up table\n if offset is None:\n offset = '-'\n execute(f\"gec_map {mgrd}.par {offset} {area_map} {height} {small_map}.par {small_map}.utm_to_rdc\", uselogging=True)\n\n # Gecode the granule\n out_size = getParameter(\"{small_map}.par\".format(small_map=small_map), \"width\", uselogging=True)\n execute(f\"geocode_back {mgrd} {dsx} {small_map}.utm_to_rdc {utm} {out_size}\", uselogging=True)\n\n # Create the geotiff file\n tiffile = \"{outfile}_{pol}.tif\".format(outfile=outfile, pol=pol)\n execute(f\"data2geotiff {small_map}.par {utm} 2 {tiffile}\", uselogging=True)\n\n\ndef create_xml_files(infile, outfile, height, type_, gamma0_flag, pixel_size):\n \"\"\"Create XML metadata files\"\"\"\n cfgdir = os.path.abspath(os.path.join(os.path.dirname(hyp3_geocode.__file__), \"etc\"))\n back = os.getcwd()\n os.chdir(\"PRODUCT\")\n now = datetime.datetime.now()\n date = now.strftime(\"%Y%m%d\")\n time = now.strftime(\"%H%M%S\")\n dt = now.strftime(\"%Y-%m-%dT%H:%M:%S\")\n year = now.year\n encoded_jpg = pngtothumb(\"{}.png\".format(outfile))\n basename = os.path.basename(infile)\n granulename = os.path.splitext(basename)[0]\n\n if type_ == \"SLC\":\n full_type = \"Single-Look Complex\"\n else:\n full_type = \"Ground Range Detected\"\n\n if gamma0_flag:\n power_type = \"gamma\"\n else:\n power_type = \"sigma\"\n\n for myfile in glob.glob(\"*.tif\"):\n # NOTE: Need to open as bytes so we can write encoded_jpg thumbnail to file\n with open(\"{cfgdir}/GeocodingTemplate.xml\".format(cfgdir=cfgdir), \"rb\") as f:\n with open(\"{myfile}.xml\".format(myfile=myfile), \"wb\") as g:\n if \"vv\" in myfile:\n pol = \"vv\"\n elif \"vh\" in myfile:\n pol = \"vh\"\n elif \"hh\" in myfile:\n pol = \"hh\"\n elif \"hv\" in myfile:\n pol = \"hv\"\n\n for line in f:\n line = line.replace(b\"[DATE]\", bytes(date, 'utf-8'))\n line = line.replace(b\"[TIME]\", bytes(\"{}00\".format(time), 'utf-8'))\n line = line.replace(b\"[DATETIME]\", bytes(dt, 'utf-8'))\n line = line.replace(b\"[HEIGHT]\", bytes(\"{}\".format(height), 'utf-8'))\n line = line.replace(b\"[YEARPROCESSED]\", bytes(\"{}\".format(year), 'utf-8'))\n line = line.replace(b\"[YEARACQUIRED]\", bytes(infile[17:21], 'utf-8'))\n line = line.replace(b\"[TYPE]\", bytes(type_, 'utf-8'))\n line = line.replace(b\"[FULL_TYPE]\", bytes(full_type, 'utf-8'))\n line = 
line.replace(b\"[SPACING]\", bytes(\"{}\".format(int(pixel_size)), 'utf-8'))\n line = line.replace(b\"[THUMBNAIL_BINARY_STRING]\", encoded_jpg)\n line = line.replace(b\"[POL]\", bytes(pol, 'utf-8'))\n line = line.replace(b\"[POWERTYPE]\", bytes(power_type, 'utf-8'))\n line = line.replace(b\"[GRAN_NAME]\", bytes(granulename, 'utf-8'))\n line = line.replace(b\"[FORMAT]\", b\"power\")\n g.write(line + b'\\n')\n\n for myfile in glob.glob(\"*.png\"):\n if \"rgb\" in myfile:\n scale = 'color'\n encoded_jpg = pngtothumb(\"{}_rgb.png\".format(outfile))\n else:\n scale = 'grayscale'\n encoded_jpg = pngtothumb(\"{}.png\".format(outfile))\n\n if \"large\" in myfile:\n res = \"medium\"\n else:\n res = \"low\"\n\n with open(\"{cfgdir}/GeocodingTemplate_{scale}_png.xml\".format(cfgdir=cfgdir, scale=scale), \"rb\") as f:\n with open(\"{myfile}.xml\".format(myfile=myfile), \"wb\") as g:\n for line in f:\n line = line.replace(b\"[DATE]\", bytes(date, 'utf-8'))\n line = line.replace(b\"[TIME]\", bytes(\"{}00\".format(time), 'utf-8'))\n line = line.replace(b\"[DATETIME]\", bytes(dt, 'utf-8'))\n line = line.replace(b\"[YEARPROCESSED]\", bytes(\"{}\".format(year), 'utf-8'))\n line = line.replace(b\"[YEARACQUIRED]\", bytes(infile[17:21], 'utf-8'))\n line = line.replace(b\"[TYPE]\", bytes(type_, 'utf-8'))\n line = line.replace(b\"[FULL_TYPE]\", bytes(full_type, 'utf-8'))\n line = line.replace(b\"[THUMBNAIL_BINARY_STRING]\", encoded_jpg)\n line = line.replace(b\"[RES]\", bytes(res, 'utf-8'))\n line = line.replace(b\"[GRAN_NAME]\", bytes(granulename, 'utf-8'))\n line = line.replace(b\"[FORMAT]\", b\"power\")\n g.write(line + b'\\n')\n\n os.chdir(back)\n\n\ndef make_products(outfile, pol, cp=None):\n # Create greyscale geotiff and ASF browse images\n tiffile = \"{out}_{pol}.tif\".format(out=outfile, pol=pol)\n ampfile = createAmp(tiffile, nodata=0)\n newfile = ampfile.replace(\".tif\", \"_sigma.tif\")\n byteSigmaScale(ampfile, newfile)\n makeAsfBrowse(newfile, outfile)\n os.remove(newfile)\n\n # Create color ASF browse images\n if cp is not None:\n if pol == \"vv\":\n basename = \"{}_vh\".format(outfile)\n else:\n basename = \"{}_hv\".format(outfile)\n tiffile2 = \"{}.tif\".format(basename)\n ampfile2 = createAmp(tiffile2, nodata=0)\n outfile2 = ampfile2.replace(\".tif\", \"_rgb.tif\")\n threshold = -24\n\n # Direct call to rtc2color overran the memory (128 GB)\n # rtc2color(ampfile,ampfile2, threshold, outfile2, amp=True, cleanup=True)\n # Trying this instead\n execute(f\"rtc2color.py -amp -cleanup {ampfile} {ampfile2} {threshold} {outfile2}\", uselogging=True)\n\n colorname = \"{}_rgb\".format(outfile)\n makeAsfBrowse(outfile2, colorname)\n os.remove(ampfile2)\n os.remove(outfile2)\n\n os.remove(ampfile)\n\n # Move results to the PRODUCT directory\n if not os.path.isdir(\"PRODUCT\"):\n os.mkdir(\"PRODUCT\")\n for tiffile in glob.glob(\"*.tif\"):\n shutil.move(tiffile, \"PRODUCT\")\n for txtfile in glob.glob(\"*_log.txt\"):\n shutil.move(txtfile, \"PRODUCT\")\n for pngfile in glob.glob(\"*.png*\"):\n shutil.move(pngfile, \"PRODUCT\")\n for kmzfile in glob.glob(\"*.kmz\"):\n shutil.move(kmzfile, \"PRODUCT\")\n\n\ndef geocode_sentinel(infile, outfile, pixel_size=30.0, height=0, gamma0_flag=False, post=None,\n offset=None):\n if not os.path.exists(infile):\n logging.error(\"ERROR: Input file {} does not exist\".format(infile))\n exit(1)\n if \"zip\" in infile:\n zip_ref = zipfile.ZipFile(infile, 'r')\n zip_ref.extractall(\".\")\n zip_ref.close()\n infile = infile.replace(\".zip\", \".SAFE\")\n\n type_ = 'GRD' if 
'GRD' in infile else 'SLC'\n\n # Create par file covering the area we want to geocode\n lat_max, lat_min, lon_max, lon_min = get_bounding_box_file(infile)\n logging.debug(\"Input Coordinates: {} {} {} {}\".format(lat_max, lat_min, lon_max, lon_min))\n area_map = f\"{outfile}_area_map\"\n dem_par_in = create_dem_par(area_map, \"float\", pixel_size, lat_max, lat_min, lon_max, lon_min, post)\n execute(f\"create_dem_par {area_map}.par < {dem_par_in}\", uselogging=True)\n\n # Get list of files to process\n vvlist = glob.glob(\"{}/*/*vv*.tiff\".format(infile))\n vhlist = glob.glob(\"{}/*/*vh*.tiff\".format(infile))\n hhlist = glob.glob(\"{}/*/*hh*.tiff\".format(infile))\n hvlist = glob.glob(\"{}/*/*hv*.tiff\".format(infile))\n\n pol = None\n cross_pol = None\n if vvlist:\n pol = \"vv\"\n process_pol(pol, type_, infile, outfile, pixel_size, height, make_tab_flag=True, gamma0_flag=gamma0_flag,\n offset=offset)\n if vhlist:\n process_pol(\"vh\", type_, infile, outfile, pixel_size, height, make_tab_flag=False, gamma0_flag=gamma0_flag,\n offset=offset)\n cross_pol = \"vh\"\n if hhlist:\n pol = \"hh\"\n process_pol(pol, type_, infile, outfile, pixel_size, height, make_tab_flag=True, gamma0_flag=gamma0_flag,\n offset=offset)\n if hvlist:\n process_pol(\"hv\", type_, infile, outfile, pixel_size, height, make_tab_flag=False, gamma0_flag=gamma0_flag,\n offset=offset)\n cross_pol = \"hv\"\n\n make_products(outfile, pol, cp=cross_pol)\n create_xml_files(infile, outfile, height, type_, gamma0_flag, pixel_size)\n\n\ndef main():\n \"\"\"Main entrypoint\"\"\"\n parser = argparse.ArgumentParser(\n prog='geocode_sentinel.py',\n description=__doc__,\n )\n parser.add_argument(\"infile\",\n help=\"Input zip file or SAFE directory\")\n parser.add_argument(\"outfile\",\n help=\"Name of output geocoded file\")\n parser.add_argument(\"-t\", \"--terrain_height\", type=float, default=0.0,\n help=\"Average terrain height for geocoding\")\n parser.add_argument(\"-s\", \"--pixel_size\", type=float, default=30.0,\n help=\"Pixel size for output product (default 30m)\")\n parser.add_argument(\"-p\", \"--post\", type=float,\n help=\"Pixel posting for output product\")\n parser.add_argument(\"-g\", \"--gamma0\", action=\"store_true\",\n help=\"Make output gamma0 instead of sigma0\")\n parser.add_argument(\"-o\", \"--offset\",\n help=\"Optional offset file to use during geocoding\")\n parser.add_argument('--version', action='version',\n version='%(prog)s {}'.format(hyp3_geocode.__version__))\n args = parser.parse_args()\n\n log_file = \"{}_{}_log.txt\".format(args.outfile, os.getpid())\n logging.basicConfig(\n filename=log_file, format='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO\n )\n logging.getLogger().addHandler(logging.StreamHandler())\n logging.info(\"Starting run\")\n\n geocode_sentinel(\n args.infile, args.outfile, height=args.terrain_height, pixel_size=args.pixel_size,\n gamma0_flag=args.gamma0, post=args.post, offset=args.offset\n )\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"numpy.reshape",
"numpy.fromfile",
"numpy.floor"
]
] |
thu-spmi/semi-EBM
|
[
"393e3ea3566dd60c48872a5c573a335e8e802707",
"393e3ea3566dd60c48872a5c573a335e8e802707"
] |
[
"train/run_trf_pretrain.py",
"hrf/trfx_semi.py"
] |
[
"# Copyright 2020 Tsinghua University, Author: Yunfu Song\n# Apache 2.0.\n# This script contrains TRF unsupervised training experiments.\n\nimport tensorflow as tf\nimport numpy as np\nimport json\nimport os\n\nfrom base import *\nimport trf_uns, nce_net_uns\nimport argparse\n\npaser = argparse.ArgumentParser()\npaser.add_argument('--alpha', default=1, type=float)\npaser.add_argument('--sf', default='', type=str)\npaser.add_argument('--net', default='Net', type=str)\npaser.add_argument('--model', default='', type=str)\npaser.add_argument('--nbest', default=0, type=int)\npaser.add_argument('--bs', default=32, type=int)\npaser.add_argument('--df', default=1, type=float)\npaser.add_argument('--nf', default=1, type=float)\npaser.add_argument('--do', default=0.5, type=float)\npaser.add_argument('--nt', default=1, type=int)\npaser.add_argument('--unl', default=50, type=int)\npaser.add_argument('--lab', default=1, type=int)\npaser.add_argument('--data', default='all', type=str)\npaser.add_argument('--lr', default=1e-3, type=float)\npaser.add_argument('--lrd', default=0.03, type=float)\npaser.add_argument('--opt', default='adam', type=str)\npaser.add_argument('--word_emb', action='store_false', default=True)\npaser.add_argument('--std', default=1, type=int)\n# paser.add_argument('--data', default='one-ten', type=str)\npaser.add_argument('--task', default='pos', type=str)\npaser.add_argument('--seed', default=1, type=int)\npaser.add_argument('--hl', default=1, type=int)\nargs = paser.parse_args()\nprint(args)\n\ntf.set_random_seed(args.seed)\nnp.random.seed(args.seed)\nimport random\nrandom.seed(args.seed)\n\ndef main():\n with open('data/processed/%s/data.info'%args.task) as f:\n data_info = json.load(f)\n nbest=None\n if args.nbest:\n nbest=[\n \"data/raw/WSJ92-test-data/1000best.sent\",\n \"data/raw/WSJ92-test-data/transcript.txt\",\n \"data/raw/WSJ92-test-data/1000best.acscore\",\n \"data/raw/WSJ92-test-data/1000best.lmscore\"\n ]\n task2all = {'pos': 56554, 'ner': 14041, 'chunk': 7436}\n\n\n train_num = task2all[args.task]//args.lab\n\n if args.nbest:\n assert args.task=='pos'\n data = seq.Data(vocab_files=data_info['vocab'],\n train_list=data_info['train%d'%train_num],\n valid_list=data_info['valid'],\n test_list=data_info['test']\n )\n else:\n data = seq.Data(vocab_files=data_info['vocab'],\n train_list=data_info['train%d.part%d'%(train_num,args.unl)],\n valid_list=data_info['valid'],\n test_list=data_info['test'],\n max_len=60\n )\n\n config = trf_uns.Config(data)\n\n # config.mix_config.c2w_type = 'cnn'\n # config.mix_config.chr_embedding_size = 30\n # config.mix_config.c2w_cnn_size = 30\n\n config.mix_config.c2w_type = 'cnn'\n config.mix_config.chr_embedding_size = 50\n config.mix_config.c2w_cnn_size = 100\n config.mix_config.c2w_cnn_width = [2, 3, 4]\n\n config.mix_config.rnn_hidden_size = 512\n config.mix_config.rnn_hidden_layers = args.hl\n config.mix_config.embedding_size = 300\n config.mix_config.rnn_proj_size = 512\n config.mix_config.opt_method = args.opt\n config.sampler_config.optimize_method = args.opt\n config.sampler_config.learning_rate = args.lr\n config.mix_config.dropout = args.do\n config.max_epoch = 5\n config.crf_batch_size = 64\n config.trf_batch_size = args.bs\n config.data_factor = args.df\n config.noise_factor = args.nf\n config.lr = args.lr\n\n config.eval_every = 0.1\n config.warm_up_steps=100\n config.lr_decay = 0.005\n\n # config.lr = lr.LearningRateEpochDelay2(1e-3, delay=0.02)\n\n if args.word_emb:\n 
config.mix_config.embedding_init_npy=data_info['word_emb_d300']\n\n logdir = 'models/%s_trf_pretrain_lab%dunl%d_%s/' % (args.task,args.lab,args.unl, args.sf)\n logdir = wb.mklogdir(logdir, is_recreate=True)\n config.print()\n wb.mkdir(os.path.join(logdir, 'crf_models'))\n\n m = trf_uns.TRF(config, data, logdir, device='/gpu:1',net_name=args.net)\n\n\n sv = tf.train.Supervisor(logdir=os.path.join(logdir, 'logs'))\n sv.summary_writer.add_graph(tf.get_default_graph()) # write the graph to logs\n session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)\n session_config.gpu_options.allow_growth = True\n with sv.managed_session(config=session_config) as session:\n with session.as_default():\n m.initialize()\n if args.nbest:\n ops = trf_uns.DefaultOps(m, nbest)\n else:\n ops=None\n m.train(0.1,ops)\n\n\n\nif __name__ == '__main__':\n\n main()",
"import numpy as np\nimport time\nfrom collections import OrderedDict\n\nfrom base import seq, log\nfrom . import trfx\nfrom .trfx import DefaultOps\n\n\nclass Config(trfx.Config):\n def __init__(self, data):\n super().__init__(data)\n\n self.train_batch_size = 1000\n self.full_batch_size = 100\n self.inter_alpha = 100\n\n\nclass TRF(trfx.TRF):\n def __init__(self, config, data_x, data_full,\n logdir, device='/gpu:0', name='trf'):\n super().__init__(config, data_x, logdir, device, name)\n\n self.data_full = data_full\n self.data_x = data_x\n\n def update(self, data_list, sample_list, data_full_list=None):\n if data_full_list is None:\n return super().update(data_list, sample_list)\n\n # compute the scalars\n data_scalar = np.ones(len(data_list)) / len(data_list)\n sample_len = np.array([len(x) for x in sample_list])\n sample_facter = np.array(self.config.pi_true[self.config.min_len:]) / \\\n np.array(self.config.pi_0[self.config.min_len:])\n sample_scalar = sample_facter[sample_len - self.config.min_len] / len(sample_list)\n\n # update word phi\n if not self.config.fix_trf_model:\n with self.time_recoder.recode('update_word'):\n self.phi_word.update(data_list, data_scalar, sample_list, sample_scalar,\n learning_rate=self.cur_lr_word)\n\n if not self.config.fix_crf_model:\n\n data_full_scalar = self.config.inter_alpha * np.ones(len(data_full_list)) / len(data_full_list)\n\n data_part_list = data_list + sample_list + data_full_list\n data_part_scalar = -np.concatenate([data_scalar, -sample_scalar, -data_full_scalar], axis=0)\n\n # forward-backward for data\n data_part_list_x = [s.x[0] for s in data_part_list]\n with self.time_recoder.recode('update_marginal_data'):\n data_fp_logps_list, logzs_data = self.marginal_logps(data_part_list_x)\n\n with self.time_recoder.recode('update_tag'):\n self.phi_tag.update(data_full_list, data_full_scalar, data_part_list, data_part_scalar,\n data_fp_logps_list=None,\n sample_fp_logps_list=data_fp_logps_list,\n learning_rate=self.cur_lr_tag)\n\n with self.time_recoder.recode('update_mix'):\n self.phi_mix.update(data_full_list, data_full_scalar, data_part_list, data_part_scalar,\n data_fp_logps_list=None,\n sample_fp_logps_list=data_fp_logps_list,\n learning_rate=self.cur_lr_mix)\n\n # update zeta\n with self.time_recoder.recode('update_logz'):\n self.norm_const.update(sample_list, learning_rate=self.cur_lr_logz)\n logz1 = self.get_true_logz(self.config.min_len)[0]\n self.norm_const.set_logz1(logz1)\n\n # update simulater\n with self.time_recoder.recode('update_simulater'):\n self.sampler.update(seq.get_x(sample_list))\n\n # update dbg info\n self.sample_cur_pi.fill(0)\n for x in sample_list:\n self.sample_cur_pi[len(x)] += 1\n self.sample_acc_count += self.sample_cur_pi\n self.sample_cur_pi /= self.sample_cur_pi.sum()\n\n dbg_info = dict()\n dbg_info['logz1'] = logz1\n acc_pi = self.sample_acc_count / np.sum(self.sample_acc_count)\n dbg_info['pi_dist'] = np.arccos(np.dot(acc_pi, self.config.pi_0) /\n np.linalg.norm(acc_pi) / np.linalg.norm(self.config.pi_0))\n\n return dbg_info\n\n def train(self, print_per_epoch=0.1, operation=None):\n\n # initialize\n self.initialize()\n\n if self.exist_model():\n self.restore()\n if self.config.load_crf_model is not None:\n self.restore_crf(self.config.load_crf_model)\n if self.config.load_trf_model is not None:\n self.restore_trf(self.config.load_trf_model)\n\n train_list = self.data.datas[0]\n valid_list = self.data.datas[1]\n\n print('[TRF] [Train]...')\n time_beginning = time.time()\n model_train_nll = []\n # 
model_train_nll_phi = []\n # model_q_nll = []\n # model_kl_dist = []\n\n self.data.train_batch_size = self.config.train_batch_size\n self.data.is_shuffle = True\n self.data_full.train_batch_size = self.config.full_batch_size\n self.data_full.is_shuffle = True\n epoch_step_num = self.data.get_epoch_step_num()\n print('[TRF] epoch_step_num={}'.format(epoch_step_num))\n print('[TRF] train_list={}'.format(len(train_list)))\n print('[TRF] valid_list={}'.format(len(valid_list)))\n last_epoch = 0\n epoch = 0\n print_next_epoch = 0\n for step, (data_seqs, data_full_seqs) in enumerate(zip(self.data, self.data_full)):\n\n ###########################\n # extra operations\n ###########################\n if operation is not None:\n operation.run(step, epoch)\n\n if int(self.data.get_cur_epoch()) > last_epoch:\n self.save()\n last_epoch = int(self.data.get_cur_epoch())\n\n if epoch >= self.config.max_epoch:\n print('[TRF] train stop!')\n self.save()\n # operation.perform(step, epoch)\n break\n\n # update epoches\n epoch = self.data.get_cur_epoch()\n\n # update training information\n self.training_info['trained_step'] += 1\n self.training_info['trained_epoch'] = self.data.get_cur_epoch()\n self.training_info['trained_time'] = (time.time() - time_beginning) / 60\n\n # draw samples\n with self.time_recoder.recode('sample'):\n sample_seqs = self.draw(self.config.sample_batch_size)\n\n # update paramters\n with self.time_recoder.recode('update'):\n # learining rate\n self.cur_lr_word = self.config.lr_word.get_lr(step+1, epoch)\n self.cur_lr_tag = self.config.lr_tag.get_lr(step+1, epoch)\n self.cur_lr_mix = self.config.lr_mix.get_lr(step+1, epoch)\n self.cur_lr_logz = self.config.lr_logz.get_lr(step+1, epoch)\n # update\n update_info = self.update(data_seqs, sample_seqs, data_full_seqs)\n\n # evaulate the nll\n with self.time_recoder.recode('eval_train_nll'):\n nll_train = self.eval(data_seqs)[0]\n model_train_nll.append(nll_train)\n # model_train_nll_phi.append(self.eval(data_seqs, is_norm=False)[0])\n # model_kl_dist.append(self.eval(sample_seqs)[0] - self.mcmc.eval(sample_seqs)[0])\n\n if epoch >= print_next_epoch:\n print_next_epoch = epoch + print_per_epoch\n\n time_since_beg = (time.time() - time_beginning) / 60\n\n # with self.time_recoder.recode('eval'):\n # model_valid_nll = self.eval(valid_list)[0]\n\n info = OrderedDict()\n info['step'] = step\n info['epoch'] = epoch\n info['time'] = time_since_beg\n info['lr_tag'] = '{:.2e}'.format(self.cur_lr_tag)\n info['lr_mix'] = '{:.2e}'.format(self.cur_lr_mix)\n info['lr_word'] = '{:.2e}'.format(self.cur_lr_word)\n info['lr_logz'] = '{:.2e}'.format(self.cur_lr_logz)\n info['lj_rate'] = self.sampler.lj_rate\n info['mv_rate'] = self.sampler.mv_rate\n info['logz1'] = self.update_global_norm()\n info.update(update_info)\n info['train'] = np.mean(model_train_nll[-epoch_step_num:])\n # info['train_phi'] = np.mean(model_train_nll_phi[-100:])\n # info['valid'] = model_valid_nll\n # info['auxil'] = np.mean(model_q_nll[-epoch_step_num:])\n # info['kl_dist'] = np.mean(model_kl_dist[-epoch_step_num:])\n\n x_list = seq.get_x(sample_seqs)\n info['kl_dist'] = np.mean(-self.get_logpxs(x_list, for_eval=False)) - self.sampler.eval(x_list)[0]\n\n ##########\n true_logz = None\n if self.config.max_len <= 5:\n true_logz = np.array(self.get_true_logz())\n sa_logz = np.array(self.norm_const.get_logz())\n self.norm_const.set_logz(true_logz)\n true_nll_train = self.eval(train_list)[0]\n self.norm_const.set_logz(sa_logz)\n\n info['true_train'] = true_nll_train\n\n 
log.print_line(info)\n\n print('[end]')\n # self.debug_logz()\n\n # write time\n f = self.write_files.get('time')\n f.write('step={} epoch={:.3f} time={:.2f} '.format(step, epoch, time_since_beg))\n f.write(' '.join(['{}={:.2f}'.format(x[0], x[1]) for x in self.time_recoder.time_recoder.items()]) + '\\n')\n f.flush()\n\n # write zeta, logz, pi\n self.write_log_zeta(step, true_logz)"
] |
[
[
"tensorflow.set_random_seed",
"tensorflow.ConfigProto",
"tensorflow.get_default_graph",
"numpy.random.seed"
],
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot",
"numpy.sum",
"numpy.mean"
]
] |
dineshrajdhanapathyDD/selenium-youtube-scraper-live
|
[
"a0f1f3278165e62a7cf7e26d5f8c099102ea7039"
] |
[
"scraper.py"
] |
[
"import pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\n\n\nYOUTUBE_TRENDING_URL = 'https://www.youtube.com/feed/trending'\n\n\ndef get_driver():\n chrome_options = Options()\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--disable-dev-shm-usage')\n driver = webdriver.Chrome(options=chrome_options)\n return driver\n\n\ndef get_videos(driver):\n VIDEO_DIV_TAG = 'ytd-video-renderer'\n driver.get(YOUTUBE_TRENDING_URL)\n videos = driver.find_elements(By.TAG_NAME, VIDEO_DIV_TAG)\n return videos\n\n#title, url, thumbnail_url,channel,views, uploades description.\ndef parse_video(video):\n title_tag = video.find_element(By.ID, 'video-title')\n title = title_tag.text\n url = title_tag.get_attribute('href')\n \n thumbnail_tag = video.find_element(By.TAG_NAME,'img') \n thumbnail_url = thumbnail_tag.get_attribute('src')\n\n channel_div = video.find_element(By.CLASS_NAME,'ytd-channel-name')\n channel_name = channel_div.text\n\n description = video.find_element(By.ID, 'description-text').text\n return {\n 'title': title,\n 'url':url,\n 'thumbnail':thumbnail_url,\n 'channel':channel_name,\n 'description':description\n\n }\n\nif __name__ == \"__main__\":\n print('Creating driver')\n driver = get_driver()\n print('Fetching trending video')\n videos = get_videos(driver) \n \n print(f'Found {len(videos)} videos')\n \n print('Parsing top 10 videos')\n videos_data = [parse_video(video) for video in videos [:10]]\n\n #print(videos_data[3])\n\n print('save the data to a csv')\n videos_df = pd.DataFrame(videos_data)\n print(videos_df)\n videos_df.to_csv('trending.csv', index=None)\n\n "
] |
[
[
"pandas.DataFrame"
]
] |
ABohynDOE/mldoe
|
[
"0987df34a219b07488647ea3434e81cd07da9687"
] |
[
"src/mldoe/design.py"
] |
[
"# Packages\r\nfrom math import log2\r\nfrom mldoe.matrix import bmat, gmat\r\nfrom mldoe.wlp import wlp\r\nfrom itertools import chain\r\nimport oapackage as oa\r\nfrom typing import List\r\nimport numpy as np\r\n\r\n\r\nclass Design:\r\n \"\"\"\r\n Private meta-class for two-level design (TLD) and mixed-level design (MLD) class.\r\n\r\n :param n_runs: Number of experimental runs\r\n :type n_runs: int\r\n :param cols: Column numbers of the two-level columns of the design. Must also include the basic factors.\r\n :type cols: List[int]\r\n :raises TypeError: n_runs must be an integer\r\n :raises ValueError: n_runs must be a positive power of 2\r\n :raises TypeError: cols must be a list of integers\r\n :raises ValueError: cols can only contain integers between 0 and N (non-included)\r\n \"\"\"\r\n\r\n def __init__(self, n_runs: int, cols: List[int]):\r\n \"\"\"Constructor method\"\"\"\r\n # Check value for number of runs\r\n if not isinstance(n_runs, int):\r\n raise TypeError('Number of runs must be an integer')\r\n elif n_runs <= 0 or (n_runs & n_runs - 1) != 0:\r\n raise ValueError('Number of runs must be a positive power of two')\r\n else:\r\n self.n_runs = n_runs\r\n\r\n # Check values for column numbers\r\n if any([not isinstance(i, int) for i in cols]):\r\n raise TypeError('All column number should integer')\r\n elif any([(i < 1 or i >= self.n_runs) for i in cols]):\r\n raise ValueError(\r\n 'All column numbers should be positive integers between 0 and N (non-included)')\r\n else:\r\n self.cols = cols\r\n\r\n # Compute additional values\r\n self.k = len(self.cols)\r\n \"\"\"Total number of two-level factors\"\"\"\r\n self.n = int(log2(self.n_runs))\r\n \"\"\"Number of basic (independent) factors\"\"\"\r\n self.p = self.k - self.n\r\n \"\"\"Number of added factors (created from generators)\"\"\"\r\n self.bf = [2**i for i in range(int(log2(self.n_runs)))]\r\n \"\"\"List of the basic factors in the design\"\"\"\r\n\r\n\r\nclass TLD(Design):\r\n __doc__ = Design.__doc__ # + \"\"\"Class for a regular two-level design. Inherits from the meta-class `Design`.\"\"\"\r\n\r\n def __init__(self, n_runs: int, cols: List[int]):\r\n \"\"\"Constructor method\"\"\"\r\n # Inherit the characteristics from the parent Design class\r\n super().__init__(n_runs, cols)\r\n\r\n @property\r\n def array(self):\r\n \"\"\"\r\n Design matrix in (0,1) coding.\r\n \"\"\"\r\n b_mat = bmat(self.n)\r\n return b_mat[:, [i - 1 for i in self.cols]]\r\n\r\n @property\r\n def wlp(self):\r\n \"\"\"\r\n Word-length pattern. It is a :math:`1` by :math:`k+1` vector :math:`\\mathbf{W}`, where :math:`W_i` is the number\r\n of words of length :math:`i` among the :math:`2^{p}-1` words of the design.\r\n \"\"\"\r\n return oa.array_link(self.array).GWLP()\r\n\r\n @property\r\n def resolution(self):\r\n \"\"\"\r\n Minimum length of words in the design. Also defined as the smallest value of :math:`i` for which\r\n :math:`W_i > 0`.\r\n \"\"\"\r\n for i in self.wlp[1:]:\r\n if i != 0:\r\n return i\r\n\r\n def __repr__(self):\r\n \"\"\"Representation method\"\"\"\r\n return f'TLD({self.n_runs},{self.cols})'\r\n\r\n def __str__(self):\r\n \"\"\"Formatted string print method\"\"\"\r\n return f'Two-level design in {self.n_runs} runs, with {self.k} factors'\r\n\r\n\r\nclass MLD(Design):\r\n __doc__ = Design.__doc__ + \"\"\"\r\n Class for regular mixed-level designs. Inherits from the meta-class `Design`. Mixed-level designs only\r\n contains four-level factors and two-level factors. 
Four-level factors are built using three pseudo-factors,\r\n according to the grouping scheme of Wu [1989]_.\r\n\r\n :param pf_lst: List of the pseudo-factor triplets used for the four-level factors. All triplets should be list of integers of the form a,b,ab.\r\n :type pf_lst: List[List[int]]\r\n :raises TypeError: pf_list must contain lists of integers\r\n :raises ValueError: all pseudo-factors must be between 0 and N\r\n :raises ValueError: all pseudo-factors must be of the form a, b, ab\r\n :raises ValueError: pseudo-factors cannot be used as two-level columns\r\n \"\"\"\r\n\r\n def __init__(self, n_runs: int, pf_lst: List[List[int]], cols: List[int]):\r\n \"\"\"Constructor method\"\"\"\r\n # Inherit the characteristics from the parent Design class\r\n super().__init__(n_runs, cols)\r\n # Additional definition for the pseudo-factors\r\n for pf_set in pf_lst:\r\n if any([(not isinstance(i, int)) for i in pf_set]):\r\n raise TypeError('All pseudo-factors must be integers')\r\n elif pf_set[0] ^ pf_set[1] != pf_set[2]:\r\n raise ValueError(\r\n 'All pseudo-factor triplets must be of the form a, b, ab')\r\n elif any([(i in cols) for i in pf_set]):\r\n raise ValueError(\r\n 'Pseudo-factors cannot be used as two-level columns')\r\n elif any([(i <= 0 or i >= n_runs) for i in pf_set]):\r\n raise ValueError(\r\n 'Pseudo-factors must be positive integers between 0 and N (non-included)')\r\n else:\r\n self.pf_lst = pf_lst\r\n\r\n # Compute additional variables concerning the four-level factor(s)\r\n self.m = len(self.pf_lst)\r\n \"\"\"Number of four-level factors\"\"\"\r\n self.pf = list(chain(*self.pf_lst))\r\n \"\"\"All two-level factors used as pseudo-factors for the four-level factors\"\"\"\r\n self.af = [i for i in self.cols if i not in self.pf and i not in self.bf]\r\n \"\"\"List of all the added factors (factors that are not basic factors nor pseudo-factors)\"\"\"\r\n\r\n @property\r\n def array(self):\r\n \"\"\"\r\n Design matrix in (0,1) coding with the four-level factor in (0,1,2,3) coding.\r\n The four-level factors are created according to the grouping scheme of Wu [1989]_:\r\n \"\"\"\r\n # TODO: add the grouping scheme to the documentation\r\n\r\n b_mat = bmat(self.n)\r\n two_lvl_part = b_mat[:, [i - 1 for i in self.cols]]\r\n four_lvl_part = np.zeros((self.n_runs, self.m))\r\n for i, pf_set in enumerate(self.pf_lst):\r\n four_lvl_part[:, i] = b_mat[:, pf_set[0] - 1] * \\\r\n 2 + b_mat[:, pf_set[1] - 1]\r\n return np.concatenate((four_lvl_part, two_lvl_part), axis=1).astype(int)\r\n\r\n @property\r\n def wlp(self):\r\n \"\"\"\r\n Word-length pattern. It is a :math:`1` by :math:`k+1` vector :math:`\\mathbf{W}`, where :math:`W_i` is the number\r\n of words of length :math:`i` among the :math:`2^{p}-1` words of the design.\r\n \"\"\"\r\n # TODO: add a type-specific word-length pattern definition\r\n #return oa.array_link(self.array.astype(int)).GWLP()\r\n return wlp(self.array,self.m).tolist()\r\n\r\n @property\r\n def resolution(self):\r\n \"\"\"\r\n Minimum length of words in the design. 
Also defined as the smallest value of :math:`i` for which\r\n :math:`W_i > 0`.\r\n \"\"\"\r\n # TODO: implement type-specific resolution for the type-specific wlp\r\n return next((i for i, x in enumerate(self.wlp) if x), self.k) + 1\r\n\r\n def generators(self, resolution: int = 3) -> List[int]:\r\n \"\"\"\r\n List the generators of the design that have a minimal resolution of r.\r\n\r\n :param resolution: minimal resolution needed for the generators\r\n :type resolution: int\r\n :return: list of suitable generators\r\n :rtype: List[int]\r\n \"\"\"\r\n generators = []\r\n for i in range(1, self.n_runs):\r\n if (i & i - 1) == 0:\r\n continue\r\n elif i in self.cols or i in self.pf:\r\n continue\r\n elif gen_len(i, self.pf_lst) < resolution - 1:\r\n continue\r\n else:\r\n generators.append(i)\r\n return generators\r\n\r\n def dop(self):\r\n \"\"\"\r\n Generate the delete-one-factor projections (DOP) (only on two-level designs) of the design.\r\n\r\n :return: generator with all the DOP\r\n \"\"\"\r\n for i in range(self.k):\r\n new_cols = self.cols.copy()\r\n new_cols.pop(i)\r\n yield MLD(self.n_runs, self.pf_lst, new_cols)\r\n\r\n\r\n# Additional functions\r\ndef pow2(x: int) -> List[int]:\r\n \"\"\"\r\n Decompose a number into powers of 2\r\n :param x: number to decompose\r\n :type x: int\r\n :return: list of powers of 2 that compose the number\r\n :rtype: List[int]\r\n \"\"\"\r\n powers = []\r\n i = 1\r\n while i <= x:\r\n if i & x:\r\n powers.append(i)\r\n i <<= 1\r\n return powers\r\n\r\n\r\ndef gen_len(gen: int, pf_lst: List[List[int]]) -> int:\r\n \"\"\"\r\n Determine the length of a generator, according to the pseudo-factors used in the mixed-level design.\r\n\r\n :param gen: generator as a column number\r\n :type gen: int\r\n :param pf_lst: list of the pseudo-factor triplets\r\n :type pf_lst: List[List[int]]\r\n :return: adapted length of the generator\r\n \"\"\"\r\n gen_bf = pow2(gen)\r\n gen_len_temp = len(gen_bf)\r\n for pf in pf_lst:\r\n pf_bf = np.unique(list(chain(*[pow2(i) for i in pf])))\r\n in_gen = False\r\n for i in pf_bf:\r\n if i in gen_bf:\r\n gen_len_temp -= 1\r\n in_gen = True\r\n if in_gen:\r\n gen_len_temp += 1\r\n return gen_len_temp\r\n\r\n\r\ndef gen_char(x: int) -> str:\r\n \"\"\"\r\n Character representation of a generator.\r\n The generator is represented as a word containing the basic factors used (starting at a).\r\n\r\n :param x: generator number.\r\n :type x: int\r\n :rtype: str\r\n :return: generator string representation\r\n \"\"\"\r\n pow_lst = pow2(x)\r\n return ''.join([chr(97 + int(log2(i))) for i in pow_lst])\r\n\r\ndef mat_wlp(des, s=3):\r\n # Word interaction matrix\r\n k = len(des.af)\r\n G = gmat(des.n)\r\n Sk = np.concatenate((G[:, [i-1 for i in des.af]], np.eye(k, dtype=int)), axis=0)\r\n Gk = np.dot(Sk, gmat(k)) % 2\r\n\r\n # Pseudo-factor matrix\r\n n4 = des.m\r\n P = np.eye(des.n+k, dtype=np.float32)[n4:, :]\r\n for ind, val in enumerate(des.pf_lst):\r\n P[ind, 0:des.n] = G[:, [i-1 for i in val]].any(1)*(2/3)\r\n\r\n # Adapted word interaction matrix\r\n W = np.rint(np.dot(P, Gk))\r\n t = W[0:n4, :].sum(0)\r\n wlpmat = np.zeros((n4+1, des.k+n4))\r\n for ii in range(n4+1):\r\n wvt = W[:, t == ii].sum(0)\r\n if not wvt.size > 0:\r\n continue\r\n for jj in range(s-1, n4+des.k):\r\n wlpmat[ii, jj] = np.count_nonzero(wvt == jj+1)\r\n return wlpmat[:, s-1:].astype(int)\r\n\r\n# Function to get the first iteration of added factors\r\ndef first_added_fact(des: MLD, res: int):\r\n \"\"\"\r\n Select the generators that correspond to the 
non-isomorphic MLD's with a single \r\n added factor.\r\n \r\n :param des: Root design (MLD with basic factors only)\r\n :type des: mldoe.design.MLD\r\n :param res: Minimal resolution of the designs\r\n :type res: int\r\n :return: list of the generators for the added factors\r\n :rtype: List[int]\r\n\r\n \"\"\"\r\n fac_lst = []\r\n type_len_cache = []\r\n for gen in des.generators():\r\n len_gen = gen_len(gen, des.pf_lst)\r\n if len_gen+1 < res:\r\n continue\r\n if any([gen&i for i in des.pf]):\r\n gen_type = 1\r\n else:\r\n gen_type = 0\r\n type_len = 2*len_gen + gen_type\r\n if type_len not in type_len_cache:\r\n fac_lst.append(gen)\r\n type_len_cache.append(type_len)\r\n return fac_lst"
] |
[
[
"numpy.concatenate",
"numpy.count_nonzero",
"numpy.dot",
"numpy.zeros",
"numpy.eye"
]
] |
812610357/Optics
|
[
"713759a95f442f2f7f6efb75bbf15d5836029fac"
] |
[
"FraunDiffraRecAper.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom math import pi\n\nLambda = 0.5\na = 20\nb = 20\nsintheta1 = np.linspace(-0.12, 0.12, 1201)\nsintheta2 = np.linspace(-0.12, 0.12, 1201)\n\nalpha = pi*a*sintheta1/Lambda\nbeta = pi*b*sintheta2/Lambda\nia = (np.sin(alpha)/alpha)**2\nib = (np.sin(beta)/beta)**2\nia = np.column_stack((ia, np.zeros((1201, 1))))\nib = np.column_stack((ib, np.zeros((1201, 1))))\ni = np.dot(ia, ib.T)\n\nplt.figure(figsize=(6, 6))\n\nplt.subplot(2, 1, 1)\nplt.axis([min(sintheta1), max(sintheta1), 0, 1])\nplt.plot(sintheta1, ia[:, 0], label=\"$I$\")\nplt.ylabel(\"$I/I_0$\")\nplt.xlabel(\"$\\sin{\\\\theta}$\")\nplt.title(\"Light-strength distribution\")\n\nplt.subplot(2, 2, 3)\nplt.pcolor(sintheta1, sintheta2, np.sqrt(i), vmax=0.5, cmap='gray')\nplt.xlabel(\"$\\sin{\\\\theta_1}$\")\nplt.ylabel(\"$\\sin{\\\\theta_2}$\")\n\nplt.subplot(2, 2, 4)\nplt.pcolor(sintheta1, sintheta2, np.sqrt(i), vmax=0.5, cmap='jet')\nplt.xlabel(\"$\\sin{\\\\theta_1}$\")\nplt.ylabel(\"$\\sin{\\\\theta_2}$\")\n\nplt.show()\n"
] |
[
[
"numpy.sin",
"numpy.dot",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"numpy.sqrt",
"matplotlib.pyplot.show",
"numpy.linspace",
"matplotlib.pyplot.subplot"
]
] |
manoloesparta/stuff
|
[
"3cd3c5b6d76593ee3ccad9334f567b68853de9e1"
] |
[
"poketsp/main.py"
] |
[
"import numpy as np\nimport pandas as pd\n\n\ndef solve(adj):\n visited = {}\n \n start = adj.iloc[0]\n for index, row in adj.iterrows():\n print(\"INDEX: \", index)\n print(\"ROW: \", row)\n\n\ndef main():\n adj = pd.read_csv('adjacency.csv', index_col='compared')\n solve(adj)\n\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"pandas.read_csv"
]
] |
dingdingdingyinyinyin/text-cnn
|
[
"652e69b01bfb6569fb282571d1e446f1087d1c1e"
] |
[
"text_test.py"
] |
[
"#encoding:utf-8\nfrom __future__ import print_function\nfrom text_model import *\nfrom loader import *\nfrom sklearn import metrics\nimport sys\nimport os\nimport time\nfrom datetime import timedelta\n\n\ndef evaluate(sess, x_, y_):\n data_len = len(x_)\n batch_eval = batch_iter(x_, y_, 128)\n total_loss = 0.0\n total_acc = 0.0\n for x_batch, y_batch in batch_eval:\n batch_len = len(x_batch)\n feed_dict = feed_data(x_batch, y_batch, 1.0)\n loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)\n total_loss += loss * batch_len\n total_acc += acc * batch_len\n\n return total_loss / data_len, total_acc / data_len\n\ndef feed_data(x_batch, y_batch, keep_prob):\n feed_dict = {\n model.input_x: x_batch,\n model.input_y: y_batch,\n model.keep_prob:keep_prob\n }\n return feed_dict\n\ndef test():\n print(\"Loading test data...\")\n t1=time.time()\n x_test,y_test=process_file(config.test_filename,word_to_id,cat_to_id,config.seq_length)\n\n session=tf.Session()\n session.run(tf.global_variables_initializer())\n saver=tf.train.Saver()\n saver.restore(sess=session,save_path=save_path)\n\n print('Testing...')\n test_loss,test_accuracy = evaluate(session,x_test,y_test)\n msg = 'Test Loss: {0:>6.2}, Test Acc: {1:>7.2%}'\n print(msg.format(test_loss, test_accuracy))\n\n batch_size=config.batch_size\n data_len=len(x_test)\n num_batch=int((data_len-1)/batch_size)+1\n y_test_cls=np.argmax(y_test,1)\n y_pred_cls=np.zeros(shape=len(x_test),dtype=np.int32)\n\n for i in range(num_batch):\n start_id=i*batch_size\n end_id=min((i+1)*batch_size,data_len)\n feed_dict={\n model.input_x:x_test[start_id:end_id],\n model.keep_prob:1.0,\n }\n y_pred_cls[start_id:end_id]=session.run(model.y_pred_cls,feed_dict=feed_dict)\n\n #evaluate\n print(\"Precision, Recall and F1-Score...\")\n print(metrics.classification_report(y_test_cls, y_pred_cls, target_names=categories))\n\n print(\"Confusion Matrix...\")\n cm = metrics.confusion_matrix(y_test_cls, y_pred_cls)\n print(cm)\n\n print(\"Time usage:%.3f seconds...\\n\"%(time.time() - t1))\n\nif __name__ == '__main__':\n print('Configuring CNN model...')\n config = TextConfig()\n filenames = [config.train_filename, config.test_filename, config.val_filename]\n if not os.path.exists(config.vocab_filename):\n build_vocab(filenames, config.vocab_filename, config.vocab_size)\n #read vocab and categories\n categories,cat_to_id = read_category()\n words,word_to_id = read_vocab(config.vocab_filename)\n config.vocab_size = len(words)\n\n # trans vector file to numpy file\n if not os.path.exists(config.vector_word_npz):\n export_word2vec_vectors(word_to_id, config.vector_word_filename, config.vector_word_npz)\n config.pre_trianing = get_training_word2vec_vectors(config.vector_word_npz)\n model = TextCNN(config)\n\n save_dir = './checkpoints/textcnn'\n save_path = os.path.join(save_dir, 'best_validation')\n test()"
] |
[
[
"sklearn.metrics.classification_report",
"sklearn.metrics.confusion_matrix"
]
] |
laurencer/recipes
|
[
"60b7c5f0304c7eb44a39295eba78da02608ae858"
] |
[
"torchrecipes/core/base_train_app.py"
] |
[
"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\n\n#!/usr/bin/env python3\n\n# pyre-strict\n\nimport os\nimport time\nimport traceback\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Optional, TypedDict, List, Tuple\n\nimport hydra\nimport torch\nfrom omegaconf import OmegaConf\nfrom pytorch_lightning import LightningDataModule, LightningModule\nfrom pytorch_lightning.callbacks import Callback\nfrom pytorch_lightning.callbacks import ModelCheckpoint as OSSModelCheckpoint\nfrom pytorch_lightning.loggers import TensorBoardLogger as OSSTensorboardLogger\nfrom pytorch_lightning.trainer import Trainer\nfrom pytorch_lightning.utilities.types import _EVALUATE_OUTPUT, _PREDICT_OUTPUT\nfrom torchrecipes._internal_patches import log_run, ModelCheckpoint, TensorBoardLogger\nfrom torchrecipes.core.conf import ModuleConf, DataModuleConf, TrainerConf\nfrom torchrecipes.core.logger import JobStatus\nfrom torchrecipes.utils.checkpoint import find_last_checkpoint_path\nfrom torchrecipes.utils.trainer_plugins import get_trainer_params\n\nOmegaConf.register_new_resolver(\"get_method\", hydra.utils.get_method)\n\n\n@dataclass\nclass TrainOutput:\n tensorboard_log_dir: Optional[str] = None\n\n\nclass TestOutput(TypedDict):\n pass\n\n\nclass BaseTrainApp:\n \"\"\"\n A training recipe that contains all necessary parts to train a model.\n One can easily start a trainig flow with this training application.\n To use the interface, create your own TrainApp and subclass from the BaseTrainApp.\n You also need to subclass YourTaskConfig from ModuleConf.\n \"\"\"\n\n module_conf: ModuleConf\n module: LightningModule\n datamodule_conf: Optional[DataModuleConf]\n datamodule: Optional[LightningDataModule]\n trainer_conf: TrainerConf\n log_dir: Optional[str]\n root_dir: Optional[str]\n\n def __init__(\n self,\n module: ModuleConf,\n trainer: TrainerConf,\n datamodule: Optional[DataModuleConf] = None,\n ) -> None:\n super().__init__()\n self.datamodule_conf = datamodule\n self.datamodule = self.get_data_module()\n\n self.module_conf = module\n self.module = self.get_lightning_module()\n\n self.trainer_conf = trainer\n self.log_dir = None\n self.root_dir = None\n torch._C._log_api_usage_once(f\"torchrecipes.{self.__class__.__name__}\")\n\n def get_lightning_module(self) -> LightningModule:\n \"\"\"\n Override this method to instantiate a LightningModule\n \"\"\"\n return hydra.utils.instantiate(self.module_conf, _recursive_=False)\n\n def get_data_module(self) -> Optional[LightningDataModule]:\n \"\"\"\n Override this method to instantiate a LightningDataModule\n \"\"\"\n if self.datamodule_conf:\n return hydra.utils.instantiate(self.datamodule_conf, _recursive_=False)\n\n return None\n\n def get_callbacks(self) -> List[Callback]:\n \"\"\"\n Override this method to return a list of callbacks to be passed into Trainer\n You can add additional ModelCheckpoint here\n \"\"\"\n return []\n\n def get_logger(self) -> OSSTensorboardLogger:\n \"\"\"\n Override this method to return a logger for trainer\n TODO: T88650989 set different default logger for OSS and FB TrainApp\n \"\"\"\n return TensorBoardLogger()\n\n def get_default_model_checkpoint(self) -> OSSModelCheckpoint:\n \"\"\"\n Override this method to return a default ModelCheckpoint callback.\n Note: If you want to use more than 1 ModelCheckpoint callback, add it through\n get_callbacks() function.\n \"\"\"\n dirpath: 
Optional[str] = None\n root_dir = self.root_dir\n if root_dir:\n dirpath = os.path.join(root_dir, ModelCheckpoint.CHECKPOINT_PATH_SUFFIX)\n\n return ModelCheckpoint(\n # will auto generate dirpath if not provided\n dirpath=dirpath,\n save_top_k=-1,\n has_user_data=False,\n ttl_days=1,\n monitor=None,\n )\n\n def _get_trainer(self) -> Tuple[Trainer, Dict[str, Any]]:\n trainer_params = self._init_trainer_params()\n self._set_trainer_params(trainer_params)\n\n # log trainer params\n log_params = dict(trainer_params)\n log_params[\"oncall_team\"] = \"pt_lightning\"\n log_params[\"run_status\"] = JobStatus.RUNNING.value\n log_run(**log_params)\n\n return Trainer(**trainer_params), log_params\n\n def _init_trainer_params(self) -> Dict[str, Any]:\n return get_trainer_params(self.trainer_conf)\n\n def _set_trainer_params(\n self,\n trainer_params: Dict[str, Any],\n ) -> None:\n # set default logger if not specified\n # if logger=False, do not add a logger\n if trainer_params.get(\"logger\", True):\n logger = self.get_logger()\n trainer_params[\"logger\"] = logger\n self.log_dir = logger.log_dir\n self.root_dir = logger.root_dir\n\n callbacks = trainer_params.get(\"callbacks\", [])\n callbacks.extend(self.get_callbacks())\n\n # create default model checkpoint callback unless disabled\n if trainer_params.get(\"checkpoint_callback\", True):\n checkpoint_callback = self.get_default_model_checkpoint()\n callbacks.append(checkpoint_callback)\n\n # auto-resume from last default checkpoint\n ckpt_path = checkpoint_callback.dirpath\n if not trainer_params.get(\"resume_from_checkpoint\") and ckpt_path:\n last_checkpoint = find_last_checkpoint_path(ckpt_path)\n trainer_params[\"resume_from_checkpoint\"] = last_checkpoint\n\n trainer_params[\"callbacks\"] = callbacks\n\n def train(self) -> TrainOutput:\n trainer, log_params = self._get_trainer()\n\n start_time = time.monotonic()\n got_exception = None\n try:\n trainer.fit(self.module, datamodule=self.datamodule)\n except Exception as ex:\n got_exception = ex\n\n # log trainer status to Scuba and Hive\n total_run_time = int(time.monotonic() - start_time)\n log_params[\"global_rank\"] = trainer.global_rank\n log_params[\"world_size\"] = trainer.world_size\n log_params[\"total_run_time\"] = total_run_time\n if got_exception is None:\n log_params[\"run_status\"] = JobStatus.COMPLETED.value\n log_run(**log_params)\n else:\n log_params[\"error_message\"] = str(got_exception)\n log_params[\"stacktrace\"] = traceback.format_stack()\n log_params[\"run_status\"] = JobStatus.FAILED.value\n log_run(**log_params)\n raise got_exception\n\n return TrainOutput(tensorboard_log_dir=self.log_dir)\n\n def test(self) -> _EVALUATE_OUTPUT:\n trainer, _ = self._get_trainer()\n return trainer.test(self.module, datamodule=self.datamodule)\n\n def predict(self) -> Optional[_PREDICT_OUTPUT]:\n trainer, _ = self._get_trainer()\n return trainer.predict(self.module, datamodule=self.datamodule)\n"
] |
[
[
"torch._C._log_api_usage_once"
]
] |
allenai/learning_from_interaction
|
[
"a266bc16d682832aa854348fa557a30d86b84674",
"a266bc16d682832aa854348fa557a30d86b84674"
] |
[
"source/models/poke_rcnn.py",
"source/losses/fgbg_losses.py"
] |
[
"import torch\nimport numpy as np\nfrom torch import nn\nfrom random import sample\nfrom detectron2.modeling.meta_arch.rcnn import GeneralizedRCNN\nfrom detectron2.modeling.postprocessing import detector_postprocess\nfrom detectron2.structures import Instances, Boxes, BitMasks\nfrom detectron2.utils.events import EventStorage\n\nfrom losses.losses import ObjectnessLossConfig, ObjectnessLoss\nfrom losses.loss_utils import focal_loss_function\nfrom models.model import Model\nfrom models.backbones import make_rpn50_fpn_config\nfrom config import global_config\n\n\nclass PokeRCNN(Model):\n \"\"\"\n This wraps a standard detectron2 MaskRCNN (including standard losses) for instance segmentation, but also predicts\n objectness logits like the clustering models, and can therefore be used for fully self-supervised training.\n \"\"\"\n\n def __init__(self, uncertainty=False):\n super(PokeRCNN, self).__init__()\n self.mask_rcnn = MaskRCNNWithPokeHead(uncertainty)\n self.poking_grid = [(i, j) for i in range(global_config.grid_size) for j in range(global_config.grid_size)]\n self.register_buffer('background_mask', torch.zeros(1, 800, 800, dtype=torch.int))\n self.register_buffer('background_box', torch.tensor([[1, 1, 799, 799]]))\n\n def forward(self, images: torch.tensor, targets=None):\n batched_inputs = self.rescale_and_zip(images, targets)\n if targets is None:\n return self.mask_rcnn.inference(batched_inputs)\n return self.mask_rcnn(batched_inputs)\n\n def rescale_and_zip(self, images, targets=None):\n with torch.no_grad():\n if targets is None:\n targets = [None] * images.shape[0]\n else:\n targets = list(zip(*targets))\n batched_output = []\n for image, target in zip(images, targets):\n d = {\"image\": nn.functional.interpolate(image.unsqueeze(0), (800, 800), mode='bilinear').squeeze(0)}\n if target is not None:\n masks, foreground, background = target\n instances = self.scale_and_process_masks(masks)\n d[\"instances\"] = instances\n d[\"poking_targets\"] = torch.stack([foreground, background])\n batched_output.append(d)\n return batched_output\n\n def scale_and_process_masks(self, masks):\n device = masks.device\n dummy_mask = torch.zeros_like(masks[0]).unsqueeze(0)\n non_emptys = masks.sum(dim=(1, 2)) > 0\n non_empty = non_emptys.sum().item()\n masks = torch.cat([masks[non_emptys], dummy_mask], dim=0) if non_empty else dummy_mask\n masks = (nn.functional.interpolate(masks.float().unsqueeze(1),\n size=(800, 800)) > .5).squeeze(1)\n\n if non_empty:\n box_coordinates = [torch.where(mask) for mask in masks[:-1]]\n box_coordinates = torch.tensor([[x[1].min(), x[0].min(), x[1].max(), x[0].max()] for x in box_coordinates])\n box_coordinates = torch.cat([box_coordinates.to(device), self.background_box], dim=0)\n else:\n box_coordinates = self.background_box\n\n instances = Instances((800, 800))\n instances.gt_boxes = Boxes(box_coordinates)\n instances.gt_masks = BitMasks(masks)\n classes = torch.zeros(non_empty + 1, dtype=torch.int64)\n classes[-1] = 1\n instances.gt_classes = classes\n return instances.to(device)\n\n @staticmethod\n def select_largest_on_mask(mask, scores):\n mask = mask.reshape(global_config.grid_size, global_config.stride,\n global_config.grid_size, global_config.stride).mean(axis=(1, 3)) > .5\n argmax = (mask * scores).argmax() if np.any(mask) else scores.argmax()\n return argmax // global_config.grid_size, argmax % global_config.grid_size\n\n def compute_actions(self, images: torch.tensor, num_pokes: int, episode: int, episodes: int):\n with torch.no_grad():\n results, 
poking_scores = self.forward(images)\n poking_scores_numpy = poking_scores[:, 0].sigmoid().cpu().numpy()\n detections = [result['instances'].pred_masks.cpu().numpy() for result in results]\n\n actions = []\n for masks, poking_score in zip(detections, poking_scores_numpy):\n action = []\n for mask in masks[:num_pokes // 2]:\n point = self.select_largest_on_mask(mask, poking_score)\n action.append(dict(point=point))\n action += sample(self.poking_grid, num_pokes - len(action))\n actions.append(action)\n\n return actions, (poking_scores,)\n\n def compute_masks(self, images: torch.tensor, threshold: float):\n ret_masks, scores, actions = [], [], []\n self.eval()\n with torch.no_grad():\n results, poking_scores = self.forward(images)\n poking_scores_numpy = poking_scores[:, 0].sigmoid().cpu().numpy()\n detections = [(result['instances'].pred_masks.cpu().numpy(), result['instances'].scores.cpu().numpy(),\n result['instances'].pred_classes.cpu().numpy())\n for result in results]\n\n for (masks, mask_scores, classes), poking_score in zip(detections, poking_scores_numpy):\n action, new_masks, new_scores = [], [], []\n for mask, mask_score, cl, _ in zip(masks, mask_scores, classes, [None] * global_config.max_pokes):\n if cl > 0:\n continue\n if mask_score < threshold:\n break\n point = self.select_largest_on_mask(mask, poking_score)\n action.append(dict(point=point))\n new_masks.append(mask)\n new_scores.append(mask_score)\n ret_masks.append(new_masks)\n scores.append(new_scores)\n actions.append(action)\n return actions, ret_masks, (poking_scores,), scores\n\n\nclass MaskRCNNWithPokeHead(GeneralizedRCNN):\n def __init__(self, uncertainty=True):\n super(MaskRCNNWithPokeHead, self).__init__(make_rpn50_fpn_config())\n self.poking_head = nn.Sequential(nn.Conv2d(256, 64, kernel_size=1, bias=False), nn.BatchNorm2d(64), nn.ReLU(),\n nn.Conv2d(64, 2, kernel_size=1))\n\n self.poking_loss = MaskPokingLoss(uncertainty)\n self.event_storage = EventStorage()\n\n def forward(self, batched_inputs):\n with self.event_storage:\n images = self.preprocess_image(batched_inputs)\n gt_instances = [x[\"instances\"] for x in batched_inputs]\n features = self.backbone(images.tensor)\n proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)\n _, detector_losses = self.roi_heads(images, features, proposals, gt_instances)\n poking_scores = self.poking_head(features['p3'])\n poking_targets = torch.stack([x['poking_targets'] for x in batched_inputs])\n poking_losses = self.poking_loss(poking_scores, poking_targets)\n\n losses = list(detector_losses.values()) + list(proposal_losses.values()) + [poking_losses]\n return losses\n\n def inference(self, batched_inputs, *kwargs):\n images = self.preprocess_image(batched_inputs)\n features = self.backbone(images.tensor)\n proposals, _ = self.proposal_generator(images, features, None)\n results, _ = self.roi_heads(images, features, proposals, None)\n poking_scores = self.poking_head(features['p3'])\n return self.postprocess(results, batched_inputs), poking_scores\n\n @staticmethod\n def postprocess(instances, batched_inputs):\n processed_results = []\n for results_per_image, input_per_image in zip(instances, batched_inputs):\n r = detector_postprocess(results_per_image.to('cpu'), 300, 300)\n processed_results.append({\"instances\": r})\n return processed_results\n\n\nclass DummyObjectnessLoss(ObjectnessLoss):\n def __init__(self, conf: ObjectnessLossConfig):\n super(DummyObjectnessLoss, self).__init__(conf)\n assert conf.prioritized_replay is False\n 
self.loss_summary_length = 6\n # NOTE: Since per-image mask losses are not accessible in MaskRCNN, prioritized replay is not supported.\n\n def __call__(self, losses, targets, weights, superpixels=None):\n if type(losses) == list:\n return losses\n return torch.zeros(self.loss_summary_length)\n\n\nclass MaskPokingLoss(nn.Module):\n def __init__(self, uncertainty):\n super(MaskPokingLoss, self).__init__()\n self.uncertainty = uncertainty\n self.loss = focal_loss_function(1)\n self.register_buffer('dummy_weight', torch.tensor(1, dtype=torch.float32))\n\n def forward(self, poking_scores, poking_targets):\n foreground, background = poking_targets[:, 0], poking_targets[:, 1]\n objectness, uncertainty = poking_scores[:, 0], poking_scores[:, 1]\n objectness_loss = self.loss(objectness, foreground, background, self.dummy_weight)\n if self.uncertainty:\n unc_foreground = (objectness >= 0) * background + (objectness <= 0) * foreground\n unc_background = (objectness > 0) * foreground + (objectness < 0) * background\n else:\n unc_foreground = foreground\n unc_background = background\n uncertainty_loss = self.loss(uncertainty, unc_foreground, unc_background, self.dummy_weight)\n return objectness_loss + uncertainty_loss\n",
"import numpy as np\nimport torch\n\nfrom config import FgBgLossConfig, global_config\nfrom losses.loss_utils import focal_loss_function\nfrom losses.losses import LossFunction\n\n\nclass FgBgLossFunction(LossFunction):\n \"\"\"\n This is a simple loss for foreground-background segmentation models with some adjustability for unlabeled pixels.\n \"\"\"\n def __init__(self, loss_config: FgBgLossConfig):\n super(FgBgLossFunction, self).__init__()\n self.loss_summary_length = 1\n self.prioritized_replay = loss_config.prioritized_replay\n self.config = loss_config\n self.focal_loss = focal_loss_function(1)\n\n def __call__(self, model_predictions: tuple, targets: tuple, weights, *superpixels):\n _, foreground_masks, background_masks = targets\n objectness_losses = []\n for weight, objectness, foreground, background in zip(\n weights, model_predictions[0], foreground_masks, background_masks):\n objectness_loss = self.compute_objectness_loss(objectness, foreground, background, weight)\n objectness_losses.append(objectness_loss)\n losses = [torch.stack(objectness_losses)]\n\n if self.prioritized_replay:\n priorities = self.compute_priorities(losses)\n return [l.sum() / (l > 0).sum().clamp(min=1) for l in losses], priorities\n\n return [l.sum() / (l > 0).sum().clamp(min=1) for l in losses]\n\n def process_feedback(self, actions: list, feedback: list, superpixels=None):\n targets = []\n num_successes = 0\n for act, fb in zip(actions, feedback):\n target, new_successes = self.process_single_feedback(act, fb)\n targets.append(target)\n num_successes += new_successes\n return targets, num_successes\n\n def process_single_feedback(self, actions, feedbacks):\n foreground_mask = np.zeros((global_config.grid_size, global_config.grid_size), dtype=np.float32)\n background_mask = np.zeros((global_config.grid_size, global_config.grid_size), dtype=np.float32)\n poking_mask = np.zeros((global_config.max_pokes, global_config.grid_size, global_config.grid_size),\n dtype=np.bool)\n successes = 0\n\n for i, (action, mask, pm) in enumerate(zip(actions, feedbacks, poking_mask)):\n weights = self.get_neighbourhood(action['point'])\n if mask.sum() > self.config.foreground_threshold:\n if self.config.restrict_positives:\n foreground_mask += weights\n pm[:] = mask > 0\n else:\n foreground_mask = (foreground_mask + mask) > 0\n successes += 1\n elif self.config.restrict_negatives:\n background_mask += weights\n if not self.config.restrict_negatives:\n background_mask = ~ foreground_mask\n return (poking_mask, foreground_mask, background_mask), successes\n\n def get_neighbourhood(self, action):\n x, y = action\n weights = np.zeros((global_config.grid_size, global_config.grid_size), dtype=np.float32)\n dx1 = min(x, self.config.kernel_size)\n dx2 = min(global_config.grid_size - 1 - x, self.config.kernel_size) + 1\n dy1 = min(y, self.config.kernel_size)\n dy2 = min(global_config.grid_size - 1 - y, self.config.kernel_size) + 1\n x1, x2, y1, y2 = x - dx1, x + dx2, y - dy1, y + dy2\n weights[x1:x2, y1:y2] = self.config.kernel[self.config.kernel_size - dx1:\n self.config.kernel_size + dx2,\n self.config.kernel_size - dy1:\n self.config.kernel_size + dy2]\n return weights\n\n def compute_objectness_loss(self, objectness, foreground, background, weight):\n b = objectness.shape[0] > 1\n objectness, uncertainty = objectness[0], objectness[1] if b else None\n objectness_loss = self.focal_loss(objectness, foreground, background, weight)\n if b:\n uncertainty_foreground = foreground * (objectness < 0) + background * (objectness > 0)\n 
uncertainty_background = foreground * (objectness >= 0) + background * (objectness <= 0)\n uncertainty_loss = self.focal_loss(uncertainty, uncertainty_foreground, uncertainty_background, weight)\n return objectness_loss + uncertainty_loss\n return objectness_loss\n\n def compute_priorities(self, losses: list):\n raise NotImplementedError\n\n\nclass SoftMaskLossFunction(LossFunction):\n \"\"\"\n This is an L2 loss for fitting soft fg-bg targets. It is used for the videoPCA baseline.\n \"\"\"\n def __init__(self):\n super(SoftMaskLossFunction, self).__init__()\n self.loss_summary_length = 2\n\n def __call__(self, model_predictions: tuple, targets: tuple, weights, *superpixels):\n soft_masks = targets[0]\n objectness_losses = []\n for weight, objectness, soft_mask in zip(weights, model_predictions[0].sigmoid(), soft_masks):\n loss = weight * ((objectness - soft_mask)**2).sum()\n objectness_losses.append(loss)\n losses = [torch.stack(objectness_losses)]\n\n return [l.sum() / (l > 0).sum().clamp(min=1) for l in losses]\n\n def process_feedback(self, actions: list, feedback: list, superpixels=None):\n targets = []\n num_successes = 0\n for act, fb in zip(actions, feedback):\n target, new_successes = self.process_single_feedback(act, fb)\n targets.append(target)\n num_successes += new_successes\n return targets, num_successes\n\n @staticmethod\n def process_single_feedback(actions, feedbacks):\n soft_mask = np.zeros((global_config.grid_size, global_config.grid_size), dtype=np.float32)\n successes = 0\n\n for i, (action, mask) in enumerate(zip(actions, feedbacks)):\n soft_mask += mask\n successes += mask.sum() > 0\n return (soft_mask, ), successes\n\n def compute_priorities(self, losses: list):\n raise NotImplementedError\n"
] |
[
[
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.no_grad",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"numpy.any",
"torch.nn.Conv2d",
"torch.tensor",
"torch.zeros_like",
"torch.where"
],
[
"torch.stack",
"numpy.zeros"
]
] |